/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005  Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
static bool arm_assemble_integer (rtx, unsigned int, int);
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static rtx is_jump_table (rtx);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);
#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
#endif
static void arm_encode_section_info (tree, rtx, int);
static void arm_file_end (void);
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static rtx arm_struct_value_rtx (tree, int);
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif
static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

struct gcc_target targetm = TARGET_INITIALIZER;
/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;
/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    0
#define FL_FOR_ARCH3    FL_MODE32
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   FL_FOR_ARCH6
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6
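
/* The FL_FOR_ARCH macros compose cumulatively; for example FL_FOR_ARCH5TE
   expands to
   (FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB),
   i.e. everything a v5TE part is assumed to provide.  */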
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;
/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
int arm_pic_register = INVALID_REGNUM;

/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;

/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
enum arm_cond_code arm_current_cc;
int arm_target_label;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};

#define streq(string1, string2) (strcmp (string1, string2) == 0)
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};
struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
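
/* For example, compiling with -mcpu=xscale leaves arm_select[0].string
   pointing at "xscale" (filled in by arm_handle_option below), while the
   -march= and -mtune= slots keep a NULL string unless those options are
   also given.  */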
/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};
/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};

/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum arm_fp_model fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,         /* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,             /* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,        /* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP              /* FPUTYPE_VFP  */
};

struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};

struct abi_name
{
  const char * name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",   ARM_ABI_ATPCS},
  {"aapcs",   ARM_ABI_AAPCS},
  {"iwmmxt",  ARM_ABI_IWMMXT}
};
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
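
/* As an illustration, bit_count (0x29) sees binary 101001; successive
   "value &= value - 1" steps give 0x28, then 0x20, then 0, so the loop
   body runs three times and the function returns 3.  */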
/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     functions.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidivmod");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
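
/* A worked example of the divmod arrangement above: a plain 32-bit signed
   division such as "q = n / d" expands to a call to __aeabi_idivmod; the
   quotient comes back in r0 (the normal return register), the unused
   remainder sits in r1, and no separate quotient-only libcall is needed.  */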
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          cpu = SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (cpu == arm_none)
            cpu = arm6;
        }
      sel = &all_cores[cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

      if (sought != 0 && ((sought & insn_flags) != sought))
        {
          /* Try to locate a CPU type that supports all of the abilities
             of the default CPU, plus the extra abilities requested by
             the user.  */
          for (sel = all_cores; sel->name != NULL; sel++)
            if ((sel->flags & sought) == (sought | insn_flags))
              break;

          if (sel->name == NULL)
            {
              unsigned current_bit_count = 0;
              const struct processors * best_fit = NULL;

              /* Ideally we would like to issue an error message here
                 saying that it was not possible to find a CPU compatible
                 with the default CPU, but which also supports the command
                 line options specified by the programmer, and so they
                 ought to use the -mcpu=<name> command line option to
                 override the default CPU type.

                 If we cannot find a cpu that has both the
                 characteristics of the default cpu and the given
                 command line options we scan the array again looking
                 for a best match.  */
              for (sel = all_cores; sel->name != NULL; sel++)
                if ((sel->flags & sought) == sought)
                  {
                    unsigned count;

                    count = bit_count (sel->flags & insn_flags);

                    if (count >= current_bit_count)
                      {
                        best_fit = sel;
                        current_bit_count = count;
                      }
                  }

              gcc_assert (best_fit);
              sel = best_fit;
            }

          insn_flags = sel->flags;
        }

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      if (arm_tune == arm_none)
        arm_tune = (enum processor_type) (sel - all_cores);
    }

  /* The processor for which we should tune should now have been
     selected.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking" );
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  */
  if (flag_pic)
    arm_pic_register = TARGET_APCS_STACK ? 9 : 10;

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;
  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
        {
          if (streq (all_fpus[i].name, target_fpu_name))
            {
              arm_fpu_arch = all_fpus[i].fpu;
              arm_fpu_tune = arm_fpu_arch;
              arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
              break;
            }
        }
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
        error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
        arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
        arm_fpu_arch = FPUTYPE_MAVERICK;
      else
        arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
        arm_fpu_tune = FPUTYPE_FPA;
      else
        arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;
  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  /* Override the default structure alignment for AAPCS ABI.  */
  if (arm_abi == ARM_ABI_AAPCS)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM)
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }
  if (optimize_size)
    {
      /* There's some dispute as to whether this should be 1 or 2.  However,
         experiments seem to show that in pathological cases a setting of
         1 degrades less severely than a setting of 2.  This could change if
         other parts of the compiler change their behavior.  */
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init(&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const  arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
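
/* So, for example, a function declared with
   __attribute__ ((interrupt ("IRQ"))) is classified as ARM_FT_ISR via the
   table above, while an unknown string such as "NMI" yields
   ARM_FT_UNKNOWN.  */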
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && TREE_NOTHROW (current_function_decl)
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked functions and volatile functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED))
    return 0;

  /* So do interrupt functions that use the frame pointer.  */
  if (IS_INTERRUPT (func_type) && frame_pointer_needed)
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default abi) ... */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ... */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0)
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs[regno])
        return 0;

  return 1;
}
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros, rounded down to the nearest even
     number.  */
  lowbit = (ffs ((int) i) - 1) & ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;
  else if (lowbit <= 4
           && ((i & ~0xc000003f) == 0
               || (i & ~0xf000000f) == 0
               || (i & ~0xfc000003) == 0))
    return TRUE;

  return FALSE;
}
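
/* Worked examples of the test above: 0x000000ff and 0x000003fc are valid
   ARM immediates (an 8-bit value rotated right by an even amount), while
   0x000001fe is rejected because the rotation it would need is odd.  The
   lowbit <= 4 cases catch values such as 0xf000000f whose 8-bit pattern
   wraps around the top of the word.  */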
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
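
/* For example, 0xffffff00 is not itself a valid ARM immediate, but
   const_ok_for_op (0xffffff00, AND) returns nonzero because the inverted
   value 0xff is valid, so the masking operation can be carried out with a
   single bic instruction.  */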
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are diadic.  */
              emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (val)));
              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (val)));
              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_MINUS (mode, temp, source)));
              else
                emit_insn (gen_rtx_SET (VOIDmode, target,
                                        gen_rtx_fmt_ee (code, mode, source, temp)));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
/* Count the number of ARM insns needed to synthesize the constant
   REMAINDER, starting the scan at bit position I.  */
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int num_insns = 0;
  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (3 << (i - 2)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                               | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 6;
        }
      i -= 2;
    } while (remainder);
  return num_insns;
}
/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}
/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }

      /* We don't know how to handle other cases yet.  */
      gcc_assert (remainder == 0xffffffff);

      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         gen_rtx_NOT (mode, source)));
      return 1;

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      gcc_unreachable ();
    }

  /* If we can do it in one insn get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }
1857 /* See if we can do this by sign_extending a constant that is known
1858 to be negative. This is a good, way of doing it, since the shift
1859 may well merge into a subsequent insn. */
1860 if (set_sign_bit_copies
> 1)
1862 if (const_ok_for_arm
1863 (temp1
= ARM_SIGN_EXTEND (remainder
1864 << (set_sign_bit_copies
- 1))))
1868 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1869 emit_constant_insn (cond
,
1870 gen_rtx_SET (VOIDmode
, new_src
,
1872 emit_constant_insn (cond
,
1873 gen_ashrsi3 (target
, new_src
,
1874 GEN_INT (set_sign_bit_copies
- 1)));
1878 /* For an inverted constant, we will need to set the low bits,
1879 these will be shifted out of harm's way. */
1880 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
1881 if (const_ok_for_arm (~temp1
))
1885 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1886 emit_constant_insn (cond
,
1887 gen_rtx_SET (VOIDmode
, new_src
,
1889 emit_constant_insn (cond
,
1890 gen_ashrsi3 (target
, new_src
,
1891 GEN_INT (set_sign_bit_copies
- 1)));
1897 /* See if we can calculate the value as the difference between two
1898 valid immediates. */
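      /* As an illustration (a value chosen here for exposition, not taken
	 from real compiler output): 0x0fffff00 is not a valid immediate,
	 and neither is its inverse, but it can be formed as
	 0x10000000 - 0x100, both of which are valid immediates, so the
	 code below should produce roughly

	    mov	rT, #0x10000000
	    sub	target, rT, #0x100

	 using two instructions instead of a longer byte-by-byte sequence.  */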
1899 if (clear_sign_bit_copies
+ clear_zero_bit_copies
<= 16)
1901 int topshift
= clear_sign_bit_copies
& ~1;
1903 temp1
= ARM_SIGN_EXTEND ((remainder
+ (0x00800000 >> topshift
))
1904 & (0xff000000 >> topshift
));
1906 /* If temp1 is zero, then that means the 9 most significant
1907 bits of remainder were 1 and we've caused it to overflow.
1908 When topshift is 0 we don't need to do anything since we
1909 can borrow from 'bit 32'. */
1910 if (temp1
== 0 && topshift
!= 0)
1911 temp1
= 0x80000000 >> (topshift
- 1);
1913 temp2
= ARM_SIGN_EXTEND (temp1
- remainder
);
1915 if (const_ok_for_arm (temp2
))
1919 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1920 emit_constant_insn (cond
,
1921 gen_rtx_SET (VOIDmode
, new_src
,
1923 emit_constant_insn (cond
,
1924 gen_addsi3 (target
, new_src
,
1932 /* See if we can generate this by setting the bottom (or the top)
1933 16 bits, and then shifting these into the other half of the
1934 word. We only look for the simplest cases, to do more would cost
1935 too much. Be careful, however, not to generate this when the
1936 alternative would take fewer insns. */
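      /* For example (an illustrative value, not from real output):
	 0x01230123 repeats its low half in its high half, so once the
	 16-bit value 0x0123 has been synthesized (two instructions, since
	 it is not a valid immediate on its own), a single ORR of that value
	 with itself shifted left by 16 completes the constant: three
	 instructions rather than the four needed to build it one byte
	 group at a time.  */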
1937 if (val
& 0xffff0000)
1939 temp1
= remainder
& 0xffff0000;
1940 temp2
= remainder
& 0x0000ffff;
1942 /* Overlaps outside this range are best done using other methods. */
1943 for (i
= 9; i
< 24; i
++)
1945 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
1946 && !const_ok_for_arm (temp2
))
1948 rtx new_src
= (subtargets
1949 ? (generate
? gen_reg_rtx (mode
) : NULL_RTX
)
1951 insns
= arm_gen_constant (code
, mode
, cond
, temp2
, new_src
,
1952 source
, subtargets
, generate
);
1960 gen_rtx_ASHIFT (mode
, source
,
1967 /* Don't duplicate cases already considered. */
1968 for (i
= 17; i
< 24; i
++)
1970 if (((temp1
| (temp1
>> i
)) == remainder
)
1971 && !const_ok_for_arm (temp1
))
1973 rtx new_src
= (subtargets
1974 ? (generate
? gen_reg_rtx (mode
) : NULL_RTX
)
1976 insns
= arm_gen_constant (code
, mode
, cond
, temp1
, new_src
,
1977 source
, subtargets
, generate
);
1982 gen_rtx_SET (VOIDmode
, target
,
1985 gen_rtx_LSHIFTRT (mode
, source
,
1996 /* If we have IOR or XOR, and the constant can be loaded in a
1997 single instruction, and we can find a temporary to put it in,
1998 then this can be done in two instructions instead of 3-4. */
2000 /* TARGET can't be NULL if SUBTARGETS is 0 */
2001 || (reload_completed
&& !reg_mentioned_p (target
, source
)))
2003 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val
)))
2007 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2009 emit_constant_insn (cond
,
2010 gen_rtx_SET (VOIDmode
, sub
,
2012 emit_constant_insn (cond
,
2013 gen_rtx_SET (VOIDmode
, target
,
2014 gen_rtx_fmt_ee (code
, mode
,
2024 if (set_sign_bit_copies
> 8
2025 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
2029 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2030 rtx shift
= GEN_INT (set_sign_bit_copies
);
2034 gen_rtx_SET (VOIDmode
, sub
,
2036 gen_rtx_ASHIFT (mode
,
2041 gen_rtx_SET (VOIDmode
, target
,
2043 gen_rtx_LSHIFTRT (mode
, sub
,
2049 if (set_zero_bit_copies
> 8
2050 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
2054 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2055 rtx shift
= GEN_INT (set_zero_bit_copies
);
2059 gen_rtx_SET (VOIDmode
, sub
,
2061 gen_rtx_LSHIFTRT (mode
,
2066 gen_rtx_SET (VOIDmode
, target
,
2068 gen_rtx_ASHIFT (mode
, sub
,
2074 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~val
)))
2078 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2079 emit_constant_insn (cond
,
2080 gen_rtx_SET (VOIDmode
, sub
,
2081 gen_rtx_NOT (mode
, source
)));
2084 sub
= gen_reg_rtx (mode
);
2085 emit_constant_insn (cond
,
2086 gen_rtx_SET (VOIDmode
, sub
,
2087 gen_rtx_AND (mode
, source
,
2089 emit_constant_insn (cond
,
2090 gen_rtx_SET (VOIDmode
, target
,
2091 gen_rtx_NOT (mode
, sub
)));
  /* See if two shifts will do 2 or more insns' worth of work.  */
2099 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
2101 HOST_WIDE_INT shift_mask
= ((0xffffffff
2102 << (32 - clear_sign_bit_copies
))
2105 if ((remainder
| shift_mask
) != 0xffffffff)
2109 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2110 insns
= arm_gen_constant (AND
, mode
, cond
,
2111 remainder
| shift_mask
,
2112 new_src
, source
, subtargets
, 1);
2117 rtx targ
= subtargets
? NULL_RTX
: target
;
2118 insns
= arm_gen_constant (AND
, mode
, cond
,
2119 remainder
| shift_mask
,
2120 targ
, source
, subtargets
, 0);
2126 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2127 rtx shift
= GEN_INT (clear_sign_bit_copies
);
2129 emit_insn (gen_ashlsi3 (new_src
, source
, shift
));
2130 emit_insn (gen_lshrsi3 (target
, new_src
, shift
));
2136 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
2138 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
2140 if ((remainder
| shift_mask
) != 0xffffffff)
2144 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2146 insns
= arm_gen_constant (AND
, mode
, cond
,
2147 remainder
| shift_mask
,
2148 new_src
, source
, subtargets
, 1);
2153 rtx targ
= subtargets
? NULL_RTX
: target
;
2155 insns
= arm_gen_constant (AND
, mode
, cond
,
2156 remainder
| shift_mask
,
2157 targ
, source
, subtargets
, 0);
2163 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2164 rtx shift
= GEN_INT (clear_zero_bit_copies
);
2166 emit_insn (gen_lshrsi3 (new_src
, source
, shift
));
2167 emit_insn (gen_ashlsi3 (target
, new_src
, shift
));
2179 for (i
= 0; i
< 32; i
++)
2180 if (remainder
& (1 << i
))
2183 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
2184 remainder
= (~remainder
) & 0xffffffff;
2185 else if (code
== PLUS
&& num_bits_set
> 16)
2186 remainder
= (-remainder
) & 0xffffffff;
2193 /* Now try and find a way of doing the job in either two or three
     We start by looking for the largest block of zeros that is aligned on
     a 2-bit boundary; we then fill up the temps, wrapping around to the
2197 top of the word when we drop off the bottom.
2198 In the worst case this code should produce no more than four insns. */
2201 int best_consecutive_zeros
= 0;
2203 for (i
= 0; i
< 32; i
+= 2)
2205 int consecutive_zeros
= 0;
2207 if (!(remainder
& (3 << i
)))
2209 while ((i
< 32) && !(remainder
& (3 << i
)))
2211 consecutive_zeros
+= 2;
2214 if (consecutive_zeros
> best_consecutive_zeros
)
2216 best_consecutive_zeros
= consecutive_zeros
;
2217 best_start
= i
- consecutive_zeros
;
2223 /* So long as it won't require any more insns to do so, it's
2224 desirable to emit a small constant (in bits 0...9) in the last
2225 insn. This way there is more chance that it can be combined with
2226 a later addressing insn to form a pre-indexed load or store
2227 operation. Consider:
2229 *((volatile int *)0xe0000100) = 1;
2230 *((volatile int *)0xe0000110) = 2;
     We want this to wind up as:

	mov	rA, #0xe0000000
	mov	rB, #1
	str	rB, [rA, #0x100]
	mov	rB, #2
	str	rB, [rA, #0x110]
2240 rather than having to synthesize both large constants from scratch.
2242 Therefore, we calculate how many insns would be required to emit
2243 the constant starting from `best_start', and also starting from
2244 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2245 yield a shorter sequence, we may as well use zero. */
2247 && ((((unsigned HOST_WIDE_INT
) 1) << best_start
) < remainder
)
2248 && (count_insns_for_constant (remainder
, 0) <=
2249 count_insns_for_constant (remainder
, best_start
)))
2252 /* Now start emitting the insns. */
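  /* As a sketch of what this loop produces (an illustrative value only):
     for a SET of 0x00f000f0 the two byte groups 0x00f00000 and 0x000000f0
     are emitted separately, giving approximately

	mov	rT, #0x00f00000
	add	target, rT, #0xf0

     with the small constant left until last, as discussed above.  */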
2260 if (remainder
& (3 << (i
- 2)))
2265 temp1
= remainder
& ((0x0ff << end
)
2266 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
2267 remainder
&= ~temp1
;
2271 rtx new_src
, temp1_rtx
;
2273 if (code
== SET
|| code
== MINUS
)
2275 new_src
= (subtargets
? gen_reg_rtx (mode
) : target
);
2276 if (can_invert
&& code
!= MINUS
)
2281 if (remainder
&& subtargets
)
2282 new_src
= gen_reg_rtx (mode
);
2287 else if (can_negate
)
2291 temp1
= trunc_int_for_mode (temp1
, mode
);
2292 temp1_rtx
= GEN_INT (temp1
);
2296 else if (code
== MINUS
)
2297 temp1_rtx
= gen_rtx_MINUS (mode
, temp1_rtx
, source
);
2299 temp1_rtx
= gen_rtx_fmt_ee (code
, mode
, source
, temp1_rtx
);
2301 emit_constant_insn (cond
,
2302 gen_rtx_SET (VOIDmode
, new_src
,
2312 else if (code
== MINUS
)
2326 /* Canonicalize a comparison so that we are more likely to recognize it.
2327 This can be done for a few constant compares, where we can make the
2328 immediate value easier to load. */
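/* For instance (an illustrative constant, not from real output): in
   (GT x 0xffffff) the value 0xffffff is not a valid ARM immediate, but
   0x1000000 is, so the comparison is rewritten as (GE x 0x1000000),
   which tests the same condition and loads its operand in one insn.  */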
2331 arm_canonicalize_comparison (enum rtx_code code
, rtx
* op1
)
2333 unsigned HOST_WIDE_INT i
= INTVAL (*op1
);
2343 if (i
!= ((((unsigned HOST_WIDE_INT
) 1) << (HOST_BITS_PER_WIDE_INT
- 1)) - 1)
2344 && (const_ok_for_arm (i
+ 1) || const_ok_for_arm (-(i
+ 1))))
2346 *op1
= GEN_INT (i
+ 1);
2347 return code
== GT
? GE
: LT
;
2353 if (i
!= (((unsigned HOST_WIDE_INT
) 1) << (HOST_BITS_PER_WIDE_INT
- 1))
2354 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (-(i
- 1))))
2356 *op1
= GEN_INT (i
- 1);
2357 return code
== GE
? GT
: LE
;
2363 if (i
!= ~((unsigned HOST_WIDE_INT
) 0)
2364 && (const_ok_for_arm (i
+ 1) || const_ok_for_arm (-(i
+ 1))))
2366 *op1
= GEN_INT (i
+ 1);
2367 return code
== GTU
? GEU
: LTU
;
2374 && (const_ok_for_arm (i
- 1) || const_ok_for_arm (-(i
- 1))))
2376 *op1
= GEN_INT (i
- 1);
2377 return code
== GEU
? GTU
: LEU
;
2389 /* Define how to find the value returned by a function. */
2392 arm_function_value(tree type
, tree func ATTRIBUTE_UNUSED
)
2394 enum machine_mode mode
;
2395 int unsignedp ATTRIBUTE_UNUSED
;
2396 rtx r ATTRIBUTE_UNUSED
;
2398 mode
= TYPE_MODE (type
);
2399 /* Promote integer types. */
2400 if (INTEGRAL_TYPE_P (type
))
2401 PROMOTE_FUNCTION_MODE (mode
, unsignedp
, type
);
  /* Promote small structs returned in a register to full-word size
2404 for big-endian AAPCS. */
2405 if (arm_return_in_msb (type
))
2407 HOST_WIDE_INT size
= int_size_in_bytes (type
);
2408 if (size
% UNITS_PER_WORD
!= 0)
2410 size
+= UNITS_PER_WORD
- size
% UNITS_PER_WORD
;
2411 mode
= mode_for_size (size
* BITS_PER_UNIT
, MODE_INT
, 0);
2415 return LIBCALL_VALUE(mode
);
2418 /* Determine the amount of memory needed to store the possible return
2419 registers of an untyped call. */
2421 arm_apply_result_size (void)
2427 if (TARGET_HARD_FLOAT_ABI
)
2431 if (TARGET_MAVERICK
)
2434 if (TARGET_IWMMXT_ABI
)
2441 /* Decide whether a type should be returned in memory (true)
2442 or in a register (false). This is called by the macro
2443 RETURN_IN_MEMORY. */
2445 arm_return_in_memory (tree type
)
2449 if (!AGGREGATE_TYPE_P (type
) &&
2450 (TREE_CODE (type
) != VECTOR_TYPE
) &&
2451 !(TARGET_AAPCS_BASED
&& TREE_CODE (type
) == COMPLEX_TYPE
))
2452 /* All simple types are returned in registers.
2453 For AAPCS, complex types are treated the same as aggregates. */
2456 size
= int_size_in_bytes (type
);
2458 if (arm_abi
!= ARM_ABI_APCS
)
2460 /* ATPCS and later return aggregate types in memory only if they are
2461 larger than a word (or are variable size). */
2462 return (size
< 0 || size
> UNITS_PER_WORD
);
2465 /* To maximize backwards compatibility with previous versions of gcc,
2466 return vectors up to 4 words in registers. */
2467 if (TREE_CODE (type
) == VECTOR_TYPE
)
2468 return (size
< 0 || size
> (4 * UNITS_PER_WORD
));
2470 /* For the arm-wince targets we choose to be compatible with Microsoft's
2471 ARM and Thumb compilers, which always return aggregates in memory. */
2473 /* All structures/unions bigger than one word are returned in memory.
2474 Also catch the case where int_size_in_bytes returns -1. In this case
2475 the aggregate is either huge or of variable size, and in either case
2476 we will want to return it via memory and not in a register. */
2477 if (size
< 0 || size
> UNITS_PER_WORD
)
2480 if (TREE_CODE (type
) == RECORD_TYPE
)
2484 /* For a struct the APCS says that we only return in a register
2485 if the type is 'integer like' and every addressable element
2486 has an offset of zero. For practical purposes this means
2487 that the structure can have at most one non bit-field element
2488 and that this element must be the first one in the structure. */
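     /* Some illustrative cases under the APCS rules described above
	(hypothetical user types, assuming 4-byte ints and words):

	   struct a { int x; };                    -- integer like, register
	   struct b { unsigned x : 16, y : 16; };  -- only bit-fields follow,
						      register
	   struct c { short lo; short hi; };       -- second field is not a
						      bit-field, memory
	   struct d { float f; };                  -- float member, memory  */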
2490 /* Find the first field, ignoring non FIELD_DECL things which will
2491 have been created by C++. */
2492 for (field
= TYPE_FIELDS (type
);
2493 field
&& TREE_CODE (field
) != FIELD_DECL
;
2494 field
= TREE_CHAIN (field
))
2498 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2500 /* Check that the first field is valid for returning in a register. */
2502 /* ... Floats are not allowed */
2503 if (FLOAT_TYPE_P (TREE_TYPE (field
)))
2506 /* ... Aggregates that are not themselves valid for returning in
2507 a register are not allowed. */
2508 if (RETURN_IN_MEMORY (TREE_TYPE (field
)))
2511 /* Now check the remaining fields, if any. Only bitfields are allowed,
2512 since they are not addressable. */
2513 for (field
= TREE_CHAIN (field
);
2515 field
= TREE_CHAIN (field
))
2517 if (TREE_CODE (field
) != FIELD_DECL
)
2520 if (!DECL_BIT_FIELD_TYPE (field
))
2527 if (TREE_CODE (type
) == UNION_TYPE
)
2531 /* Unions can be returned in registers if every element is
2532 integral, or can be returned in an integer register. */
2533 for (field
= TYPE_FIELDS (type
);
2535 field
= TREE_CHAIN (field
))
2537 if (TREE_CODE (field
) != FIELD_DECL
)
2540 if (FLOAT_TYPE_P (TREE_TYPE (field
)))
2543 if (RETURN_IN_MEMORY (TREE_TYPE (field
)))
2549 #endif /* not ARM_WINCE */
2551 /* Return all other types in memory. */
2555 /* Indicate whether or not words of a double are in big-endian order. */
2558 arm_float_words_big_endian (void)
2560 if (TARGET_MAVERICK
)
  /* For FPA, float words are always big-endian.  For VFP, float words
2564 follow the memory system mode. */
2572 return (TARGET_BIG_END
? 1 : 0);
2577 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2578 for a call to a function whose data type is FNTYPE.
2579 For a library call, FNTYPE is NULL. */
2581 arm_init_cumulative_args (CUMULATIVE_ARGS
*pcum
, tree fntype
,
2582 rtx libname ATTRIBUTE_UNUSED
,
2583 tree fndecl ATTRIBUTE_UNUSED
)
2585 /* On the ARM, the offset starts at 0. */
2586 pcum
->nregs
= ((fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fntype
)) ? 1 : 0);
2587 pcum
->iwmmxt_nregs
= 0;
2588 pcum
->can_split
= true;
2590 pcum
->call_cookie
= CALL_NORMAL
;
2592 if (TARGET_LONG_CALLS
)
2593 pcum
->call_cookie
= CALL_LONG
;
2595 /* Check for long call/short call attributes. The attributes
2596 override any command line option. */
2599 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype
)))
2600 pcum
->call_cookie
= CALL_SHORT
;
2601 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype
)))
2602 pcum
->call_cookie
= CALL_LONG
;
2605 /* Varargs vectors are treated the same as long long.
2606 named_count avoids having to change the way arm handles 'named' */
2607 pcum
->named_count
= 0;
2610 if (TARGET_REALLY_IWMMXT
&& fntype
)
2614 for (fn_arg
= TYPE_ARG_TYPES (fntype
);
2616 fn_arg
= TREE_CHAIN (fn_arg
))
2617 pcum
->named_count
+= 1;
2619 if (! pcum
->named_count
)
2620 pcum
->named_count
= INT_MAX
;
2625 /* Return true if mode/type need doubleword alignment. */
2627 arm_needs_doubleword_align (enum machine_mode mode
, tree type
)
  return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
	  || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2634 /* Determine where to put an argument to a function.
2635 Value is zero to push the argument on the stack,
2636 or a hard register in which to store the argument.
2638 MODE is the argument's machine mode.
2639 TYPE is the data type of the argument (as a tree).
2640 This is null for libcalls where that information may
2642 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2643 the preceding args and about the function being called.
2644 NAMED is nonzero if this argument is a named parameter
2645 (otherwise it is an extra parameter matching an ellipsis). */
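/* As an example of the handling below (a hypothetical prototype, assuming
   an ABI such as AAPCS where "long long" requires doubleword alignment):
   given  void f (int a, long long b);  a is passed in r0, b goes in the
   even register pair r2/r3, r1 is left unused, and any further arguments
   are passed on the stack.  */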
2648 arm_function_arg (CUMULATIVE_ARGS
*pcum
, enum machine_mode mode
,
2649 tree type
, int named
)
2653 /* Varargs vectors are treated the same as long long.
2654 named_count avoids having to change the way arm handles 'named' */
2655 if (TARGET_IWMMXT_ABI
2656 && arm_vector_mode_supported_p (mode
)
2657 && pcum
->named_count
> pcum
->nargs
+ 1)
2659 if (pcum
->iwmmxt_nregs
<= 9)
2660 return gen_rtx_REG (mode
, pcum
->iwmmxt_nregs
+ FIRST_IWMMXT_REGNUM
);
2663 pcum
->can_split
= false;
2668 /* Put doubleword aligned quantities in even register pairs. */
2670 && ARM_DOUBLEWORD_ALIGN
2671 && arm_needs_doubleword_align (mode
, type
))
2674 if (mode
== VOIDmode
)
2675 /* Compute operand 2 of the call insn. */
2676 return GEN_INT (pcum
->call_cookie
);
2678 /* Only allow splitting an arg between regs and memory if all preceding
2679 args were allocated to regs. For args passed by reference we only count
2680 the reference pointer. */
2681 if (pcum
->can_split
)
2684 nregs
= ARM_NUM_REGS2 (mode
, type
);
2686 if (!named
|| pcum
->nregs
+ nregs
> NUM_ARG_REGS
)
2689 return gen_rtx_REG (mode
, pcum
->nregs
);
2693 arm_arg_partial_bytes (CUMULATIVE_ARGS
*pcum
, enum machine_mode mode
,
2694 tree type
, bool named ATTRIBUTE_UNUSED
)
2696 int nregs
= pcum
->nregs
;
2698 if (arm_vector_mode_supported_p (mode
))
2701 if (NUM_ARG_REGS
> nregs
2702 && (NUM_ARG_REGS
< nregs
+ ARM_NUM_REGS2 (mode
, type
))
2704 return (NUM_ARG_REGS
- nregs
) * UNITS_PER_WORD
;
2709 /* Variable sized types are passed by reference. This is a GCC
2710 extension to the ARM ABI. */
2713 arm_pass_by_reference (CUMULATIVE_ARGS
*cum ATTRIBUTE_UNUSED
,
2714 enum machine_mode mode ATTRIBUTE_UNUSED
,
2715 tree type
, bool named ATTRIBUTE_UNUSED
)
  return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2720 /* Encode the current state of the #pragma [no_]long_calls. */
2723 OFF
, /* No #pragma [no_]long_calls is in effect.  */
2724 LONG
, /* #pragma long_calls is in effect. */
2725 SHORT
/* #pragma no_long_calls is in effect. */
2728 static arm_pragma_enum arm_pragma_long_calls
= OFF
;
2731 arm_pr_long_calls (struct cpp_reader
* pfile ATTRIBUTE_UNUSED
)
2733 arm_pragma_long_calls
= LONG
;
2737 arm_pr_no_long_calls (struct cpp_reader
* pfile ATTRIBUTE_UNUSED
)
2739 arm_pragma_long_calls
= SHORT
;
2743 arm_pr_long_calls_off (struct cpp_reader
* pfile ATTRIBUTE_UNUSED
)
2745 arm_pragma_long_calls
= OFF
;
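
/* For example, user code can switch the default with these pragmas
   (a hypothetical snippet, assuming the handlers above are registered
   as #pragma long_calls, no_long_calls and long_calls_off):

     #pragma long_calls
     extern void far_away (void);    -- given the long_call attribute
     #pragma long_calls_off
     extern void near_by (void);     -- back to the command-line default  */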
2748 /* Table of machine attributes. */
const struct attribute_spec arm_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  /* Function calls made to this symbol must be done indirectly, because
     it may lie outside of the 26 bit addressing range of a normal function
     call.  */
  { "long_call",    0, 0, false, true,  true,  NULL },
  /* Whereas these functions are always known to reside within the 26 bit
     addressing range.  */
  { "short_call",   0, 0, false, true,  true,  NULL },
  /* Interrupt Service Routines have special prologue and epilogue
     requirements.  */
  { "isr",          0, 1, false, false, false, arm_handle_isr_attribute },
  { "interrupt",    0, 1, false, false, false, arm_handle_isr_attribute },
  { "naked",        0, 0, true,  false, false, arm_handle_fndecl_attribute },
#ifdef ARM_PE
  /* ARM/PE has three new attributes:
     interfacearm - ?
     dllexport - for exporting a function/variable that will live in a dll
     dllimport - for importing a function/variable from a dll

     Microsoft allows multiple declspecs in one __declspec, separating
     them with spaces.  We do NOT support this.  Instead, use __declspec
     multiple times.  */
  { "dllimport",    0, 0, true,  false, false, NULL },
  { "dllexport",    0, 0, true,  false, false, NULL },
  { "interfacearm", 0, 0, true,  false, false, arm_handle_fndecl_attribute },
#elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
  { "dllimport",    0, 0, false, false, false, handle_dll_attribute },
  { "dllexport",    0, 0, false, false, false, handle_dll_attribute },
  { "notshared",    0, 0, false, true,  false, arm_handle_notshared_attribute },
#endif
  { NULL,           0, 0, false, false, false, NULL }
};
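
/* For example, user code can request the ARM-specific attributes above
   with declarations such as (hypothetical prototypes; the "IRQ" argument
   is one of the interrupt kinds accepted by arm_isr_value):

     void handler (void) __attribute__ ((interrupt ("IRQ")));
     void start (void) __attribute__ ((naked));
     extern int far_func (int) __attribute__ ((long_call));  */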
2784 /* Handle an attribute requiring a FUNCTION_DECL;
2785 arguments as in struct attribute_spec.handler. */
2787 arm_handle_fndecl_attribute (tree
*node
, tree name
, tree args ATTRIBUTE_UNUSED
,
2788 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
2790 if (TREE_CODE (*node
) != FUNCTION_DECL
)
2792 warning (OPT_Wattributes
, "%qs attribute only applies to functions",
2793 IDENTIFIER_POINTER (name
));
2794 *no_add_attrs
= true;
2800 /* Handle an "interrupt" or "isr" attribute;
2801 arguments as in struct attribute_spec.handler. */
2803 arm_handle_isr_attribute (tree
*node
, tree name
, tree args
, int flags
,
2808 if (TREE_CODE (*node
) != FUNCTION_DECL
)
2810 warning (OPT_Wattributes
, "%qs attribute only applies to functions",
2811 IDENTIFIER_POINTER (name
));
2812 *no_add_attrs
= true;
2814 /* FIXME: the argument if any is checked for type attributes;
2815 should it be checked for decl ones? */
2819 if (TREE_CODE (*node
) == FUNCTION_TYPE
2820 || TREE_CODE (*node
) == METHOD_TYPE
)
2822 if (arm_isr_value (args
) == ARM_FT_UNKNOWN
)
2824 warning (OPT_Wattributes
, "%qs attribute ignored",
2825 IDENTIFIER_POINTER (name
));
2826 *no_add_attrs
= true;
2829 else if (TREE_CODE (*node
) == POINTER_TYPE
2830 && (TREE_CODE (TREE_TYPE (*node
)) == FUNCTION_TYPE
2831 || TREE_CODE (TREE_TYPE (*node
)) == METHOD_TYPE
)
2832 && arm_isr_value (args
) != ARM_FT_UNKNOWN
)
2834 *node
= build_variant_type_copy (*node
);
2835 TREE_TYPE (*node
) = build_type_attribute_variant
2837 tree_cons (name
, args
, TYPE_ATTRIBUTES (TREE_TYPE (*node
))));
2838 *no_add_attrs
= true;
2842 /* Possibly pass this attribute on from the type to a decl. */
2843 if (flags
& ((int) ATTR_FLAG_DECL_NEXT
2844 | (int) ATTR_FLAG_FUNCTION_NEXT
2845 | (int) ATTR_FLAG_ARRAY_NEXT
))
2847 *no_add_attrs
= true;
2848 return tree_cons (name
, args
, NULL_TREE
);
2852 warning (OPT_Wattributes
, "%qs attribute ignored",
2853 IDENTIFIER_POINTER (name
));
2861 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2862 /* Handle the "notshared" attribute. This attribute is another way of
2863 requesting hidden visibility. ARM's compiler supports
2864 "__declspec(notshared)"; we support the same thing via an
2868 arm_handle_notshared_attribute (tree
*node
,
2869 tree name ATTRIBUTE_UNUSED
,
2870 tree args ATTRIBUTE_UNUSED
,
2871 int flags ATTRIBUTE_UNUSED
,
2874 tree decl
= TYPE_NAME (*node
);
2878 DECL_VISIBILITY (decl
) = VISIBILITY_HIDDEN
;
2879 DECL_VISIBILITY_SPECIFIED (decl
) = 1;
2880 *no_add_attrs
= false;
2886 /* Return 0 if the attributes for two types are incompatible, 1 if they
2887 are compatible, and 2 if they are nearly compatible (which causes a
2888 warning to be generated). */
2890 arm_comp_type_attributes (tree type1
, tree type2
)
2894 /* Check for mismatch of non-default calling convention. */
2895 if (TREE_CODE (type1
) != FUNCTION_TYPE
)
2898 /* Check for mismatched call attributes. */
2899 l1
= lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1
)) != NULL
;
2900 l2
= lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2
)) != NULL
;
2901 s1
= lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1
)) != NULL
;
2902 s2
= lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2
)) != NULL
;
2904 /* Only bother to check if an attribute is defined. */
2905 if (l1
| l2
| s1
| s2
)
2907 /* If one type has an attribute, the other must have the same attribute. */
2908 if ((l1
!= l2
) || (s1
!= s2
))
2911 /* Disallow mixed attributes. */
2912 if ((l1
& s2
) || (l2
& s1
))
2916 /* Check for mismatched ISR attribute. */
2917 l1
= lookup_attribute ("isr", TYPE_ATTRIBUTES (type1
)) != NULL
;
2919 l1
= lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1
)) != NULL
;
2920 l2
= lookup_attribute ("isr", TYPE_ATTRIBUTES (type2
)) != NULL
;
2922 l1
= lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2
)) != NULL
;
2929 /* Encode long_call or short_call attribute by prefixing
2930 symbol name in DECL with a special character FLAG. */
2932 arm_encode_call_attribute (tree decl
, int flag
)
2934 const char * str
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
2935 int len
= strlen (str
);
2938 /* Do not allow weak functions to be treated as short call. */
2939 if (DECL_WEAK (decl
) && flag
== SHORT_CALL_FLAG_CHAR
)
2942 newstr
= alloca (len
+ 2);
2944 strcpy (newstr
+ 1, str
);
2946 newstr
= (char *) ggc_alloc_string (newstr
, len
+ 1);
2947 XSTR (XEXP (DECL_RTL (decl
), 0), 0) = newstr
;
2950 /* Assigns default attributes to newly defined type. This is used to
2951 set short_call/long_call attributes for function types of
2952 functions defined inside corresponding #pragma scopes. */
2954 arm_set_default_type_attributes (tree type
)
  /* Add __attribute__ ((long_call)) to all functions when inside
     #pragma long_calls, or __attribute__ ((short_call)) when inside
     #pragma no_long_calls.  */
2959 if (TREE_CODE (type
) == FUNCTION_TYPE
|| TREE_CODE (type
) == METHOD_TYPE
)
2961 tree type_attr_list
, attr_name
;
2962 type_attr_list
= TYPE_ATTRIBUTES (type
);
2964 if (arm_pragma_long_calls
== LONG
)
2965 attr_name
= get_identifier ("long_call");
2966 else if (arm_pragma_long_calls
== SHORT
)
2967 attr_name
= get_identifier ("short_call");
2971 type_attr_list
= tree_cons (attr_name
, NULL_TREE
, type_attr_list
);
2972 TYPE_ATTRIBUTES (type
) = type_attr_list
;
2976 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
2977 defined within the current compilation unit. If this cannot be
2978 determined, then 0 is returned. */
2980 current_file_function_operand (rtx sym_ref
)
2982 /* This is a bit of a fib. A function will have a short call flag
2983 applied to its name if it has the short call attribute, or it has
2984 already been defined within the current compilation unit. */
2985 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref
, 0)))
2988 /* The current function is always defined within the current compilation
     unit.  If it is a weak definition, however, then this may not be the real
2990 definition of the function, and so we have to say no. */
2991 if (sym_ref
== XEXP (DECL_RTL (current_function_decl
), 0)
2992 && !DECL_WEAK (current_function_decl
))
2995 /* We cannot make the determination - default to returning 0. */
2999 /* Return nonzero if a 32 bit "long_call" should be generated for
3000 this call. We generate a long_call if the function:
        a.  has an __attribute__ ((long_call))
3003 or b. is within the scope of a #pragma long_calls
3004 or c. the -mlong-calls command line switch has been specified
3006 1. -ffunction-sections is in effect
3007 or 2. the current function has __attribute__ ((section))
3008 or 3. the target function has __attribute__ ((section))
3010 However we do not generate a long call if the function:
3012 d. has an __attribute__ ((short_call))
3013 or e. is inside the scope of a #pragma no_long_calls
3014 or f. is defined within the current compilation unit.
3016 This function will be called by C fragments contained in the machine
3017 description file. SYM_REF and CALL_COOKIE correspond to the matched
3018 rtl operands. CALL_SYMBOL is used to distinguish between
3019 two different callers of the function. It is set to 1 in the
3020 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3021 and "call_value" patterns. This is because of the difference in the
3022 SYM_REFs passed by these patterns. */
3024 arm_is_longcall_p (rtx sym_ref
, int call_cookie
, int call_symbol
)
3028 if (GET_CODE (sym_ref
) != MEM
)
3031 sym_ref
= XEXP (sym_ref
, 0);
3034 if (GET_CODE (sym_ref
) != SYMBOL_REF
)
3037 if (call_cookie
& CALL_SHORT
)
3040 if (TARGET_LONG_CALLS
)
3042 if (flag_function_sections
3043 || DECL_SECTION_NAME (current_function_decl
))
3044 /* c.3 is handled by the definition of the
3045 ARM_DECLARE_FUNCTION_SIZE macro. */
3049 if (current_file_function_operand (sym_ref
))
3052 return (call_cookie
& CALL_LONG
)
3053 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref
, 0))
3054 || TARGET_LONG_CALLS
;
3057 /* Return nonzero if it is ok to make a tail-call to DECL. */
3059 arm_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
3061 int call_type
= TARGET_LONG_CALLS
? CALL_LONG
: CALL_NORMAL
;
3063 if (cfun
->machine
->sibcall_blocked
)
3066 /* Never tailcall something for which we have no decl, or if we
3067 are in Thumb mode. */
3068 if (decl
== NULL
|| TARGET_THUMB
)
3071 /* Get the calling method. */
3072 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl
))))
3073 call_type
= CALL_SHORT
;
3074 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl
))))
3075 call_type
= CALL_LONG
;
3077 /* Cannot tail-call to long calls, since these are out of range of
3078 a branch instruction. However, if not compiling PIC, we know
3079 we can reach the symbol if it is in this compilation unit. */
3080 if (call_type
== CALL_LONG
&& (flag_pic
|| !TREE_ASM_WRITTEN (decl
)))
3083 /* If we are interworking and the function is not declared static
3084 then we can't tail-call it unless we know that it exists in this
3085 compilation unit (since it might be a Thumb routine). */
3086 if (TARGET_INTERWORK
&& TREE_PUBLIC (decl
) && !TREE_ASM_WRITTEN (decl
))
3089 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3090 if (IS_INTERRUPT (arm_current_func_type ()))
3093 /* Everything else is ok. */
3098 /* Addressing mode support functions. */
3100 /* Return nonzero if X is a legitimate immediate operand when compiling
3103 legitimate_pic_operand_p (rtx x
)
3107 && (GET_CODE (x
) == SYMBOL_REF
3108 || (GET_CODE (x
) == CONST
3109 && GET_CODE (XEXP (x
, 0)) == PLUS
3110 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
3117 legitimize_pic_address (rtx orig
, enum machine_mode mode
, rtx reg
)
3119 if (GET_CODE (orig
) == SYMBOL_REF
3120 || GET_CODE (orig
) == LABEL_REF
)
3122 #ifndef AOF_ASSEMBLER
3123 rtx pic_ref
, address
;
3130 gcc_assert (!no_new_pseudos
);
3131 reg
= gen_reg_rtx (Pmode
);
3136 #ifdef AOF_ASSEMBLER
3137 /* The AOF assembler can generate relocations for these directly, and
3138 understands that the PIC register has to be added into the offset. */
3139 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
3142 address
= gen_reg_rtx (Pmode
);
3147 emit_insn (gen_pic_load_addr_arm (address
, orig
));
3149 emit_insn (gen_pic_load_addr_thumb (address
, orig
));
3151 if ((GET_CODE (orig
) == LABEL_REF
3152 || (GET_CODE (orig
) == SYMBOL_REF
&&
3153 SYMBOL_REF_LOCAL_P (orig
)))
3155 pic_ref
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, address
);
3158 pic_ref
= gen_const_mem (Pmode
,
3159 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
3163 insn
= emit_move_insn (reg
, pic_ref
);
3165 current_function_uses_pic_offset_table
= 1;
3166 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3168 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EQUAL
, orig
,
3172 else if (GET_CODE (orig
) == CONST
)
3176 if (GET_CODE (XEXP (orig
, 0)) == PLUS
3177 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
3182 gcc_assert (!no_new_pseudos
);
3183 reg
= gen_reg_rtx (Pmode
);
3186 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
3188 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
3189 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
3190 base
== reg
? 0 : reg
);
3192 if (GET_CODE (offset
) == CONST_INT
)
3194 /* The base register doesn't really matter, we only want to
3195 test the index for the appropriate mode. */
3196 if (!arm_legitimate_index_p (mode
, offset
, SET
, 0))
3198 gcc_assert (!no_new_pseudos
);
3199 offset
= force_reg (Pmode
, offset
);
3202 if (GET_CODE (offset
) == CONST_INT
)
3203 return plus_constant (base
, INTVAL (offset
));
3206 if (GET_MODE_SIZE (mode
) > 4
3207 && (GET_MODE_CLASS (mode
) == MODE_INT
3208 || TARGET_SOFT_FLOAT
))
3210 emit_insn (gen_addsi3 (reg
, base
, offset
));
3214 return gen_rtx_PLUS (Pmode
, base
, offset
);
3221 /* Find a spare low register to use during the prolog of a function. */
3224 thumb_find_work_register (unsigned long pushed_regs_mask
)
3228 /* Check the argument registers first as these are call-used. The
3229 register allocation order means that sometimes r3 might be used
3230 but earlier argument registers might not, so check them all. */
3231 for (reg
= LAST_ARG_REGNUM
; reg
>= 0; reg
--)
3232 if (!regs_ever_live
[reg
])
3235 /* Before going on to check the call-saved registers we can try a couple
3236 more ways of deducing that r3 is available. The first is when we are
3237 pushing anonymous arguments onto the stack and we have less than 4
3238 registers worth of fixed arguments(*). In this case r3 will be part of
3239 the variable argument list and so we can be sure that it will be
3240 pushed right at the start of the function. Hence it will be available
3241 for the rest of the prologue.
     (*): i.e. current_function_pretend_args_size is greater than 0.  */
3243 if (cfun
->machine
->uses_anonymous_args
3244 && current_function_pretend_args_size
> 0)
3245 return LAST_ARG_REGNUM
;
3247 /* The other case is when we have fixed arguments but less than 4 registers
3248 worth. In this case r3 might be used in the body of the function, but
3249 it is not being used to convey an argument into the function. In theory
3250 we could just check current_function_args_size to see how many bytes are
3251 being passed in argument registers, but it seems that it is unreliable.
3252 Sometimes it will have the value 0 when in fact arguments are being
3253 passed. (See testcase execute/20021111-1.c for an example). So we also
3254 check the args_info.nregs field as well. The problem with this field is
3255 that it makes no allowances for arguments that are passed to the
3256 function but which are not used. Hence we could miss an opportunity
3257 when a function has an unused argument in r3. But it is better to be
3258 safe than to be sorry. */
3259 if (! cfun
->machine
->uses_anonymous_args
3260 && current_function_args_size
>= 0
3261 && current_function_args_size
<= (LAST_ARG_REGNUM
* UNITS_PER_WORD
)
3262 && cfun
->args_info
.nregs
< 4)
3263 return LAST_ARG_REGNUM
;
3265 /* Otherwise look for a call-saved register that is going to be pushed. */
3266 for (reg
= LAST_LO_REGNUM
; reg
> LAST_ARG_REGNUM
; reg
--)
3267 if (pushed_regs_mask
& (1 << reg
))
3270 /* Something went wrong - thumb_compute_save_reg_mask()
3271 should have arranged for a suitable register to be pushed. */
3276 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3280 arm_load_pic_register (unsigned int scratch
)
3282 #ifndef AOF_ASSEMBLER
3283 rtx l1
, pic_tmp
, pic_tmp2
, pic_rtx
;
3284 rtx global_offset_table
;
3286 if (current_function_uses_pic_offset_table
== 0 || TARGET_SINGLE_PIC_BASE
)
3289 gcc_assert (flag_pic
);
3291 l1
= gen_label_rtx ();
3293 global_offset_table
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
3294 /* On the ARM the PC register contains 'dot + 8' at the time of the
3295 addition, on the Thumb it is 'dot + 4'. */
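      /* For TARGET_ARM the net effect is a sequence along these lines
	 (a sketch only; register and label names are invented here):

	    ldr	rPIC, .Lgotoff		@ literal-pool offset to the GOT
	  .Lpicbase:
	    add	rPIC, pc, rPIC		@ pc reads as 'dot + 8' here

	 so that rPIC ends up pointing at the GOT regardless of where the
	 code was loaded.  */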
3296 pic_tmp
= plus_constant (gen_rtx_LABEL_REF (Pmode
, l1
), TARGET_ARM
? 8 : 4);
3298 pic_tmp2
= gen_rtx_CONST (VOIDmode
,
3299 gen_rtx_PLUS (Pmode
, global_offset_table
, pc_rtx
));
3301 pic_tmp2
= gen_rtx_CONST (VOIDmode
, global_offset_table
);
3303 pic_rtx
= gen_rtx_CONST (Pmode
, gen_rtx_MINUS (Pmode
, pic_tmp2
, pic_tmp
));
3307 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx
, pic_rtx
));
3308 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx
, l1
));
3312 if (REGNO (pic_offset_table_rtx
) > LAST_LO_REGNUM
)
3314 /* We will have pushed the pic register, so should always be
3315 able to find a work register. */
3316 pic_tmp
= gen_rtx_REG (SImode
, scratch
);
3317 emit_insn (gen_pic_load_addr_thumb (pic_tmp
, pic_rtx
));
3318 emit_insn (gen_movsi (pic_offset_table_rtx
, pic_tmp
));
3321 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx
, pic_rtx
));
3322 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx
, l1
));
3325 /* Need to emit this whether or not we obey regdecls,
3326 since setjmp/longjmp can cause life info to screw up. */
3327 emit_insn (gen_rtx_USE (VOIDmode
, pic_offset_table_rtx
));
3328 #endif /* AOF_ASSEMBLER */
3332 /* Return nonzero if X is valid as an ARM state addressing register. */
3334 arm_address_register_rtx_p (rtx x
, int strict_p
)
3338 if (GET_CODE (x
) != REG
)
3344 return ARM_REGNO_OK_FOR_BASE_P (regno
);
3346 return (regno
<= LAST_ARM_REGNUM
3347 || regno
>= FIRST_PSEUDO_REGISTER
3348 || regno
== FRAME_POINTER_REGNUM
3349 || regno
== ARG_POINTER_REGNUM
);
3352 /* Return nonzero if X is a valid ARM state address operand. */
3354 arm_legitimate_address_p (enum machine_mode mode
, rtx x
, RTX_CODE outer
,
3358 enum rtx_code code
= GET_CODE (x
);
3360 if (arm_address_register_rtx_p (x
, strict_p
))
3363 use_ldrd
= (TARGET_LDRD
3365 || (mode
== DFmode
&& (TARGET_SOFT_FLOAT
|| TARGET_VFP
))));
3367 if (code
== POST_INC
|| code
== PRE_DEC
3368 || ((code
== PRE_INC
|| code
== POST_DEC
)
3369 && (use_ldrd
|| GET_MODE_SIZE (mode
) <= 4)))
3370 return arm_address_register_rtx_p (XEXP (x
, 0), strict_p
);
3372 else if ((code
== POST_MODIFY
|| code
== PRE_MODIFY
)
3373 && arm_address_register_rtx_p (XEXP (x
, 0), strict_p
)
3374 && GET_CODE (XEXP (x
, 1)) == PLUS
3375 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
3377 rtx addend
= XEXP (XEXP (x
, 1), 1);
3379 /* Don't allow ldrd post increment by register because it's hard
3380 to fixup invalid register choices. */
3382 && GET_CODE (x
) == POST_MODIFY
3383 && GET_CODE (addend
) == REG
)
3386 return ((use_ldrd
|| GET_MODE_SIZE (mode
) <= 4)
3387 && arm_legitimate_index_p (mode
, addend
, outer
, strict_p
));
3390 /* After reload constants split into minipools will have addresses
3391 from a LABEL_REF. */
3392 else if (reload_completed
3393 && (code
== LABEL_REF
3395 && GET_CODE (XEXP (x
, 0)) == PLUS
3396 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == LABEL_REF
3397 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
3400 else if (mode
== TImode
)
3403 else if (code
== PLUS
)
3405 rtx xop0
= XEXP (x
, 0);
3406 rtx xop1
= XEXP (x
, 1);
3408 return ((arm_address_register_rtx_p (xop0
, strict_p
)
3409 && arm_legitimate_index_p (mode
, xop1
, outer
, strict_p
))
3410 || (arm_address_register_rtx_p (xop1
, strict_p
)
3411 && arm_legitimate_index_p (mode
, xop0
, outer
, strict_p
)));
3415 /* Reload currently can't handle MINUS, so disable this for now */
3416 else if (GET_CODE (x
) == MINUS
)
3418 rtx xop0
= XEXP (x
, 0);
3419 rtx xop1
= XEXP (x
, 1);
3421 return (arm_address_register_rtx_p (xop0
, strict_p
)
3422 && arm_legitimate_index_p (mode
, xop1
, outer
, strict_p
));
3426 else if (GET_MODE_CLASS (mode
) != MODE_FLOAT
3427 && code
== SYMBOL_REF
3428 && CONSTANT_POOL_ADDRESS_P (x
)
3430 && symbol_mentioned_p (get_pool_constant (x
))))
3436 /* Return nonzero if INDEX is valid for an address index operand in
3439 arm_legitimate_index_p (enum machine_mode mode
, rtx index
, RTX_CODE outer
,
3442 HOST_WIDE_INT range
;
3443 enum rtx_code code
= GET_CODE (index
);
3445 /* Standard coprocessor addressing modes. */
3446 if (TARGET_HARD_FLOAT
3447 && (TARGET_FPA
|| TARGET_MAVERICK
)
3448 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
3449 || (TARGET_MAVERICK
&& mode
== DImode
)))
3450 return (code
== CONST_INT
&& INTVAL (index
) < 1024
3451 && INTVAL (index
) > -1024
3452 && (INTVAL (index
) & 3) == 0);
3454 if (TARGET_REALLY_IWMMXT
&& VALID_IWMMXT_REG_MODE (mode
))
3455 return (code
== CONST_INT
3456 && INTVAL (index
) < 1024
3457 && INTVAL (index
) > -1024
3458 && (INTVAL (index
) & 3) == 0);
3460 if (arm_address_register_rtx_p (index
, strict_p
)
3461 && (GET_MODE_SIZE (mode
) <= 4))
3464 if (mode
== DImode
|| mode
== DFmode
)
3466 if (code
== CONST_INT
)
3468 HOST_WIDE_INT val
= INTVAL (index
);
3471 return val
> -256 && val
< 256;
3473 return val
> -4096 && val
< 4092;
3476 return TARGET_LDRD
&& arm_address_register_rtx_p (index
, strict_p
);
3479 if (GET_MODE_SIZE (mode
) <= 4
3482 || (mode
== QImode
&& outer
== SIGN_EXTEND
))))
3486 rtx xiop0
= XEXP (index
, 0);
3487 rtx xiop1
= XEXP (index
, 1);
3489 return ((arm_address_register_rtx_p (xiop0
, strict_p
)
3490 && power_of_two_operand (xiop1
, SImode
))
3491 || (arm_address_register_rtx_p (xiop1
, strict_p
)
3492 && power_of_two_operand (xiop0
, SImode
)));
3494 else if (code
== LSHIFTRT
|| code
== ASHIFTRT
3495 || code
== ASHIFT
|| code
== ROTATERT
)
3497 rtx op
= XEXP (index
, 1);
3499 return (arm_address_register_rtx_p (XEXP (index
, 0), strict_p
)
3500 && GET_CODE (op
) == CONST_INT
3502 && INTVAL (op
) <= 31);
3506 /* For ARM v4 we may be doing a sign-extend operation during the
3510 if (mode
== HImode
|| (outer
== SIGN_EXTEND
&& mode
== QImode
))
3516 range
= (mode
== HImode
) ? 4095 : 4096;
3518 return (code
== CONST_INT
3519 && INTVAL (index
) < range
3520 && INTVAL (index
) > -range
);
3523 /* Return nonzero if X is valid as a Thumb state base register. */
3525 thumb_base_register_rtx_p (rtx x
, enum machine_mode mode
, int strict_p
)
3529 if (GET_CODE (x
) != REG
)
3535 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno
, mode
);
3537 return (regno
<= LAST_LO_REGNUM
3538 || regno
> LAST_VIRTUAL_REGISTER
3539 || regno
== FRAME_POINTER_REGNUM
3540 || (GET_MODE_SIZE (mode
) >= 4
3541 && (regno
== STACK_POINTER_REGNUM
3542 || regno
>= FIRST_PSEUDO_REGISTER
3543 || x
== hard_frame_pointer_rtx
3544 || x
== arg_pointer_rtx
)));
3547 /* Return nonzero if x is a legitimate index register. This is the case
3548 for any base register that can access a QImode object. */
thumb_index_register_rtx_p (rtx x, int strict_p)
{
  return thumb_base_register_rtx_p (x, QImode, strict_p);
}
3555 /* Return nonzero if x is a legitimate Thumb-state address.
3557 The AP may be eliminated to either the SP or the FP, so we use the
3558 least common denominator, e.g. SImode, and offsets from 0 to 64.
3560 ??? Verify whether the above is the right approach.
3562 ??? Also, the FP may be eliminated to the SP, so perhaps that
3563 needs special handling also.
3565 ??? Look at how the mips16 port solves this problem. It probably uses
3566 better ways to solve some of these problems.
3568 Although it is not incorrect, we don't accept QImode and HImode
3569 addresses based on the frame pointer or arg pointer until the
3570 reload pass starts. This is so that eliminating such addresses
3571 into stack based ones won't produce impossible code. */
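/* By way of illustration, the kinds of addresses accepted below map onto
   Thumb addressing modes roughly as follows (register names are only
   examples):

     [r0, r1]	    register plus register
     [r0, #imm]	    register plus small scaled immediate
     [sp, #imm]	    SP-relative, word sized or larger only
     label	    PC-relative literal pool data  */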
3573 thumb_legitimate_address_p (enum machine_mode mode
, rtx x
, int strict_p
)
3575 /* ??? Not clear if this is right. Experiment. */
3576 if (GET_MODE_SIZE (mode
) < 4
3577 && !(reload_in_progress
|| reload_completed
)
3578 && (reg_mentioned_p (frame_pointer_rtx
, x
)
3579 || reg_mentioned_p (arg_pointer_rtx
, x
)
3580 || reg_mentioned_p (virtual_incoming_args_rtx
, x
)
3581 || reg_mentioned_p (virtual_outgoing_args_rtx
, x
)
3582 || reg_mentioned_p (virtual_stack_dynamic_rtx
, x
)
3583 || reg_mentioned_p (virtual_stack_vars_rtx
, x
)))
3586 /* Accept any base register. SP only in SImode or larger. */
3587 else if (thumb_base_register_rtx_p (x
, mode
, strict_p
))
3590 /* This is PC relative data before arm_reorg runs. */
3591 else if (GET_MODE_SIZE (mode
) >= 4 && CONSTANT_P (x
)
3592 && GET_CODE (x
) == SYMBOL_REF
3593 && CONSTANT_POOL_ADDRESS_P (x
) && ! flag_pic
)
3596 /* This is PC relative data after arm_reorg runs. */
3597 else if (GET_MODE_SIZE (mode
) >= 4 && reload_completed
3598 && (GET_CODE (x
) == LABEL_REF
3599 || (GET_CODE (x
) == CONST
3600 && GET_CODE (XEXP (x
, 0)) == PLUS
3601 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == LABEL_REF
3602 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
3605 /* Post-inc indexing only supported for SImode and larger. */
3606 else if (GET_CODE (x
) == POST_INC
&& GET_MODE_SIZE (mode
) >= 4
3607 && thumb_index_register_rtx_p (XEXP (x
, 0), strict_p
))
3610 else if (GET_CODE (x
) == PLUS
)
3612 /* REG+REG address can be any two index registers. */
3613 /* We disallow FRAME+REG addressing since we know that FRAME
3614 will be replaced with STACK, and SP relative addressing only
3615 permits SP+OFFSET. */
3616 if (GET_MODE_SIZE (mode
) <= 4
3617 && XEXP (x
, 0) != frame_pointer_rtx
3618 && XEXP (x
, 1) != frame_pointer_rtx
3619 && thumb_index_register_rtx_p (XEXP (x
, 0), strict_p
)
3620 && thumb_index_register_rtx_p (XEXP (x
, 1), strict_p
))
3623 /* REG+const has 5-7 bit offset for non-SP registers. */
3624 else if ((thumb_index_register_rtx_p (XEXP (x
, 0), strict_p
)
3625 || XEXP (x
, 0) == arg_pointer_rtx
)
3626 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3627 && thumb_legitimate_offset_p (mode
, INTVAL (XEXP (x
, 1))))
3630 /* REG+const has 10 bit offset for SP, but only SImode and
3631 larger is supported. */
3632 /* ??? Should probably check for DI/DFmode overflow here
3633 just like GO_IF_LEGITIMATE_OFFSET does. */
3634 else if (GET_CODE (XEXP (x
, 0)) == REG
3635 && REGNO (XEXP (x
, 0)) == STACK_POINTER_REGNUM
3636 && GET_MODE_SIZE (mode
) >= 4
3637 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3638 && INTVAL (XEXP (x
, 1)) >= 0
3639 && INTVAL (XEXP (x
, 1)) + GET_MODE_SIZE (mode
) <= 1024
3640 && (INTVAL (XEXP (x
, 1)) & 3) == 0)
3643 else if (GET_CODE (XEXP (x
, 0)) == REG
3644 && REGNO (XEXP (x
, 0)) == FRAME_POINTER_REGNUM
3645 && GET_MODE_SIZE (mode
) >= 4
3646 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3647 && (INTVAL (XEXP (x
, 1)) & 3) == 0)
3651 else if (GET_MODE_CLASS (mode
) != MODE_FLOAT
3652 && GET_MODE_SIZE (mode
) == 4
3653 && GET_CODE (x
) == SYMBOL_REF
3654 && CONSTANT_POOL_ADDRESS_P (x
)
3656 && symbol_mentioned_p (get_pool_constant (x
))))
3662 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3663 instruction of mode MODE. */
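/* The ranges checked below correspond to the Thumb load/store immediate
   forms; by way of example (illustrative register names only):

     ldrb r0, [r1, #31]	    byte:     offsets 0..31
     ldrh r0, [r1, #62]	    halfword: even offsets 0..62
     ldr  r0, [r1, #124]    word:     word-aligned offsets up to 124  */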
3665 thumb_legitimate_offset_p (enum machine_mode mode
, HOST_WIDE_INT val
)
3667 switch (GET_MODE_SIZE (mode
))
3670 return val
>= 0 && val
< 32;
3673 return val
>= 0 && val
< 64 && (val
& 1) == 0;
3677 && (val
+ GET_MODE_SIZE (mode
)) <= 128
3682 /* Try machine-dependent ways of modifying an illegitimate address
3683 to be legitimate. If we find one, return the new, valid address. */
3685 arm_legitimize_address (rtx x
, rtx orig_x
, enum machine_mode mode
)
3687 if (GET_CODE (x
) == PLUS
)
3689 rtx xop0
= XEXP (x
, 0);
3690 rtx xop1
= XEXP (x
, 1);
3692 if (CONSTANT_P (xop0
) && !symbol_mentioned_p (xop0
))
3693 xop0
= force_reg (SImode
, xop0
);
3695 if (CONSTANT_P (xop1
) && !symbol_mentioned_p (xop1
))
3696 xop1
= force_reg (SImode
, xop1
);
3698 if (ARM_BASE_REGISTER_RTX_P (xop0
)
3699 && GET_CODE (xop1
) == CONST_INT
)
3701 HOST_WIDE_INT n
, low_n
;
3705 /* VFP addressing modes actually allow greater offsets, but for
3706 now we just stick with the lowest common denominator. */
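	  /* As a worked example (an illustrative offset, not from real
	     output): for an SImode access at rB + 0x1234 the offset is
	     split as 0x1000 + 0x234, so a single add produces a base of
	     rB + 0x1000 and the remaining #0x234 fits the load/store
	     immediate range.  */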
3708 || ((TARGET_SOFT_FLOAT
|| TARGET_VFP
) && mode
== DFmode
))
3720 low_n
= ((mode
) == TImode
? 0
3721 : n
>= 0 ? (n
& 0xfff) : -((-n
) & 0xfff));
3725 base_reg
= gen_reg_rtx (SImode
);
3726 val
= force_operand (gen_rtx_PLUS (SImode
, xop0
,
3727 GEN_INT (n
)), NULL_RTX
);
3728 emit_move_insn (base_reg
, val
);
3729 x
= (low_n
== 0 ? base_reg
3730 : gen_rtx_PLUS (SImode
, base_reg
, GEN_INT (low_n
)));
3732 else if (xop0
!= XEXP (x
, 0) || xop1
!= XEXP (x
, 1))
3733 x
= gen_rtx_PLUS (SImode
, xop0
, xop1
);
3736 /* XXX We don't allow MINUS any more -- see comment in
3737 arm_legitimate_address_p (). */
3738 else if (GET_CODE (x
) == MINUS
)
3740 rtx xop0
= XEXP (x
, 0);
3741 rtx xop1
= XEXP (x
, 1);
3743 if (CONSTANT_P (xop0
))
3744 xop0
= force_reg (SImode
, xop0
);
3746 if (CONSTANT_P (xop1
) && ! symbol_mentioned_p (xop1
))
3747 xop1
= force_reg (SImode
, xop1
);
3749 if (xop0
!= XEXP (x
, 0) || xop1
!= XEXP (x
, 1))
3750 x
= gen_rtx_MINUS (SImode
, xop0
, xop1
);
3755 /* We need to find and carefully transform any SYMBOL and LABEL
3756 references; so go back to the original address expression. */
3757 rtx new_x
= legitimize_pic_address (orig_x
, mode
, NULL_RTX
);
3759 if (new_x
!= orig_x
)
3767 /* Try machine-dependent ways of modifying an illegitimate Thumb address
3768 to be legitimate. If we find one, return the new, valid address. */
3770 thumb_legitimize_address (rtx x
, rtx orig_x
, enum machine_mode mode
)
3772 if (GET_CODE (x
) == PLUS
3773 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3774 && (INTVAL (XEXP (x
, 1)) >= 32 * GET_MODE_SIZE (mode
)
3775 || INTVAL (XEXP (x
, 1)) < 0))
3777 rtx xop0
= XEXP (x
, 0);
3778 rtx xop1
= XEXP (x
, 1);
3779 HOST_WIDE_INT offset
= INTVAL (xop1
);
3781 /* Try and fold the offset into a biasing of the base register and
3782 then offsetting that. Don't do this when optimizing for space
3783 since it can cause too many CSEs. */
3784 if (optimize_size
&& offset
>= 0
3785 && offset
< 256 + 31 * GET_MODE_SIZE (mode
))
3787 HOST_WIDE_INT delta
;
3790 delta
= offset
- (256 - GET_MODE_SIZE (mode
));
3791 else if (offset
< 32 * GET_MODE_SIZE (mode
) + 8)
3792 delta
= 31 * GET_MODE_SIZE (mode
);
3794 delta
= offset
& (~31 * GET_MODE_SIZE (mode
));
3796 xop0
= force_operand (plus_constant (xop0
, offset
- delta
),
3798 x
= plus_constant (xop0
, delta
);
3800 else if (offset
< 0 && offset
> -256)
3801 /* Small negative offsets are best done with a subtract before the
	 dereference; forcing these into a register normally takes two
3804 x
= force_operand (x
, NULL_RTX
);
3807 /* For the remaining cases, force the constant into a register. */
3808 xop1
= force_reg (SImode
, xop1
);
3809 x
= gen_rtx_PLUS (SImode
, xop0
, xop1
);
3812 else if (GET_CODE (x
) == PLUS
3813 && s_register_operand (XEXP (x
, 1), SImode
)
3814 && !s_register_operand (XEXP (x
, 0), SImode
))
3816 rtx xop0
= force_operand (XEXP (x
, 0), NULL_RTX
);
3818 x
= gen_rtx_PLUS (SImode
, xop0
, XEXP (x
, 1));
3823 /* We need to find and carefully transform any SYMBOL and LABEL
3824 references; so go back to the original address expression. */
3825 rtx new_x
= legitimize_pic_address (orig_x
, mode
, NULL_RTX
);
3827 if (new_x
!= orig_x
)
3836 #define REG_OR_SUBREG_REG(X) \
3837 (GET_CODE (X) == REG \
3838 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
3840 #define REG_OR_SUBREG_RTX(X) \
3841 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
3843 #ifndef COSTS_N_INSNS
3844 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
3847 thumb_rtx_costs (rtx x
, enum rtx_code code
, enum rtx_code outer
)
3849 enum machine_mode mode
= GET_MODE (x
);
3862 return COSTS_N_INSNS (1);
3865 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
3868 unsigned HOST_WIDE_INT i
= INTVAL (XEXP (x
, 1));
3875 return COSTS_N_INSNS (2) + cycles
;
3877 return COSTS_N_INSNS (1) + 16;
3880 return (COSTS_N_INSNS (1)
3881 + 4 * ((GET_CODE (SET_SRC (x
)) == MEM
)
3882 + GET_CODE (SET_DEST (x
)) == MEM
));
3887 if ((unsigned HOST_WIDE_INT
) INTVAL (x
) < 256)
3889 if (thumb_shiftable_const (INTVAL (x
)))
3890 return COSTS_N_INSNS (2);
3891 return COSTS_N_INSNS (3);
3893 else if ((outer
== PLUS
|| outer
== COMPARE
)
3894 && INTVAL (x
) < 256 && INTVAL (x
) > -256)
3896 else if (outer
== AND
3897 && INTVAL (x
) < 256 && INTVAL (x
) >= -256)
3898 return COSTS_N_INSNS (1);
3899 else if (outer
== ASHIFT
|| outer
== ASHIFTRT
3900 || outer
== LSHIFTRT
)
3902 return COSTS_N_INSNS (2);
3908 return COSTS_N_INSNS (3);
3926 /* XXX another guess. */
3927 /* Memory costs quite a lot for the first word, but subsequent words
3928 load at the equivalent of a single insn each. */
3929 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
3930 + ((GET_CODE (x
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (x
))
3935 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
3940 /* XXX still guessing. */
3941 switch (GET_MODE (XEXP (x
, 0)))
3944 return (1 + (mode
== DImode
? 4 : 0)
3945 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
3948 return (4 + (mode
== DImode
? 4 : 0)
3949 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
3952 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
3964 /* Worker routine for arm_rtx_costs. */
3966 arm_rtx_costs_1 (rtx x
, enum rtx_code code
, enum rtx_code outer
)
3968 enum machine_mode mode
= GET_MODE (x
);
3969 enum rtx_code subcode
;
3975 /* Memory costs quite a lot for the first word, but subsequent words
3976 load at the equivalent of a single insn each. */
3977 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
3978 + (GET_CODE (x
) == SYMBOL_REF
3979 && CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
3985 return optimize_size
? COSTS_N_INSNS (2) : 100;
3988 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
3995 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
3997 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
3998 + ((GET_CODE (XEXP (x
, 0)) == REG
3999 || (GET_CODE (XEXP (x
, 0)) == SUBREG
4000 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
4002 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
4003 || (GET_CODE (XEXP (x
, 0)) == SUBREG
4004 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
4006 + ((GET_CODE (XEXP (x
, 1)) == REG
4007 || (GET_CODE (XEXP (x
, 1)) == SUBREG
4008 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
4009 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
4014 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
4015 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
4016 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
4017 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
4020 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4021 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4022 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
4023 && arm_const_double_rtx (XEXP (x
, 1))))
4025 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
4026 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
4027 && arm_const_double_rtx (XEXP (x
, 0))))
4030 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
4031 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
4032 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
4033 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
4034 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
4035 || subcode
== ROTATE
|| subcode
== ROTATERT
4037 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
4038 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
4039 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
4040 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
4041 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
4042 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
4043 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
4048 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4049 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
4050 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4051 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
4052 && arm_const_double_rtx (XEXP (x
, 1))))
4056 case AND
: case XOR
: case IOR
:
4059 /* Normally the frame registers will be spilt into reg+const during
4060 reload, so it is a bad idea to combine them with other instructions,
4061 since then they might not be moved outside of loops. As a compromise
4062 we allow integration with ops that have a constant as their second
4064 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
4065 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
4066 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
4067 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
4068 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
4072 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
4073 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4074 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
4075 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
)))
4078 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
4079 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
4080 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4081 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
4082 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
)))
4085 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
4086 return (1 + extra_cost
4087 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
4088 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
4089 || subcode
== ROTATE
|| subcode
== ROTATERT
4091 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
4092 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
4093 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0)))
4094 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
4095 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
4096 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
))
4102 /* This should have been handled by the CPU specific routines. */
4106 if (arm_arch3m
&& mode
== SImode
4107 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
4108 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
4109 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
4110 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
4111 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
4112 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
4117 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4118 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
4122 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
4124 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
4127 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
4135 return 4 + (mode
== DImode
? 4 : 0);
4138 if (GET_MODE (XEXP (x
, 0)) == QImode
)
4139 return (4 + (mode
== DImode
? 4 : 0)
4140 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4143 switch (GET_MODE (XEXP (x
, 0)))
4146 return (1 + (mode
== DImode
? 4 : 0)
4147 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4150 return (4 + (mode
== DImode
? 4 : 0)
4151 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4154 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4169 if (const_ok_for_arm (INTVAL (x
)))
4170 return outer
== SET
? 2 : -1;
4171 else if (outer
== AND
4172 && const_ok_for_arm (~INTVAL (x
)))
4174 else if ((outer
== COMPARE
4175 || outer
== PLUS
|| outer
== MINUS
)
4176 && const_ok_for_arm (-INTVAL (x
)))
4187 if (arm_const_double_rtx (x
))
4188 return outer
== SET
? 2 : -1;
4189 else if ((outer
== COMPARE
|| outer
== PLUS
)
4190 && neg_const_double_rtx_ok_for_fpa (x
))
4199 /* RTX costs when optimizing for size. */
4201 arm_size_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4203 enum machine_mode mode
= GET_MODE (x
);
4207 /* XXX TBD. For now, use the standard costs. */
4208 *total
= thumb_rtx_costs (x
, code
, outer_code
);
      /* A memory access costs 1 insn if the mode is small, or the address is
	 a single register; otherwise it costs one insn per word.  */
      if (REG_P (XEXP (x, 0)))
	*total = COSTS_N_INSNS (1);
      else
	*total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4227 /* Needs a libcall, so it costs about this. */
4228 *total
= COSTS_N_INSNS (2);
4232 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
4234 *total
= COSTS_N_INSNS (2) + rtx_cost (XEXP (x
, 0), code
);
4242 if (mode
== DImode
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4244 *total
= COSTS_N_INSNS (3) + rtx_cost (XEXP (x
, 0), code
);
4247 else if (mode
== SImode
)
4249 *total
= COSTS_N_INSNS (1) + rtx_cost (XEXP (x
, 0), code
);
4250 /* Slightly disparage register shifts, but not by much. */
4251 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
4252 *total
+= 1 + rtx_cost (XEXP (x
, 1), code
);
4256 /* Needs a libcall. */
4257 *total
= COSTS_N_INSNS (2);
4261 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4263 *total
= COSTS_N_INSNS (1);
4269 enum rtx_code subcode0
= GET_CODE (XEXP (x
, 0));
4270 enum rtx_code subcode1
= GET_CODE (XEXP (x
, 1));
4272 if (subcode0
== ROTATE
|| subcode0
== ROTATERT
|| subcode0
== ASHIFT
4273 || subcode0
== LSHIFTRT
|| subcode0
== ASHIFTRT
4274 || subcode1
== ROTATE
|| subcode1
== ROTATERT
4275 || subcode1
== ASHIFT
|| subcode1
== LSHIFTRT
4276 || subcode1
== ASHIFTRT
)
4278 /* It's just the cost of the two operands. */
4283 *total
= COSTS_N_INSNS (1);
4287 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4291 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4293 *total
= COSTS_N_INSNS (1);
4298 case AND
: case XOR
: case IOR
:
4301 enum rtx_code subcode
= GET_CODE (XEXP (x
, 0));
4303 if (subcode
== ROTATE
|| subcode
== ROTATERT
|| subcode
== ASHIFT
4304 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
4305 || (code
== AND
&& subcode
== NOT
))
4307 /* It's just the cost of the two operands. */
4313 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4317 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4321 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4322 *total
= COSTS_N_INSNS (1);
4325 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4334 if (cc_register (XEXP (x
, 0), VOIDmode
))
4337 *total
= COSTS_N_INSNS (1);
4341 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4342 *total
= COSTS_N_INSNS (1);
4344 *total
= COSTS_N_INSNS (1 + ARM_NUM_REGS (mode
));
4349 if (GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))) < 4)
4351 if (!(arm_arch4
&& MEM_P (XEXP (x
, 0))))
4352 *total
+= COSTS_N_INSNS (arm_arch6
? 1 : 2);
4355 *total
+= COSTS_N_INSNS (1);
4360 if (!(arm_arch4
&& MEM_P (XEXP (x
, 0))))
4362 switch (GET_MODE (XEXP (x
, 0)))
4365 *total
+= COSTS_N_INSNS (1);
4369 *total
+= COSTS_N_INSNS (arm_arch6
? 1 : 2);
4375 *total
+= COSTS_N_INSNS (2);
4380 *total
+= COSTS_N_INSNS (1);
4385 if (const_ok_for_arm (INTVAL (x
)))
4386 *total
= COSTS_N_INSNS (outer_code
== SET
? 1 : 0);
4387 else if (const_ok_for_arm (~INTVAL (x
)))
4388 *total
= COSTS_N_INSNS (outer_code
== AND
? 0 : 1);
4389 else if (const_ok_for_arm (-INTVAL (x
)))
4391 if (outer_code
== COMPARE
|| outer_code
== PLUS
4392 || outer_code
== MINUS
)
4395 *total
= COSTS_N_INSNS (1);
4398 *total
= COSTS_N_INSNS (2);
4404 *total
= COSTS_N_INSNS (2);
4408 *total
= COSTS_N_INSNS (4);
4412 if (mode
!= VOIDmode
)
4413 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
      *total = COSTS_N_INSNS (4); /* Who knows?  */
4420 /* RTX costs for cores with a slow MUL implementation. */
4423 arm_slowmul_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4425 enum machine_mode mode
= GET_MODE (x
);
4429 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4436 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
4443 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4445 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
4446 & (unsigned HOST_WIDE_INT
) 0xffffffff);
4447 int cost
, const_ok
= const_ok_for_arm (i
);
4448 int j
, booth_unit_size
;
4450 /* Tune as appropriate. */
4451 cost
= const_ok
? 4 : 8;
4452 booth_unit_size
= 2;
4453 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
4455 i
>>= booth_unit_size
;
4463 *total
= 30 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
4464 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4);
4468 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
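
/* Illustrative sketch (not part of the original sources): the MULT case
   above estimates the latency of multiplying by a constant by counting
   Booth steps -- the multiplier retires a fixed number of bits per cycle,
   so the loop shifts the constant right until nothing is left.  The helper
   below models that calculation with plain C types; the function name,
   the step size and the per-step cost are assumptions used only for
   illustration, taken from the slow-multiplier case.  */
static int
example_booth_mult_cost (unsigned long multiplier, int const_ok)
{
  const int booth_unit_size = 2;	/* Bits retired per step (assumed).  */
  unsigned long i = multiplier & 0xffffffffUL;
  int cost = const_ok ? 4 : 8;		/* Cost of materialising the constant.  */
  int j;

  for (j = 0; i && j < 32; j += booth_unit_size)
    {
      i >>= booth_unit_size;
      cost += 2;			/* Assumed cost per Booth step.  */
    }

  return cost;
}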
4474 /* RTX cost for cores with a fast multiply unit (M variants). */
4477 arm_fastmul_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4479 enum machine_mode mode
= GET_MODE (x
);
4483 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4490 /* There is no point basing this on the tuning, since it is always the
4491 fast variant if it exists at all. */
4493 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
4494 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
4495 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
4502 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
4509 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4511 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
4512 & (unsigned HOST_WIDE_INT
) 0xffffffff);
4513 int cost
, const_ok
= const_ok_for_arm (i
);
4514 int j
, booth_unit_size
;
4516 /* Tune as appropriate. */
4517 cost
= const_ok
? 4 : 8;
4518 booth_unit_size
= 8;
4519 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
4521 i
>>= booth_unit_size
;
4529 *total
= 8 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
4530 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4);
4534 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
4540 /* RTX cost for XScale CPUs. */
4543 arm_xscale_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4545 enum machine_mode mode
= GET_MODE (x
);
4549 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4556 /* There is no point basing this on the tuning, since it is always the
4557 fast variant if it exists at all. */
4559 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
4560 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
4561 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
4568 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
4575 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4577 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
4578 & (unsigned HOST_WIDE_INT
) 0xffffffff);
4579 int cost
, const_ok
= const_ok_for_arm (i
);
4580 unsigned HOST_WIDE_INT masked_const
;
4582 /* The cost will be related to two insns.
4583 First a load of the constant (MOV or LDR), then a multiply. */
4586 cost
+= 1; /* LDR is probably more expensive because
4587 of longer result latency. */
4588 masked_const
= i
& 0xffff8000;
4589 if (masked_const
!= 0 && masked_const
!= 0xffff8000)
4591 masked_const
= i
& 0xf8000000;
4592 if (masked_const
== 0 || masked_const
== 0xf8000000)
4601 *total
= 8 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
4602 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4);
4606 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4607 will stall until the multiplication is complete. */
4608 if (GET_CODE (XEXP (x
, 0)) == MULT
)
4609 *total
= 4 + rtx_cost (XEXP (x
, 0), code
);
4611 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
4615 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
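
/* Illustrative sketch (not part of the original sources): the XScale MULT
   case above prices a multiply by a constant as a constant load (MOV or
   LDR) plus a multiply whose latency depends on how many significant bits
   the multiplier has.  The helper below mirrors that bucketing: constants
   that sign-extend from the low 16 bits are cheapest, those that fit in
   roughly 28 bits cost one more cycle, and everything else costs two.
   The function name and the exact cycle numbers are assumptions for
   illustration only.  */
static int
example_xscale_mult_latency (unsigned long multiplier)
{
  unsigned long i = multiplier & 0xffffffffUL;
  unsigned long masked = i & 0xffff8000UL;

  if (masked == 0 || masked == 0xffff8000UL)
    return 1;			/* Fits in the low 16 bits (sign-extended).  */

  masked = i & 0xf8000000UL;
  if (masked == 0 || masked == 0xf8000000UL)
    return 2;			/* Fits in roughly 28 significant bits.  */

  return 3;			/* Full-width multiplier.  */
}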
4621 /* RTX costs for 9e (and later) cores. */
4624 arm_9e_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4626 enum machine_mode mode
= GET_MODE (x
);
4635 *total
= COSTS_N_INSNS (3);
4639 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4647 /* There is no point basing this on the tuning, since it is always the
4648 fast variant if it exists at all. */
4650 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
4651 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
4652 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
4659 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4676 *total
= cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : nonreg_cost
)
4677 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : nonreg_cost
);
4681 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
/* All address computations that can be done are free, but rtx cost returns
   the same for practically all of them.  So we weight the different types
   of address here in the order (most pref first):
   PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL.  */
static inline int
arm_arm_address_cost (rtx x)
{
  enum rtx_code c  = GET_CODE (x);

  if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
    return 0;
  if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
    return 10;

  if (c == PLUS || c == MINUS)
    {
      if (GET_CODE (XEXP (x, 0)) == CONST_INT)
	return 2;

      if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
	return 3;

      return 4;
    }

  return 6;
}
static inline int
arm_thumb_address_cost (rtx x)
{
  enum rtx_code c  = GET_CODE (x);

  if (c == REG)
    return 1;
  if (c == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;

  return 2;
}
static int
arm_address_cost (rtx x)
{
  return TARGET_ARM ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
}
4735 arm_adjust_cost (rtx insn
, rtx link
, rtx dep
, int cost
)
4739 /* Some true dependencies can have a higher cost depending
4740 on precisely how certain input operands are used. */
4742 && REG_NOTE_KIND (link
) == 0
4743 && recog_memoized (insn
) >= 0
4744 && recog_memoized (dep
) >= 0)
4746 int shift_opnum
= get_attr_shift (insn
);
4747 enum attr_type attr_type
= get_attr_type (dep
);
4749 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
4750 operand for INSN. If we have a shifted input operand and the
4751 instruction we depend on is another ALU instruction, then we may
4752 have to account for an additional stall. */
4753 if (shift_opnum
!= 0
4754 && (attr_type
== TYPE_ALU_SHIFT
|| attr_type
== TYPE_ALU_SHIFT_REG
))
4756 rtx shifted_operand
;
4759 /* Get the shifted operand. */
4760 extract_insn (insn
);
4761 shifted_operand
= recog_data
.operand
[shift_opnum
];
	  /* Iterate over all the operands in DEP.  If we write an operand
	     that overlaps with SHIFTED_OPERAND, then we have to increase the
	     cost of this dependency.  */
4767 preprocess_constraints ();
4768 for (opno
= 0; opno
< recog_data
.n_operands
; opno
++)
4770 /* We can ignore strict inputs. */
4771 if (recog_data
.operand_type
[opno
] == OP_IN
)
4774 if (reg_overlap_mentioned_p (recog_data
.operand
[opno
],
4781 /* XXX This is not strictly true for the FPA. */
4782 if (REG_NOTE_KIND (link
) == REG_DEP_ANTI
4783 || REG_NOTE_KIND (link
) == REG_DEP_OUTPUT
)
4786 /* Call insns don't incur a stall, even if they follow a load. */
4787 if (REG_NOTE_KIND (link
) == 0
4788 && GET_CODE (insn
) == CALL_INSN
)
4791 if ((i_pat
= single_set (insn
)) != NULL
4792 && GET_CODE (SET_SRC (i_pat
)) == MEM
4793 && (d_pat
= single_set (dep
)) != NULL
4794 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
4796 rtx src_mem
= XEXP (SET_SRC (i_pat
), 0);
      /* This is a load after a store; there is no conflict if the load reads
	 from a cached area.  Assume that loads from the stack, and from the
	 constant pool are cached, and that others will miss.  This is a hack.  */
4802 if ((GET_CODE (src_mem
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (src_mem
))
4803 || reg_mentioned_p (stack_pointer_rtx
, src_mem
)
4804 || reg_mentioned_p (frame_pointer_rtx
, src_mem
)
4805 || reg_mentioned_p (hard_frame_pointer_rtx
, src_mem
))
4812 static int fp_consts_inited
= 0;
4814 /* Only zero is valid for VFP. Other values are also valid for FPA. */
4815 static const char * const strings_fp
[8] =
4818 "4", "5", "0.5", "10"
4821 static REAL_VALUE_TYPE values_fp
[8];
4824 init_fp_table (void)
4830 fp_consts_inited
= 1;
4832 fp_consts_inited
= 8;
4834 for (i
= 0; i
< fp_consts_inited
; i
++)
4836 r
= REAL_VALUE_ATOF (strings_fp
[i
], DFmode
);
4841 /* Return TRUE if rtx X is a valid immediate FP constant. */
4843 arm_const_double_rtx (rtx x
)
4848 if (!fp_consts_inited
)
4851 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4852 if (REAL_VALUE_MINUS_ZERO (r
))
4855 for (i
= 0; i
< fp_consts_inited
; i
++)
4856 if (REAL_VALUES_EQUAL (r
, values_fp
[i
]))
4862 /* Return TRUE if rtx X is a valid immediate FPA constant. */
4864 neg_const_double_rtx_ok_for_fpa (rtx x
)
4869 if (!fp_consts_inited
)
4872 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
4873 r
= REAL_VALUE_NEGATE (r
);
4874 if (REAL_VALUE_MINUS_ZERO (r
))
4877 for (i
= 0; i
< 8; i
++)
4878 if (REAL_VALUES_EQUAL (r
, values_fp
[i
]))
4884 /* Predicates for `match_operand' and `match_operator'. */
4886 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
4888 cirrus_memory_offset (rtx op
)
4890 /* Reject eliminable registers. */
4891 if (! (reload_in_progress
|| reload_completed
)
4892 && ( reg_mentioned_p (frame_pointer_rtx
, op
)
4893 || reg_mentioned_p (arg_pointer_rtx
, op
)
4894 || reg_mentioned_p (virtual_incoming_args_rtx
, op
)
4895 || reg_mentioned_p (virtual_outgoing_args_rtx
, op
)
4896 || reg_mentioned_p (virtual_stack_dynamic_rtx
, op
)
4897 || reg_mentioned_p (virtual_stack_vars_rtx
, op
)))
4900 if (GET_CODE (op
) == MEM
)
4906 /* Match: (mem (reg)). */
4907 if (GET_CODE (ind
) == REG
)
4913 if (GET_CODE (ind
) == PLUS
4914 && GET_CODE (XEXP (ind
, 0)) == REG
4915 && REG_MODE_OK_FOR_BASE_P (XEXP (ind
, 0), VOIDmode
)
4916 && GET_CODE (XEXP (ind
, 1)) == CONST_INT
)
/* Return TRUE if OP is a valid VFP memory address pattern.
   WB is true if writeback address modes are allowed.  */
4927 arm_coproc_mem_operand (rtx op
, bool wb
)
4931 /* Reject eliminable registers. */
4932 if (! (reload_in_progress
|| reload_completed
)
4933 && ( reg_mentioned_p (frame_pointer_rtx
, op
)
4934 || reg_mentioned_p (arg_pointer_rtx
, op
)
4935 || reg_mentioned_p (virtual_incoming_args_rtx
, op
)
4936 || reg_mentioned_p (virtual_outgoing_args_rtx
, op
)
4937 || reg_mentioned_p (virtual_stack_dynamic_rtx
, op
)
4938 || reg_mentioned_p (virtual_stack_vars_rtx
, op
)))
4941 /* Constants are converted into offsets from labels. */
4942 if (GET_CODE (op
) != MEM
)
4947 if (reload_completed
4948 && (GET_CODE (ind
) == LABEL_REF
4949 || (GET_CODE (ind
) == CONST
4950 && GET_CODE (XEXP (ind
, 0)) == PLUS
4951 && GET_CODE (XEXP (XEXP (ind
, 0), 0)) == LABEL_REF
4952 && GET_CODE (XEXP (XEXP (ind
, 0), 1)) == CONST_INT
)))
4955 /* Match: (mem (reg)). */
4956 if (GET_CODE (ind
) == REG
)
4957 return arm_address_register_rtx_p (ind
, 0);
  /* Autoincrement addressing modes.  */
4961 && (GET_CODE (ind
) == PRE_INC
4962 || GET_CODE (ind
) == POST_INC
4963 || GET_CODE (ind
) == PRE_DEC
4964 || GET_CODE (ind
) == POST_DEC
))
4965 return arm_address_register_rtx_p (XEXP (ind
, 0), 0);
4968 && (GET_CODE (ind
) == POST_MODIFY
|| GET_CODE (ind
) == PRE_MODIFY
)
4969 && arm_address_register_rtx_p (XEXP (ind
, 0), 0)
4970 && GET_CODE (XEXP (ind
, 1)) == PLUS
4971 && rtx_equal_p (XEXP (XEXP (ind
, 1), 0), XEXP (ind
, 0)))
4972 ind
= XEXP (ind
, 1);
4977 if (GET_CODE (ind
) == PLUS
4978 && GET_CODE (XEXP (ind
, 0)) == REG
4979 && REG_MODE_OK_FOR_BASE_P (XEXP (ind
, 0), VOIDmode
)
4980 && GET_CODE (XEXP (ind
, 1)) == CONST_INT
4981 && INTVAL (XEXP (ind
, 1)) > -1024
4982 && INTVAL (XEXP (ind
, 1)) < 1024
4983 && (INTVAL (XEXP (ind
, 1)) & 3) == 0)
/* Return true if X is a register that will be eliminated later on.  */
int
arm_eliminable_register (rtx x)
{
  return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
		       || REGNO (x) == ARG_POINTER_REGNUM
		       || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
			   && REGNO (x) <= LAST_VIRTUAL_REGISTER));
}
/* Return GENERAL_REGS if a scratch register is required to reload x to/from
   VFP registers.  Otherwise return NO_REGS.  */
enum reg_class
vfp_secondary_reload_class (enum machine_mode mode, rtx x)
{
  if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
    return NO_REGS;

  return GENERAL_REGS;
}
/* Values which must be returned in the most-significant end of the return
   register.  */
static bool
arm_return_in_msb (tree valtype)
{
  return (TARGET_AAPCS_BASED
	  && BYTES_BIG_ENDIAN
	  && (AGGREGATE_TYPE_P (valtype)
	      || TREE_CODE (valtype) == COMPLEX_TYPE));
}
/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
   Used by the Cirrus Maverick code, which has to work around
   a hardware bug triggered by such instructions.  */
static bool
arm_memory_load_p (rtx insn)
{
  rtx body, lhs, rhs;
5031 if (insn
== NULL_RTX
|| GET_CODE (insn
) != INSN
)
5034 body
= PATTERN (insn
);
5036 if (GET_CODE (body
) != SET
)
5039 lhs
= XEXP (body
, 0);
5040 rhs
= XEXP (body
, 1);
5042 lhs
= REG_OR_SUBREG_RTX (lhs
);
5044 /* If the destination is not a general purpose
5045 register we do not have to worry. */
5046 if (GET_CODE (lhs
) != REG
5047 || REGNO_REG_CLASS (REGNO (lhs
)) != GENERAL_REGS
)
5050 /* As well as loads from memory we also have to react
5051 to loads of invalid constants which will be turned
5052 into loads from the minipool. */
5053 return (GET_CODE (rhs
) == MEM
5054 || GET_CODE (rhs
) == SYMBOL_REF
5055 || note_invalid_constants (insn
, -1, false));
5058 /* Return TRUE if INSN is a Cirrus instruction. */
5060 arm_cirrus_insn_p (rtx insn
)
5062 enum attr_cirrus attr
;
5064 /* get_attr cannot accept USE or CLOBBER. */
5066 || GET_CODE (insn
) != INSN
5067 || GET_CODE (PATTERN (insn
)) == USE
5068 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
5071 attr
= get_attr_cirrus (insn
);
5073 return attr
!= CIRRUS_NOT
;
5076 /* Cirrus reorg for invalid instruction combinations. */
5078 cirrus_reorg (rtx first
)
5080 enum attr_cirrus attr
;
5081 rtx body
= PATTERN (first
);
5085 /* Any branch must be followed by 2 non Cirrus instructions. */
5086 if (GET_CODE (first
) == JUMP_INSN
&& GET_CODE (body
) != RETURN
)
5089 t
= next_nonnote_insn (first
);
5091 if (arm_cirrus_insn_p (t
))
5094 if (arm_cirrus_insn_p (next_nonnote_insn (t
)))
5098 emit_insn_after (gen_nop (), first
);
5103 /* (float (blah)) is in parallel with a clobber. */
5104 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5105 body
= XVECEXP (body
, 0, 0);
5107 if (GET_CODE (body
) == SET
)
5109 rtx lhs
= XEXP (body
, 0), rhs
= XEXP (body
, 1);
5111 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5112 be followed by a non Cirrus insn. */
5113 if (get_attr_cirrus (first
) == CIRRUS_DOUBLE
)
5115 if (arm_cirrus_insn_p (next_nonnote_insn (first
)))
5116 emit_insn_after (gen_nop (), first
);
5120 else if (arm_memory_load_p (first
))
5122 unsigned int arm_regno
;
5124 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5125 ldr/cfmv64hr combination where the Rd field is the same
5126 in both instructions must be split with a non Cirrus
5133 /* Get Arm register number for ldr insn. */
5134 if (GET_CODE (lhs
) == REG
)
5135 arm_regno
= REGNO (lhs
);
5138 gcc_assert (GET_CODE (rhs
) == REG
);
5139 arm_regno
= REGNO (rhs
);
5143 first
= next_nonnote_insn (first
);
5145 if (! arm_cirrus_insn_p (first
))
5148 body
= PATTERN (first
);
5150 /* (float (blah)) is in parallel with a clobber. */
5151 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0))
5152 body
= XVECEXP (body
, 0, 0);
5154 if (GET_CODE (body
) == FLOAT
)
5155 body
= XEXP (body
, 0);
5157 if (get_attr_cirrus (first
) == CIRRUS_MOVE
5158 && GET_CODE (XEXP (body
, 1)) == REG
5159 && arm_regno
== REGNO (XEXP (body
, 1)))
5160 emit_insn_after (gen_nop (), first
);
5166 /* get_attr cannot accept USE or CLOBBER. */
5168 || GET_CODE (first
) != INSN
5169 || GET_CODE (PATTERN (first
)) == USE
5170 || GET_CODE (PATTERN (first
)) == CLOBBER
)
5173 attr
= get_attr_cirrus (first
);
5175 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5176 must be followed by a non-coprocessor instruction. */
5177 if (attr
== CIRRUS_COMPARE
)
5181 t
= next_nonnote_insn (first
);
5183 if (arm_cirrus_insn_p (t
))
5186 if (arm_cirrus_insn_p (next_nonnote_insn (t
)))
5190 emit_insn_after (gen_nop (), first
);
/* Return TRUE if X references a SYMBOL_REF.  */
int
symbol_mentioned_p (rtx x)
{
  const char * fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (symbol_mentioned_p (XVECEXP (x, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
	return 1;
    }

  return 0;
}
/* Return TRUE if X references a LABEL_REF.  */
int
label_mentioned_p (rtx x)
{
  const char * fmt;
  int i;

  if (GET_CODE (x) == LABEL_REF)
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (label_mentioned_p (XVECEXP (x, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
	return 1;
    }

  return 0;
}
5256 enum rtx_code code
= GET_CODE (x
);
5273 /* Return 1 if memory locations are adjacent. */
5275 adjacent_mem_locations (rtx a
, rtx b
)
5277 /* We don't guarantee to preserve the order of these memory refs. */
5278 if (volatile_refs_p (a
) || volatile_refs_p (b
))
5281 if ((GET_CODE (XEXP (a
, 0)) == REG
5282 || (GET_CODE (XEXP (a
, 0)) == PLUS
5283 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
5284 && (GET_CODE (XEXP (b
, 0)) == REG
5285 || (GET_CODE (XEXP (b
, 0)) == PLUS
5286 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
5288 HOST_WIDE_INT val0
= 0, val1
= 0;
5292 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
5294 reg0
= XEXP (XEXP (a
, 0), 0);
5295 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
5300 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
5302 reg1
= XEXP (XEXP (b
, 0), 0);
5303 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
5308 /* Don't accept any offset that will require multiple
5309 instructions to handle, since this would cause the
5310 arith_adjacentmem pattern to output an overlong sequence. */
5311 if (!const_ok_for_op (PLUS
, val0
) || !const_ok_for_op (PLUS
, val1
))
5314 /* Don't allow an eliminable register: register elimination can make
5315 the offset too large. */
5316 if (arm_eliminable_register (reg0
))
5319 val_diff
= val1
- val0
;
5323 /* If the target has load delay slots, then there's no benefit
5324 to using an ldm instruction unless the offset is zero and
5325 we are optimizing for size. */
5326 return (optimize_size
&& (REGNO (reg0
) == REGNO (reg1
))
5327 && (val0
== 0 || val1
== 0 || val0
== 4 || val1
== 4)
5328 && (val_diff
== 4 || val_diff
== -4));
5331 return ((REGNO (reg0
) == REGNO (reg1
))
5332 && (val_diff
== 4 || val_diff
== -4));
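
/* Illustrative sketch (not part of the original sources): once both
   addresses above have been reduced to the same base register plus
   constant offsets VAL0 and VAL1, the remaining test accepts the pair
   only when the two words are directly adjacent.  The predicate below
   shows that core offset check on plain integers; the function name is
   hypothetical, and the extra restrictions in the real code (load delay
   slots, eliminable base registers, immediate ranges) are left out.  */
static int
example_offsets_adjacent_p (long val0, long val1)
{
  long diff = val1 - val0;

  /* One word apart, in either order.  */
  return diff == 4 || diff == -4;
}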
5339 load_multiple_sequence (rtx
*operands
, int nops
, int *regs
, int *base
,
5340 HOST_WIDE_INT
*load_offset
)
5342 int unsorted_regs
[4];
5343 HOST_WIDE_INT unsorted_offsets
[4];
5348 /* Can only handle 2, 3, or 4 insns at present,
5349 though could be easily extended if required. */
5350 gcc_assert (nops
>= 2 && nops
<= 4);
5352 /* Loop over the operands and check that the memory references are
5353 suitable (i.e. immediate offsets from the same base register). At
5354 the same time, extract the target register, and the memory
5356 for (i
= 0; i
< nops
; i
++)
5361 /* Convert a subreg of a mem into the mem itself. */
5362 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
5363 operands
[nops
+ i
] = alter_subreg (operands
+ (nops
+ i
));
5365 gcc_assert (GET_CODE (operands
[nops
+ i
]) == MEM
);
5367 /* Don't reorder volatile memory references; it doesn't seem worth
5368 looking for the case where the order is ok anyway. */
5369 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
5372 offset
= const0_rtx
;
5374 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
5375 || (GET_CODE (reg
) == SUBREG
5376 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
5377 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
5378 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
5380 || (GET_CODE (reg
) == SUBREG
5381 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
5382 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
5387 base_reg
= REGNO (reg
);
5388 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
5389 ? REGNO (operands
[i
])
5390 : REGNO (SUBREG_REG (operands
[i
])));
5395 if (base_reg
!= (int) REGNO (reg
))
5396 /* Not addressed from the same base register. */
5399 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
5400 ? REGNO (operands
[i
])
5401 : REGNO (SUBREG_REG (operands
[i
])));
5402 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
5406 /* If it isn't an integer register, or if it overwrites the
5407 base register but isn't the last insn in the list, then
5408 we can't do this. */
5409 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
5410 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
5413 unsorted_offsets
[i
] = INTVAL (offset
);
5416 /* Not a suitable memory address. */
5420 /* All the useful information has now been extracted from the
5421 operands into unsorted_regs and unsorted_offsets; additionally,
5422 order[0] has been set to the lowest numbered register in the
5423 list. Sort the registers into order, and check that the memory
5424 offsets are ascending and adjacent. */
5426 for (i
= 1; i
< nops
; i
++)
5430 order
[i
] = order
[i
- 1];
5431 for (j
= 0; j
< nops
; j
++)
5432 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
5433 && (order
[i
] == order
[i
- 1]
5434 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
      /* Have we found a suitable register?  If not, one must be used more
	 than once.  */
5439 if (order
[i
] == order
[i
- 1])
5442 /* Is the memory address adjacent and ascending? */
5443 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
5451 for (i
= 0; i
< nops
; i
++)
5452 regs
[i
] = unsorted_regs
[order
[i
]];
5454 *load_offset
= unsorted_offsets
[order
[0]];
5457 if (unsorted_offsets
[order
[0]] == 0)
5458 return 1; /* ldmia */
5460 if (unsorted_offsets
[order
[0]] == 4)
5461 return 2; /* ldmib */
5463 if (unsorted_offsets
[order
[nops
- 1]] == 0)
5464 return 3; /* ldmda */
5466 if (unsorted_offsets
[order
[nops
- 1]] == -4)
5467 return 4; /* ldmdb */
5469 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
5470 if the offset isn't small enough. The reason 2 ldrs are faster
5471 is because these ARMs are able to do more than one cache access
5472 in a single cycle. The ARM9 and StrongARM have Harvard caches,
5473 whilst the ARM8 has a double bandwidth cache. This means that
5474 these cores can do both an instruction fetch and a data fetch in
5475 a single cycle, so the trick of calculating the address into a
5476 scratch register (one of the result regs) and then doing a load
5477 multiple actually becomes slower (and no smaller in code size).
5478 That is the transformation
5480 ldr rd1, [rbase + offset]
5481 ldr rd2, [rbase + offset + 4]
5485 add rd1, rbase, offset
5486 ldmia rd1, {rd1, rd2}
5488 produces worse code -- '3 cycles + any stalls on rd2' instead of
5489 '2 cycles + any stalls on rd2'. On ARMs with only one cache
5490 access per cycle, the first sequence could never complete in less
5491 than 6 cycles, whereas the ldm sequence would only take 5 and
5492 would make better use of sequential accesses if not hitting the
5495 We cheat here and test 'arm_ld_sched' which we currently know to
5496 only be true for the ARM8, ARM9 and StrongARM. If this ever
5497 changes, then the test below needs to be reworked. */
5498 if (nops
== 2 && arm_ld_sched
)
5501 /* Can't do it without setting up the offset, only do this if it takes
5502 no more than one insn. */
5503 return (const_ok_for_arm (unsorted_offsets
[order
[0]])
5504 || const_ok_for_arm (-unsorted_offsets
[order
[0]])) ? 5 : 0;
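
/* Illustrative sketch (not part of the original sources): the return
   values of load_multiple_sequence encode which ldm addressing mode fits
   the sorted offsets -- 1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb, and
   5 means the base must first be adjusted with a separate add/sub (0 is
   failure).  The helper below reproduces that mapping from the lowest
   and highest offsets of an ascending, word-adjacent run; the function
   name is hypothetical.  */
static int
example_multi_load_method (long first_offset, long last_offset)
{
  if (first_offset == 0)
    return 1;			/* ldmia: base points at the first word.  */
  if (first_offset == 4)
    return 2;			/* ldmib: base points one word below.  */
  if (last_offset == 0)
    return 3;			/* ldmda: base points at the last word.  */
  if (last_offset == -4)
    return 4;			/* ldmdb: base points one word above.  */

  return 5;			/* Offset the base into a scratch register first.  */
}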
5508 emit_ldm_seq (rtx
*operands
, int nops
)
5512 HOST_WIDE_INT offset
;
5516 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
5519 strcpy (buf
, "ldm%?ia\t");
5523 strcpy (buf
, "ldm%?ib\t");
5527 strcpy (buf
, "ldm%?da\t");
5531 strcpy (buf
, "ldm%?db\t");
5536 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
5537 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
5540 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
5541 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
5543 output_asm_insn (buf
, operands
);
5545 strcpy (buf
, "ldm%?ia\t");
5552 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
5553 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
5555 for (i
= 1; i
< nops
; i
++)
5556 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
5557 reg_names
[regs
[i
]]);
5559 strcat (buf
, "}\t%@ phole ldm");
5561 output_asm_insn (buf
, operands
);
5566 store_multiple_sequence (rtx
*operands
, int nops
, int *regs
, int *base
,
5567 HOST_WIDE_INT
* load_offset
)
5569 int unsorted_regs
[4];
5570 HOST_WIDE_INT unsorted_offsets
[4];
5575 /* Can only handle 2, 3, or 4 insns at present, though could be easily
5576 extended if required. */
5577 gcc_assert (nops
>= 2 && nops
<= 4);
5579 /* Loop over the operands and check that the memory references are
5580 suitable (i.e. immediate offsets from the same base register). At
5581 the same time, extract the target register, and the memory
5583 for (i
= 0; i
< nops
; i
++)
5588 /* Convert a subreg of a mem into the mem itself. */
5589 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
5590 operands
[nops
+ i
] = alter_subreg (operands
+ (nops
+ i
));
5592 gcc_assert (GET_CODE (operands
[nops
+ i
]) == MEM
);
5594 /* Don't reorder volatile memory references; it doesn't seem worth
5595 looking for the case where the order is ok anyway. */
5596 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
5599 offset
= const0_rtx
;
5601 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
5602 || (GET_CODE (reg
) == SUBREG
5603 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
5604 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
5605 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
5607 || (GET_CODE (reg
) == SUBREG
5608 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
5609 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
5614 base_reg
= REGNO (reg
);
5615 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
5616 ? REGNO (operands
[i
])
5617 : REGNO (SUBREG_REG (operands
[i
])));
5622 if (base_reg
!= (int) REGNO (reg
))
5623 /* Not addressed from the same base register. */
5626 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
5627 ? REGNO (operands
[i
])
5628 : REGNO (SUBREG_REG (operands
[i
])));
5629 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
5633 /* If it isn't an integer register, then we can't do this. */
5634 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
5637 unsorted_offsets
[i
] = INTVAL (offset
);
5640 /* Not a suitable memory address. */
5644 /* All the useful information has now been extracted from the
5645 operands into unsorted_regs and unsorted_offsets; additionally,
5646 order[0] has been set to the lowest numbered register in the
5647 list. Sort the registers into order, and check that the memory
5648 offsets are ascending and adjacent. */
5650 for (i
= 1; i
< nops
; i
++)
5654 order
[i
] = order
[i
- 1];
5655 for (j
= 0; j
< nops
; j
++)
5656 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
5657 && (order
[i
] == order
[i
- 1]
5658 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
      /* Have we found a suitable register?  If not, one must be used more
	 than once.  */
5663 if (order
[i
] == order
[i
- 1])
5666 /* Is the memory address adjacent and ascending? */
5667 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
5675 for (i
= 0; i
< nops
; i
++)
5676 regs
[i
] = unsorted_regs
[order
[i
]];
5678 *load_offset
= unsorted_offsets
[order
[0]];
5681 if (unsorted_offsets
[order
[0]] == 0)
5682 return 1; /* stmia */
5684 if (unsorted_offsets
[order
[0]] == 4)
5685 return 2; /* stmib */
5687 if (unsorted_offsets
[order
[nops
- 1]] == 0)
5688 return 3; /* stmda */
5690 if (unsorted_offsets
[order
[nops
- 1]] == -4)
5691 return 4; /* stmdb */
5697 emit_stm_seq (rtx
*operands
, int nops
)
5701 HOST_WIDE_INT offset
;
5705 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
5708 strcpy (buf
, "stm%?ia\t");
5712 strcpy (buf
, "stm%?ib\t");
5716 strcpy (buf
, "stm%?da\t");
5720 strcpy (buf
, "stm%?db\t");
5727 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
5728 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
5730 for (i
= 1; i
< nops
; i
++)
5731 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
5732 reg_names
[regs
[i
]]);
5734 strcat (buf
, "}\t%@ phole stm");
5736 output_asm_insn (buf
, operands
);
5741 /* Routines for use in generating RTL. */
5744 arm_gen_load_multiple (int base_regno
, int count
, rtx from
, int up
,
5745 int write_back
, rtx basemem
, HOST_WIDE_INT
*offsetp
)
5747 HOST_WIDE_INT offset
= *offsetp
;
5750 int sign
= up
? 1 : -1;
5753 /* XScale has load-store double instructions, but they have stricter
     alignment requirements than load-store multiple, so we cannot
     use them.
5757 For XScale ldm requires 2 + NREGS cycles to complete and blocks
5758 the pipeline until completion.
5766 An ldr instruction takes 1-3 cycles, but does not block the
5775 Best case ldr will always win. However, the more ldr instructions
5776 we issue, the less likely we are to be able to schedule them well.
5777 Using ldr instructions also increases code size.
5779 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
5780 for counts of 3 or 4 regs. */
5781 if (arm_tune_xscale
&& count
<= 2 && ! optimize_size
)
5787 for (i
= 0; i
< count
; i
++)
5789 addr
= plus_constant (from
, i
* 4 * sign
);
5790 mem
= adjust_automodify_address (basemem
, SImode
, addr
, offset
);
5791 emit_move_insn (gen_rtx_REG (SImode
, base_regno
+ i
), mem
);
5797 emit_move_insn (from
, plus_constant (from
, count
* 4 * sign
));
5807 result
= gen_rtx_PARALLEL (VOIDmode
,
5808 rtvec_alloc (count
+ (write_back
? 1 : 0)));
5811 XVECEXP (result
, 0, 0)
5812 = gen_rtx_SET (GET_MODE (from
), from
,
5813 plus_constant (from
, count
* 4 * sign
));
5818 for (j
= 0; i
< count
; i
++, j
++)
5820 addr
= plus_constant (from
, j
* 4 * sign
);
5821 mem
= adjust_automodify_address_nv (basemem
, SImode
, addr
, offset
);
5822 XVECEXP (result
, 0, i
)
5823 = gen_rtx_SET (VOIDmode
, gen_rtx_REG (SImode
, base_regno
+ j
), mem
);
5834 arm_gen_store_multiple (int base_regno
, int count
, rtx to
, int up
,
5835 int write_back
, rtx basemem
, HOST_WIDE_INT
*offsetp
)
5837 HOST_WIDE_INT offset
= *offsetp
;
5840 int sign
= up
? 1 : -1;
5843 /* See arm_gen_load_multiple for discussion of
5844 the pros/cons of ldm/stm usage for XScale. */
5845 if (arm_tune_xscale
&& count
<= 2 && ! optimize_size
)
5851 for (i
= 0; i
< count
; i
++)
5853 addr
= plus_constant (to
, i
* 4 * sign
);
5854 mem
= adjust_automodify_address (basemem
, SImode
, addr
, offset
);
5855 emit_move_insn (mem
, gen_rtx_REG (SImode
, base_regno
+ i
));
5861 emit_move_insn (to
, plus_constant (to
, count
* 4 * sign
));
5871 result
= gen_rtx_PARALLEL (VOIDmode
,
5872 rtvec_alloc (count
+ (write_back
? 1 : 0)));
5875 XVECEXP (result
, 0, 0)
5876 = gen_rtx_SET (GET_MODE (to
), to
,
5877 plus_constant (to
, count
* 4 * sign
));
5882 for (j
= 0; i
< count
; i
++, j
++)
5884 addr
= plus_constant (to
, j
* 4 * sign
);
5885 mem
= adjust_automodify_address_nv (basemem
, SImode
, addr
, offset
);
5886 XVECEXP (result
, 0, i
)
5887 = gen_rtx_SET (VOIDmode
, mem
, gen_rtx_REG (SImode
, base_regno
+ j
));
5898 arm_gen_movmemqi (rtx
*operands
)
5900 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
5901 HOST_WIDE_INT srcoffset
, dstoffset
;
5903 rtx src
, dst
, srcbase
, dstbase
;
5904 rtx part_bytes_reg
= NULL
;
5907 if (GET_CODE (operands
[2]) != CONST_INT
5908 || GET_CODE (operands
[3]) != CONST_INT
5909 || INTVAL (operands
[2]) > 64
5910 || INTVAL (operands
[3]) & 3)
5913 dstbase
= operands
[0];
5914 srcbase
= operands
[1];
5916 dst
= copy_to_mode_reg (SImode
, XEXP (dstbase
, 0));
5917 src
= copy_to_mode_reg (SImode
, XEXP (srcbase
, 0));
5919 in_words_to_go
= ARM_NUM_INTS (INTVAL (operands
[2]));
5920 out_words_to_go
= INTVAL (operands
[2]) / 4;
5921 last_bytes
= INTVAL (operands
[2]) & 3;
5922 dstoffset
= srcoffset
= 0;
5924 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
5925 part_bytes_reg
= gen_rtx_REG (SImode
, (in_words_to_go
- 1) & 3);
5927 for (i
= 0; in_words_to_go
>= 2; i
+=4)
5929 if (in_words_to_go
> 4)
5930 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
5931 srcbase
, &srcoffset
));
5933 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
5934 FALSE
, srcbase
, &srcoffset
));
5936 if (out_words_to_go
)
5938 if (out_words_to_go
> 4)
5939 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
5940 dstbase
, &dstoffset
));
5941 else if (out_words_to_go
!= 1)
5942 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
5946 dstbase
, &dstoffset
));
5949 mem
= adjust_automodify_address (dstbase
, SImode
, dst
, dstoffset
);
5950 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
5951 if (last_bytes
!= 0)
5953 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
5959 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
5960 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
5963 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
5964 if (out_words_to_go
)
5968 mem
= adjust_automodify_address (srcbase
, SImode
, src
, srcoffset
);
5969 sreg
= copy_to_reg (mem
);
5971 mem
= adjust_automodify_address (dstbase
, SImode
, dst
, dstoffset
);
5972 emit_move_insn (mem
, sreg
);
5975 gcc_assert (!in_words_to_go
); /* Sanity check */
5980 gcc_assert (in_words_to_go
> 0);
5982 mem
= adjust_automodify_address (srcbase
, SImode
, src
, srcoffset
);
5983 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
5986 gcc_assert (!last_bytes
|| part_bytes_reg
);
5988 if (BYTES_BIG_ENDIAN
&& last_bytes
)
5990 rtx tmp
= gen_reg_rtx (SImode
);
5992 /* The bytes we want are in the top end of the word. */
5993 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
5994 GEN_INT (8 * (4 - last_bytes
))));
5995 part_bytes_reg
= tmp
;
5999 mem
= adjust_automodify_address (dstbase
, QImode
,
6000 plus_constant (dst
, last_bytes
- 1),
6001 dstoffset
+ last_bytes
- 1);
6002 emit_move_insn (mem
, gen_lowpart (QImode
, part_bytes_reg
));
6006 tmp
= gen_reg_rtx (SImode
);
6007 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
6008 part_bytes_reg
= tmp
;
6017 mem
= adjust_automodify_address (dstbase
, HImode
, dst
, dstoffset
);
6018 emit_move_insn (mem
, gen_lowpart (HImode
, part_bytes_reg
));
6022 rtx tmp
= gen_reg_rtx (SImode
);
6023 emit_insn (gen_addsi3 (dst
, dst
, const2_rtx
));
6024 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (16)));
6025 part_bytes_reg
= tmp
;
6032 mem
= adjust_automodify_address (dstbase
, QImode
, dst
, dstoffset
);
6033 emit_move_insn (mem
, gen_lowpart (QImode
, part_bytes_reg
));
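
/* Illustrative sketch (not part of the original sources): arm_gen_movmemqi
   above splits a fixed-size copy into whole-word transfers (done with
   load/store-multiple, up to four words per iteration) plus at most three
   trailing bytes handled separately.  The helper below shows how the byte
   count breaks down; the names are hypothetical.  */
static void
example_split_copy (long nbytes, long *in_words, long *out_words,
		    long *last_bytes)
{
  *in_words = (nbytes + 3) / 4;	/* Words loaded (rounded up).  */
  *out_words = nbytes / 4;	/* Whole words stored.  */
  *last_bytes = nbytes & 3;	/* Trailing bytes stored individually.  */
}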
6040 /* Generate a memory reference for a half word, such that it will be loaded
6041 into the top 16 bits of the word. We can assume that the address is
6042 known to be alignable and of the form reg, or plus (reg, const). */
6045 arm_gen_rotated_half_load (rtx memref
)
6047 HOST_WIDE_INT offset
= 0;
6048 rtx base
= XEXP (memref
, 0);
6050 if (GET_CODE (base
) == PLUS
)
6052 offset
= INTVAL (XEXP (base
, 1));
6053 base
= XEXP (base
, 0);
6056 /* If we aren't allowed to generate unaligned addresses, then fail. */
6057 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 0))
6060 base
= gen_rtx_MEM (SImode
, plus_constant (base
, offset
& ~2));
6062 if ((BYTES_BIG_ENDIAN
? 1 : 0) ^ ((offset
& 2) == 2))
6065 return gen_rtx_ROTATE (SImode
, base
, GEN_INT (16));
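
/* Illustrative sketch (not part of the original sources): the routine
   above loads the aligned word containing the half-word and, when the
   wanted half-word would otherwise land in the low 16 bits (a condition
   derived from bit 1 of the offset and the endianness), wraps the load
   in a 16-bit rotate so that the wanted bits always end up at the top of
   the register.  The helper below shows the equivalent data movement on
   a value that has already been loaded; the name is hypothetical.  */
static unsigned long
example_halfword_to_top (unsigned long word, int needs_rotate)
{
  if (!needs_rotate)
    return word;		/* Wanted half-word already in the top 16 bits.  */

  /* Rotate by 16 to swap the two half-words of a 32-bit value.  */
  return ((word >> 16) | (word << 16)) & 0xffffffffUL;
}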
6068 /* Select a dominance comparison mode if possible for a test of the general
6069 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6070 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6071 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6072 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6073 In all cases OP will be either EQ or NE, but we don't need to know which
6074 here. If we are unable to support a dominance comparison we return
6075 CC mode. This will then fail to match for the RTL expressions that
6076 generate this call. */
6078 arm_select_dominance_cc_mode (rtx x
, rtx y
, HOST_WIDE_INT cond_or
)
6080 enum rtx_code cond1
, cond2
;
6083 /* Currently we will probably get the wrong result if the individual
6084 comparisons are not simple. This also ensures that it is safe to
6085 reverse a comparison if necessary. */
6086 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
6088 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
6092 /* The if_then_else variant of this tests the second condition if the
6093 first passes, but is true if the first fails. Reverse the first
6094 condition to get a true "inclusive-or" expression. */
6095 if (cond_or
== DOM_CC_NX_OR_Y
)
6096 cond1
= reverse_condition (cond1
);
6098 /* If the comparisons are not equal, and one doesn't dominate the other,
6099 then we can't do this. */
6101 && !comparison_dominates_p (cond1
, cond2
)
6102 && (swapped
= 1, !comparison_dominates_p (cond2
, cond1
)))
6107 enum rtx_code temp
= cond1
;
6115 if (cond_or
== DOM_CC_X_AND_Y
)
6120 case EQ
: return CC_DEQmode
;
6121 case LE
: return CC_DLEmode
;
6122 case LEU
: return CC_DLEUmode
;
6123 case GE
: return CC_DGEmode
;
6124 case GEU
: return CC_DGEUmode
;
6125 default: gcc_unreachable ();
6129 if (cond_or
== DOM_CC_X_AND_Y
)
6145 if (cond_or
== DOM_CC_X_AND_Y
)
6161 if (cond_or
== DOM_CC_X_AND_Y
)
6177 if (cond_or
== DOM_CC_X_AND_Y
)
6192 /* The remaining cases only occur when both comparisons are the
6195 gcc_assert (cond1
== cond2
);
6199 gcc_assert (cond1
== cond2
);
6203 gcc_assert (cond1
== cond2
);
6207 gcc_assert (cond1
== cond2
);
6211 gcc_assert (cond1
== cond2
);
6220 arm_select_cc_mode (enum rtx_code op
, rtx x
, rtx y
)
6222 /* All floating point compares return CCFP if it is an equality
6223 comparison, and CCFPE otherwise. */
6224 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
6244 if (TARGET_HARD_FLOAT
&& TARGET_MAVERICK
)
6253 /* A compare with a shifted operand. Because of canonicalization, the
6254 comparison will have to be swapped when we emit the assembler. */
6255 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
6256 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
6257 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
6258 || GET_CODE (x
) == ROTATERT
))
6261 /* This operation is performed swapped, but since we only rely on the Z
6262 flag we don't need an additional mode. */
6263 if (GET_MODE (y
) == SImode
&& REG_P (y
)
6264 && GET_CODE (x
) == NEG
6265 && (op
== EQ
|| op
== NE
))
6268 /* This is a special case that is used by combine to allow a
6269 comparison of a shifted byte load to be split into a zero-extend
6270 followed by a comparison of the shifted integer (only valid for
6271 equalities and unsigned inequalities). */
6272 if (GET_MODE (x
) == SImode
6273 && GET_CODE (x
) == ASHIFT
6274 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
6275 && GET_CODE (XEXP (x
, 0)) == SUBREG
6276 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
6277 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
6278 && (op
== EQ
|| op
== NE
6279 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
6280 && GET_CODE (y
) == CONST_INT
)
6283 /* A construct for a conditional compare, if the false arm contains
6284 0, then both conditions must be true, otherwise either condition
6285 must be true. Not all conditions are possible, so CCmode is
6286 returned if it can't be done. */
6287 if (GET_CODE (x
) == IF_THEN_ELSE
6288 && (XEXP (x
, 2) == const0_rtx
6289 || XEXP (x
, 2) == const1_rtx
)
6290 && COMPARISON_P (XEXP (x
, 0))
6291 && COMPARISON_P (XEXP (x
, 1)))
6292 return arm_select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
6293 INTVAL (XEXP (x
, 2)));
6295 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6296 if (GET_CODE (x
) == AND
6297 && COMPARISON_P (XEXP (x
, 0))
6298 && COMPARISON_P (XEXP (x
, 1)))
6299 return arm_select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
6302 if (GET_CODE (x
) == IOR
6303 && COMPARISON_P (XEXP (x
, 0))
6304 && COMPARISON_P (XEXP (x
, 1)))
6305 return arm_select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
6308 /* An operation (on Thumb) where we want to test for a single bit.
6309 This is done by shifting that bit up into the top bit of a
6310 scratch register; we can then branch on the sign bit. */
6312 && GET_MODE (x
) == SImode
6313 && (op
== EQ
|| op
== NE
)
6314 && (GET_CODE (x
) == ZERO_EXTRACT
))
6317 /* An operation that sets the condition codes as a side-effect, the
6318 V flag is not set correctly, so we can only use comparisons where
6319 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6321 if (GET_MODE (x
) == SImode
6323 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
6324 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
6325 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
6326 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
6327 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
6328 || GET_CODE (x
) == LSHIFTRT
6329 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
6330 || GET_CODE (x
) == ROTATERT
6331 || (TARGET_ARM
&& GET_CODE (x
) == ZERO_EXTRACT
)))
6334 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
6337 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
6338 && GET_CODE (x
) == PLUS
6339 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for register 0 in the proper mode.  FP means this is a
   floating point compare: I don't think that it is needed on the arm.  */
rtx
arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
{
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
			  gen_rtx_COMPARE (mode, x, y)));

  return cc_reg;
}
/* Generate a sequence of insns that will generate the correct return
   address mask depending on the physical architecture that the program
   is running on.  */
rtx
arm_gen_return_addr_mask (void)
{
  rtx reg = gen_reg_rtx (Pmode);

  emit_insn (gen_return_addr_mask (reg));
  return reg;
}
6373 arm_reload_in_hi (rtx
*operands
)
6375 rtx ref
= operands
[1];
6377 HOST_WIDE_INT offset
= 0;
6379 if (GET_CODE (ref
) == SUBREG
)
6381 offset
= SUBREG_BYTE (ref
);
6382 ref
= SUBREG_REG (ref
);
6385 if (GET_CODE (ref
) == REG
)
6387 /* We have a pseudo which has been spilt onto the stack; there
6388 are two cases here: the first where there is a simple
6389 stack-slot replacement and a second where the stack-slot is
6390 out of range, or is used as a subreg. */
6391 if (reg_equiv_mem
[REGNO (ref
)])
6393 ref
= reg_equiv_mem
[REGNO (ref
)];
6394 base
= find_replacement (&XEXP (ref
, 0));
6397 /* The slot is out of range, or was dressed up in a SUBREG. */
6398 base
= reg_equiv_address
[REGNO (ref
)];
6401 base
= find_replacement (&XEXP (ref
, 0));
6403 /* Handle the case where the address is too complex to be offset by 1. */
6404 if (GET_CODE (base
) == MINUS
6405 || (GET_CODE (base
) == PLUS
&& GET_CODE (XEXP (base
, 1)) != CONST_INT
))
6407 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
6409 emit_insn (gen_rtx_SET (VOIDmode
, base_plus
, base
));
6412 else if (GET_CODE (base
) == PLUS
)
6414 /* The addend must be CONST_INT, or we would have dealt with it above. */
6415 HOST_WIDE_INT hi
, lo
;
6417 offset
+= INTVAL (XEXP (base
, 1));
6418 base
= XEXP (base
, 0);
6420 /* Rework the address into a legal sequence of insns. */
6421 /* Valid range for lo is -4095 -> 4095 */
6424 : -((-offset
) & 0xfff));
6426 /* Corner case, if lo is the max offset then we would be out of range
6427 once we have added the additional 1 below, so bump the msb into the
6428 pre-loading insn(s). */
6432 hi
= ((((offset
- lo
) & (HOST_WIDE_INT
) 0xffffffff)
6433 ^ (HOST_WIDE_INT
) 0x80000000)
6434 - (HOST_WIDE_INT
) 0x80000000);
6436 gcc_assert (hi
+ lo
== offset
);
6440 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
6442 /* Get the base address; addsi3 knows how to handle constants
6443 that require more than one insn. */
6444 emit_insn (gen_addsi3 (base_plus
, base
, GEN_INT (hi
)));
6450 /* Operands[2] may overlap operands[0] (though it won't overlap
6451 operands[1]), that's why we asked for a DImode reg -- so we can
6452 use the bit that does not overlap. */
6453 if (REGNO (operands
[2]) == REGNO (operands
[0]))
6454 scratch
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
6456 scratch
= gen_rtx_REG (SImode
, REGNO (operands
[2]));
6458 emit_insn (gen_zero_extendqisi2 (scratch
,
6459 gen_rtx_MEM (QImode
,
6460 plus_constant (base
,
6462 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode
, operands
[0], 0),
6463 gen_rtx_MEM (QImode
,
6464 plus_constant (base
,
6466 if (!BYTES_BIG_ENDIAN
)
6467 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_SUBREG (SImode
, operands
[0], 0),
6468 gen_rtx_IOR (SImode
,
6471 gen_rtx_SUBREG (SImode
, operands
[0], 0),
6475 emit_insn (gen_rtx_SET (VOIDmode
, gen_rtx_SUBREG (SImode
, operands
[0], 0),
6476 gen_rtx_IOR (SImode
,
6477 gen_rtx_ASHIFT (SImode
, scratch
,
6479 gen_rtx_SUBREG (SImode
, operands
[0],
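
/* Illustrative sketch (not part of the original sources): both reload
   helpers in this area split an out-of-range constant offset into a low
   part that fits the +/-4095 byte-offset range of ldrb/strb and a high
   part that is added to the base register first.  The XOR/subtract pair
   is the usual trick for sign-extending the 32-bit remainder in a type
   that may be wider than 32 bits.  The helper below shows the same
   computation on a long long; the name is hypothetical.  */
static void
example_split_offset (long long offset, long long *hi, long long *lo)
{
  /* Low part: the offset reduced into the directly encodable range.  */
  *lo = (offset >= 0 ? (offset & 0xfff) : -((-offset) & 0xfff));

  /* High part: whatever is left, sign-extended from 32 bits.  */
  *hi = ((((offset - *lo) & 0xffffffffLL) ^ 0x80000000LL) - 0x80000000LL);

  /* The two parts always recombine exactly: *hi + *lo == offset.  */
}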
6483 /* Handle storing a half-word to memory during reload by synthesizing as two
6484 byte stores. Take care not to clobber the input values until after we
6485 have moved them somewhere safe. This code assumes that if the DImode
6486 scratch in operands[2] overlaps either the input value or output address
6487 in some way, then that value must die in this insn (we absolutely need
6488 two scratch registers for some corner cases). */
6490 arm_reload_out_hi (rtx
*operands
)
6492 rtx ref
= operands
[0];
6493 rtx outval
= operands
[1];
6495 HOST_WIDE_INT offset
= 0;
6497 if (GET_CODE (ref
) == SUBREG
)
6499 offset
= SUBREG_BYTE (ref
);
6500 ref
= SUBREG_REG (ref
);
6503 if (GET_CODE (ref
) == REG
)
6505 /* We have a pseudo which has been spilt onto the stack; there
6506 are two cases here: the first where there is a simple
6507 stack-slot replacement and a second where the stack-slot is
6508 out of range, or is used as a subreg. */
6509 if (reg_equiv_mem
[REGNO (ref
)])
6511 ref
= reg_equiv_mem
[REGNO (ref
)];
6512 base
= find_replacement (&XEXP (ref
, 0));
6515 /* The slot is out of range, or was dressed up in a SUBREG. */
6516 base
= reg_equiv_address
[REGNO (ref
)];
6519 base
= find_replacement (&XEXP (ref
, 0));
6521 scratch
= gen_rtx_REG (SImode
, REGNO (operands
[2]));
6523 /* Handle the case where the address is too complex to be offset by 1. */
6524 if (GET_CODE (base
) == MINUS
6525 || (GET_CODE (base
) == PLUS
&& GET_CODE (XEXP (base
, 1)) != CONST_INT
))
6527 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
6529 /* Be careful not to destroy OUTVAL. */
6530 if (reg_overlap_mentioned_p (base_plus
, outval
))
6532 /* Updating base_plus might destroy outval, see if we can
6533 swap the scratch and base_plus. */
6534 if (!reg_overlap_mentioned_p (scratch
, outval
))
6537 scratch
= base_plus
;
6542 rtx scratch_hi
= gen_rtx_REG (HImode
, REGNO (operands
[2]));
6544 /* Be conservative and copy OUTVAL into the scratch now,
6545 this should only be necessary if outval is a subreg
6546 of something larger than a word. */
6547 /* XXX Might this clobber base? I can't see how it can,
6548 since scratch is known to overlap with OUTVAL, and
6549 must be wider than a word. */
6550 emit_insn (gen_movhi (scratch_hi
, outval
));
6551 outval
= scratch_hi
;
6555 emit_insn (gen_rtx_SET (VOIDmode
, base_plus
, base
));
6558 else if (GET_CODE (base
) == PLUS
)
6560 /* The addend must be CONST_INT, or we would have dealt with it above. */
6561 HOST_WIDE_INT hi
, lo
;
6563 offset
+= INTVAL (XEXP (base
, 1));
6564 base
= XEXP (base
, 0);
6566 /* Rework the address into a legal sequence of insns. */
6567 /* Valid range for lo is -4095 -> 4095 */
6570 : -((-offset
) & 0xfff));
6572 /* Corner case, if lo is the max offset then we would be out of range
6573 once we have added the additional 1 below, so bump the msb into the
6574 pre-loading insn(s). */
6578 hi
= ((((offset
- lo
) & (HOST_WIDE_INT
) 0xffffffff)
6579 ^ (HOST_WIDE_INT
) 0x80000000)
6580 - (HOST_WIDE_INT
) 0x80000000);
6582 gcc_assert (hi
+ lo
== offset
);
6586 rtx base_plus
= gen_rtx_REG (SImode
, REGNO (operands
[2]) + 1);
6588 /* Be careful not to destroy OUTVAL. */
6589 if (reg_overlap_mentioned_p (base_plus
, outval
))
6591 /* Updating base_plus might destroy outval, see if we
6592 can swap the scratch and base_plus. */
6593 if (!reg_overlap_mentioned_p (scratch
, outval
))
6596 scratch
= base_plus
;
6601 rtx scratch_hi
= gen_rtx_REG (HImode
, REGNO (operands
[2]));
6603 /* Be conservative and copy outval into scratch now,
6604 this should only be necessary if outval is a
6605 subreg of something larger than a word. */
6606 /* XXX Might this clobber base? I can't see how it
6607 can, since scratch is known to overlap with
6609 emit_insn (gen_movhi (scratch_hi
, outval
));
6610 outval
= scratch_hi
;
6614 /* Get the base address; addsi3 knows how to handle constants
6615 that require more than one insn. */
6616 emit_insn (gen_addsi3 (base_plus
, base
, GEN_INT (hi
)));
6622 if (BYTES_BIG_ENDIAN
)
6624 emit_insn (gen_movqi (gen_rtx_MEM (QImode
,
6625 plus_constant (base
, offset
+ 1)),
6626 gen_lowpart (QImode
, outval
)));
6627 emit_insn (gen_lshrsi3 (scratch
,
6628 gen_rtx_SUBREG (SImode
, outval
, 0),
6630 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (base
, offset
)),
6631 gen_lowpart (QImode
, scratch
)));
6635 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (base
, offset
)),
6636 gen_lowpart (QImode
, outval
)));
6637 emit_insn (gen_lshrsi3 (scratch
,
6638 gen_rtx_SUBREG (SImode
, outval
, 0),
6640 emit_insn (gen_movqi (gen_rtx_MEM (QImode
,
6641 plus_constant (base
, offset
+ 1)),
6642 gen_lowpart (QImode
, scratch
)));
/* Return true if a type must be passed in memory.  For AAPCS, small aggregates
   (padded to the size of a word) should be passed in a register.  */
static bool
arm_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (TARGET_AAPCS_BASED)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
6659 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
6660 Return true if an argument passed on the stack should be padded upwards,
6661 i.e. if the least-significant byte has useful data. */
6664 arm_pad_arg_upward (enum machine_mode mode
, tree type
)
6666 if (!TARGET_AAPCS_BASED
)
6667 return DEFAULT_FUNCTION_ARG_PADDING(mode
, type
);
6669 if (type
&& BYTES_BIG_ENDIAN
&& INTEGRAL_TYPE_P (type
))
6676 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
6677 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
6678 byte of the register has useful data, and return the opposite if the
6679 most significant byte does.
6680 For AAPCS, small aggregates and small complex types are always padded
6684 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED
,
6685 tree type
, int first ATTRIBUTE_UNUSED
)
6687 if (TARGET_AAPCS_BASED
6689 && (AGGREGATE_TYPE_P (type
) || TREE_CODE (type
) == COMPLEX_TYPE
)
6690 && int_size_in_bytes (type
) <= 4)
6693 /* Otherwise, use default padding. */
6694 return !BYTES_BIG_ENDIAN
;
6699 /* Print a symbolic form of X to the debug file, F. */
6701 arm_print_value (FILE *f
, rtx x
)
6703 switch (GET_CODE (x
))
6706 fprintf (f
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (x
));
6710 fprintf (f
, "<0x%lx,0x%lx>", (long)XWINT (x
, 2), (long)XWINT (x
, 3));
6718 for (i
= 0; i
< CONST_VECTOR_NUNITS (x
); i
++)
6720 fprintf (f
, HOST_WIDE_INT_PRINT_HEX
, INTVAL (CONST_VECTOR_ELT (x
, i
)));
6721 if (i
< (CONST_VECTOR_NUNITS (x
) - 1))
6729 fprintf (f
, "\"%s\"", XSTR (x
, 0));
6733 fprintf (f
, "`%s'", XSTR (x
, 0));
6737 fprintf (f
, "L%d", INSN_UID (XEXP (x
, 0)));
6741 arm_print_value (f
, XEXP (x
, 0));
6745 arm_print_value (f
, XEXP (x
, 0));
6747 arm_print_value (f
, XEXP (x
, 1));
6755 fprintf (f
, "????");
/* Routines for manipulation of the constant pool.  */

/* Arm instructions cannot load a large constant directly into a
   register; they have to come from a pc relative load.  The constant
   must therefore be placed in the addressable range of the pc
   relative load.  Depending on the precise pc relative load
   instruction the range is somewhere between 256 bytes and 4k.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow
   things down and make the code larger.

   Normally we can hide the table after an existing unconditional
   branch so that there is no interruption of the flow, but in the
   worst case the code has to branch around the table.

   We fix this by performing a scan after scheduling, which notices
   which instructions need to have their operands fetched from the
   constant table and builds the table.

   The algorithm starts by building a table of all the constants that
   need fixing up and all the natural barriers in the function (places
   where a constant table can be dropped without breaking the flow).
   For each fixup we note how far the pc-relative replacement will be
   able to reach and the offset of the instruction into the function.

   Having built the table we then group the fixes together to form
   tables that are as large as possible (subject to addressing
   constraints) and emit each table of constants after the last
   barrier that is within range of all the instructions in the group.
   If a group does not contain a barrier, then we forcibly create one
   by inserting a jump instruction into the flow.  Once the table has
   been inserted, the insns are then modified to reference the
   relevant entry in the pool.

   Possible enhancements to the algorithm (not implemented) are:

   1) For some processors and object formats, there may be benefit in
   aligning the pools to the start of cache lines; this alignment
   would need to be taken into account when calculating addressability
   of a pool.  */

/* These typedefs are located at the start of this file, so that
   they can be used in the prototypes there.  This comment is to
   remind readers of that fact so that the following structures
   can be understood more easily.

     typedef struct minipool_node    Mnode;
     typedef struct minipool_fixup   Mfix;  */
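
/* A minimal sketch (illustration only, kept out of the build) of the
   grouping pass described above: walk the ordered list of fixes, grow
   the current group while every member can still reach a common pool
   location, and place the pool at the last barrier in range, creating
   one if none exists.  The driver name here is invented purely for
   exposition; the real implementation lives in arm_reorg and the
   subroutines below.  */
#if 0
static void
minipool_grouping_sketch (Mfix *fixes)
{
  Mfix *fix = fixes;

  while (fix != NULL)
    {
      HOST_WIDE_INT limit = fix->address + fix->forwards;
      Mfix *last_barrier = NULL;
      Mfix *ftmp;

      /* Grow the group while each fix can still reach LIMIT, tightening
	 LIMIT to the most constrained member as we go.  */
      for (ftmp = fix; ftmp != NULL && ftmp->address < limit; ftmp = ftmp->next)
	{
	  if (GET_CODE (ftmp->insn) == BARRIER)
	    last_barrier = ftmp;
	  else if (ftmp->address + ftmp->forwards < limit)
	    limit = ftmp->address + ftmp->forwards;
	}

      /* No natural barrier in range: one would be created here by
	 branching around a forced barrier (see create_fix_barrier).  */
      if (last_barrier == NULL)
	last_barrier = create_fix_barrier (fix, limit);

      /* The pool is dumped after LAST_BARRIER and the group's insns are
	 redirected at it (see dump_minipool); continue with the rest.  */
      fix = ftmp;
    }
}
#endif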
struct minipool_node
{
  /* Doubly linked chain of entries.  */
  Mnode * next;
  Mnode * prev;
  /* The maximum offset into the code that this entry can be placed.  While
     pushing fixes for forward references, all entries are sorted in order
     of increasing max_address.  */
  HOST_WIDE_INT max_address;
  /* Similarly for an entry inserted for a backwards ref.  */
  HOST_WIDE_INT min_address;
  /* The number of fixes referencing this entry.  This can become zero
     if we "unpush" an entry.  In this case we ignore the entry when we
     come to emit the code.  */
  int refcount;
  /* The offset from the start of the minipool.  */
  HOST_WIDE_INT offset;
  /* The value in table.  */
  rtx value;
  /* The mode of value.  */
  enum machine_mode mode;
  /* The size of the value.  With iWMMXt enabled
     sizes > 4 also imply an alignment of 8-bytes.  */
  int fix_size;
};

struct minipool_fixup
{
  Mfix *            next;
  rtx               insn;
  HOST_WIDE_INT     address;
  rtx *             loc;
  enum machine_mode mode;
  int               fix_size;
  rtx               value;
  Mnode *           minipool;
  HOST_WIDE_INT     forwards;
  HOST_WIDE_INT     backwards;
};

/* Fixes less than a word need padding out to a word boundary.  */
#define MINIPOOL_FIX_SIZE(mode) \
  (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
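
/* Worked example (illustration only, kept out of the build):
   MINIPOOL_FIX_SIZE pads small entries up to a word, so byte- and
   halfword-sized constants still occupy four bytes in the pool, while
   word and doubleword values keep their natural size.  */
#if 0
static void
minipool_fix_size_example (void)
{
  gcc_assert (MINIPOOL_FIX_SIZE (QImode) == 4);   /* 1 byte, padded to 4.  */
  gcc_assert (MINIPOOL_FIX_SIZE (HImode) == 4);   /* 2 bytes, padded to 4.  */
  gcc_assert (MINIPOOL_FIX_SIZE (SImode) == 4);   /* Already a word.  */
  gcc_assert (MINIPOOL_FIX_SIZE (DImode) == 8);   /* Two words.  */
}
#endif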
static Mnode *	minipool_vector_head;
static Mnode *	minipool_vector_tail;
static rtx	minipool_vector_label;

/* The linked list of all minipool fixes required for this function.  */
Mfix * 		minipool_fix_head;
Mfix * 		minipool_fix_tail;
/* The fix entry for the current minipool, once it has been placed.  */
Mfix *		minipool_barrier;
/* Determines if INSN is the start of a jump table.  Returns the end
   of the TABLE or NULL_RTX.  */

static rtx
is_jump_table (rtx insn)
{
  rtx table;

  if (GET_CODE (insn) == JUMP_INSN
      && JUMP_LABEL (insn) != NULL
      && ((table = next_real_insn (JUMP_LABEL (insn)))
	  == next_real_insn (insn))
      && table != NULL
      && GET_CODE (table) == JUMP_INSN
      && (GET_CODE (PATTERN (table)) == ADDR_VEC
	  || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
    return table;

  return NULL_RTX;
}

#ifndef JUMP_TABLES_IN_TEXT_SECTION
#define JUMP_TABLES_IN_TEXT_SECTION 0
#endif
static HOST_WIDE_INT
get_jump_table_size (rtx insn)
{
  /* ADDR_VECs only take room if read-only data goes into the text
     section.  */
  if (JUMP_TABLES_IN_TEXT_SECTION
#if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
      || 1
#endif
      )
    {
      rtx body = PATTERN (insn);
      int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;

      return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
    }

  return 0;
}
/* Move a minipool fix MP from its current location to before MAX_MP.
   If MAX_MP is NULL, then MP doesn't need moving, but the addressing
   constraints may need updating.  */

static Mnode *
move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
			       HOST_WIDE_INT max_address)
{
  /* The code below assumes these are different.  */
  gcc_assert (mp != max_mp);

  if (max_mp == NULL)
    {
      if (max_address < mp->max_address)
	mp->max_address = max_address;
    }
  else
    {
      if (max_address > max_mp->max_address - mp->fix_size)
	mp->max_address = max_mp->max_address - mp->fix_size;
      else
	mp->max_address = max_address;

      /* Unlink MP from its current position.  Since max_mp is non-null,
	 mp->prev must be non-null.  */
      mp->prev->next = mp->next;
      if (mp->next != NULL)
	mp->next->prev = mp->prev;
      else
	minipool_vector_tail = mp->prev;

      /* Re-insert it before MAX_MP.  */
      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;

      if (mp->prev != NULL)
	mp->prev->next = mp;
      else
	minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  */
  while (mp->prev != NULL
	 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}
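
/* Illustration only (kept out of the build): the ordering invariant
   that the address-adjusting loop above maintains.  Pool entries are
   emitted in list order, so once a fix has been (re)inserted each
   entry can be placed no further out than its successor's limit minus
   its own size.  The checking helper is invented for exposition.  */
#if 0
static void
check_minipool_forward_invariant (void)
{
  Mnode *mp;

  for (mp = minipool_vector_head; mp != NULL && mp->next != NULL; mp = mp->next)
    gcc_assert (mp->max_address <= mp->next->max_address - mp->fix_size);
}
#endif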
6981 /* Add a constant to the minipool for a forward reference. Returns the
6982 node added or NULL if the constant will not fit in this pool. */
6984 add_minipool_forward_ref (Mfix
*fix
)
6986 /* If set, max_mp is the first pool_entry that has a lower
6987 constraint than the one we are trying to add. */
6988 Mnode
* max_mp
= NULL
;
6989 HOST_WIDE_INT max_address
= fix
->address
+ fix
->forwards
;
6992 /* If this fix's address is greater than the address of the first
6993 entry, then we can't put the fix in this pool. We subtract the
6994 size of the current fix to ensure that if the table is fully
6995 packed we still have enough room to insert this value by shuffling
6996 the other fixes forwards. */
6997 if (minipool_vector_head
&&
6998 fix
->address
>= minipool_vector_head
->max_address
- fix
->fix_size
)
7001 /* Scan the pool to see if a constant with the same value has
7002 already been added. While we are doing this, also note the
7003 location where we must insert the constant if it doesn't already
7005 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= mp
->next
)
7007 if (GET_CODE (fix
->value
) == GET_CODE (mp
->value
)
7008 && fix
->mode
== mp
->mode
7009 && (GET_CODE (fix
->value
) != CODE_LABEL
7010 || (CODE_LABEL_NUMBER (fix
->value
)
7011 == CODE_LABEL_NUMBER (mp
->value
)))
7012 && rtx_equal_p (fix
->value
, mp
->value
))
7014 /* More than one fix references this entry. */
7016 return move_minipool_fix_forward_ref (mp
, max_mp
, max_address
);
7019 /* Note the insertion point if necessary. */
7021 && mp
->max_address
> max_address
)
7024 /* If we are inserting an 8-bytes aligned quantity and
7025 we have not already found an insertion point, then
7026 make sure that all such 8-byte aligned quantities are
7027 placed at the start of the pool. */
7028 if (ARM_DOUBLEWORD_ALIGN
7030 && fix
->fix_size
== 8
7031 && mp
->fix_size
!= 8)
7034 max_address
= mp
->max_address
;
7038 /* The value is not currently in the minipool, so we need to create
7039 a new entry for it. If MAX_MP is NULL, the entry will be put on
7040 the end of the list since the placement is less constrained than
7041 any existing entry. Otherwise, we insert the new fix before
7042 MAX_MP and, if necessary, adjust the constraints on the other
7044 mp
= xmalloc (sizeof (* mp
));
7045 mp
->fix_size
= fix
->fix_size
;
7046 mp
->mode
= fix
->mode
;
7047 mp
->value
= fix
->value
;
7049 /* Not yet required for a backwards ref. */
7050 mp
->min_address
= -65536;
7054 mp
->max_address
= max_address
;
7056 mp
->prev
= minipool_vector_tail
;
7058 if (mp
->prev
== NULL
)
7060 minipool_vector_head
= mp
;
7061 minipool_vector_label
= gen_label_rtx ();
7064 mp
->prev
->next
= mp
;
7066 minipool_vector_tail
= mp
;
7070 if (max_address
> max_mp
->max_address
- mp
->fix_size
)
7071 mp
->max_address
= max_mp
->max_address
- mp
->fix_size
;
7073 mp
->max_address
= max_address
;
7076 mp
->prev
= max_mp
->prev
;
7078 if (mp
->prev
!= NULL
)
7079 mp
->prev
->next
= mp
;
7081 minipool_vector_head
= mp
;
7084 /* Save the new entry. */
7087 /* Scan over the preceding entries and adjust their addresses as
7089 while (mp
->prev
!= NULL
7090 && mp
->prev
->max_address
> mp
->max_address
- mp
->prev
->fix_size
)
7092 mp
->prev
->max_address
= mp
->max_address
- mp
->prev
->fix_size
;
7100 move_minipool_fix_backward_ref (Mnode
*mp
, Mnode
*min_mp
,
7101 HOST_WIDE_INT min_address
)
7103 HOST_WIDE_INT offset
;
7105 /* The code below assumes these are different. */
7106 gcc_assert (mp
!= min_mp
);
7110 if (min_address
> mp
->min_address
)
7111 mp
->min_address
= min_address
;
7115 /* We will adjust this below if it is too loose. */
7116 mp
->min_address
= min_address
;
7118 /* Unlink MP from its current position. Since min_mp is non-null,
7119 mp->next must be non-null. */
7120 mp
->next
->prev
= mp
->prev
;
7121 if (mp
->prev
!= NULL
)
7122 mp
->prev
->next
= mp
->next
;
7124 minipool_vector_head
= mp
->next
;
7126 /* Reinsert it after MIN_MP. */
7128 mp
->next
= min_mp
->next
;
7130 if (mp
->next
!= NULL
)
7131 mp
->next
->prev
= mp
;
7133 minipool_vector_tail
= mp
;
7139 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= mp
->next
)
7141 mp
->offset
= offset
;
7142 if (mp
->refcount
> 0)
7143 offset
+= mp
->fix_size
;
7145 if (mp
->next
&& mp
->next
->min_address
< mp
->min_address
+ mp
->fix_size
)
7146 mp
->next
->min_address
= mp
->min_address
+ mp
->fix_size
;
7152 /* Add a constant to the minipool for a backward reference. Returns the
7153 node added or NULL if the constant will not fit in this pool.
7155 Note that the code for insertion for a backwards reference can be
7156 somewhat confusing because the calculated offsets for each fix do
7157 not take into account the size of the pool (which is still under
7160 add_minipool_backward_ref (Mfix
*fix
)
7162 /* If set, min_mp is the last pool_entry that has a lower constraint
7163 than the one we are trying to add. */
7164 Mnode
*min_mp
= NULL
;
7165 /* This can be negative, since it is only a constraint. */
7166 HOST_WIDE_INT min_address
= fix
->address
- fix
->backwards
;
7169 /* If we can't reach the current pool from this insn, or if we can't
7170 insert this entry at the end of the pool without pushing other
7171 fixes out of range, then we don't try. This ensures that we
7172 can't fail later on. */
7173 if (min_address
>= minipool_barrier
->address
7174 || (minipool_vector_tail
->min_address
+ fix
->fix_size
7175 >= minipool_barrier
->address
))
7178 /* Scan the pool to see if a constant with the same value has
7179 already been added. While we are doing this, also note the
7180 location where we must insert the constant if it doesn't already
7182 for (mp
= minipool_vector_tail
; mp
!= NULL
; mp
= mp
->prev
)
7184 if (GET_CODE (fix
->value
) == GET_CODE (mp
->value
)
7185 && fix
->mode
== mp
->mode
7186 && (GET_CODE (fix
->value
) != CODE_LABEL
7187 || (CODE_LABEL_NUMBER (fix
->value
)
7188 == CODE_LABEL_NUMBER (mp
->value
)))
7189 && rtx_equal_p (fix
->value
, mp
->value
)
7190 /* Check that there is enough slack to move this entry to the
7191 end of the table (this is conservative). */
7193 > (minipool_barrier
->address
7194 + minipool_vector_tail
->offset
7195 + minipool_vector_tail
->fix_size
)))
7198 return move_minipool_fix_backward_ref (mp
, min_mp
, min_address
);
7202 mp
->min_address
+= fix
->fix_size
;
7205 /* Note the insertion point if necessary. */
7206 if (mp
->min_address
< min_address
)
7208 /* For now, we do not allow the insertion of 8-byte alignment
7209 requiring nodes anywhere but at the start of the pool. */
7210 if (ARM_DOUBLEWORD_ALIGN
7211 && fix
->fix_size
== 8 && mp
->fix_size
!= 8)
7216 else if (mp
->max_address
7217 < minipool_barrier
->address
+ mp
->offset
+ fix
->fix_size
)
7219 /* Inserting before this entry would push the fix beyond
7220 its maximum address (which can happen if we have
7221 re-located a forwards fix); force the new fix to come
7224 min_address
= mp
->min_address
+ fix
->fix_size
;
7226 /* If we are inserting an 8-bytes aligned quantity and
7227 we have not already found an insertion point, then
7228 make sure that all such 8-byte aligned quantities are
7229 placed at the start of the pool. */
7230 else if (ARM_DOUBLEWORD_ALIGN
7232 && fix
->fix_size
== 8
7233 && mp
->fix_size
< 8)
7236 min_address
= mp
->min_address
+ fix
->fix_size
;
7241 /* We need to create a new entry. */
7242 mp
= xmalloc (sizeof (* mp
));
7243 mp
->fix_size
= fix
->fix_size
;
7244 mp
->mode
= fix
->mode
;
7245 mp
->value
= fix
->value
;
7247 mp
->max_address
= minipool_barrier
->address
+ 65536;
7249 mp
->min_address
= min_address
;
7254 mp
->next
= minipool_vector_head
;
7256 if (mp
->next
== NULL
)
7258 minipool_vector_tail
= mp
;
7259 minipool_vector_label
= gen_label_rtx ();
7262 mp
->next
->prev
= mp
;
7264 minipool_vector_head
= mp
;
7268 mp
->next
= min_mp
->next
;
7272 if (mp
->next
!= NULL
)
7273 mp
->next
->prev
= mp
;
7275 minipool_vector_tail
= mp
;
7278 /* Save the new entry. */
7286 /* Scan over the following entries and adjust their offsets. */
7287 while (mp
->next
!= NULL
)
7289 if (mp
->next
->min_address
< mp
->min_address
+ mp
->fix_size
)
7290 mp
->next
->min_address
= mp
->min_address
+ mp
->fix_size
;
7293 mp
->next
->offset
= mp
->offset
+ mp
->fix_size
;
7295 mp
->next
->offset
= mp
->offset
;
static void
assign_minipool_offsets (Mfix *barrier)
{
  HOST_WIDE_INT offset = 0;
  Mnode *mp;

  minipool_barrier = barrier;

  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      mp->offset = offset;

      if (mp->refcount > 0)
	offset += mp->fix_size;
    }
}
7320 /* Output the literal table */
7322 dump_minipool (rtx scan
)
7328 if (ARM_DOUBLEWORD_ALIGN
)
7329 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= mp
->next
)
7330 if (mp
->refcount
> 0 && mp
->fix_size
== 8)
7338 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
7339 INSN_UID (scan
), (unsigned long) minipool_barrier
->address
, align64
? 8 : 4);
7341 scan
= emit_label_after (gen_label_rtx (), scan
);
7342 scan
= emit_insn_after (align64
? gen_align_8 () : gen_align_4 (), scan
);
7343 scan
= emit_label_after (minipool_vector_label
, scan
);
7345 for (mp
= minipool_vector_head
; mp
!= NULL
; mp
= nmp
)
7347 if (mp
->refcount
> 0)
7352 ";; Offset %u, min %ld, max %ld ",
7353 (unsigned) mp
->offset
, (unsigned long) mp
->min_address
,
7354 (unsigned long) mp
->max_address
);
7355 arm_print_value (dump_file
, mp
->value
);
7356 fputc ('\n', dump_file
);
7359 switch (mp
->fix_size
)
7361 #ifdef HAVE_consttable_1
7363 scan
= emit_insn_after (gen_consttable_1 (mp
->value
), scan
);
7367 #ifdef HAVE_consttable_2
7369 scan
= emit_insn_after (gen_consttable_2 (mp
->value
), scan
);
7373 #ifdef HAVE_consttable_4
7375 scan
= emit_insn_after (gen_consttable_4 (mp
->value
), scan
);
7379 #ifdef HAVE_consttable_8
7381 scan
= emit_insn_after (gen_consttable_8 (mp
->value
), scan
);
7394 minipool_vector_head
= minipool_vector_tail
= NULL
;
7395 scan
= emit_insn_after (gen_consttable_end (), scan
);
7396 scan
= emit_barrier_after (scan
);
/* Return the cost of forcibly inserting a barrier after INSN.  */

static int
arm_barrier_cost (rtx insn)
{
  /* Basing the location of the pool on the loop depth is preferable,
     but at the moment, the basic block information seems to be
     corrupt by this stage of the compilation.  */
  int base_cost = 50;
  rtx next = next_nonnote_insn (insn);

  if (next != NULL && GET_CODE (next) == CODE_LABEL)
    base_cost -= 20;

  switch (GET_CODE (insn))
    {
    case CODE_LABEL:
      /* It will always be better to place the table before the label, rather
	 than after it.  */
      return 50;

    case INSN:
    case CALL_INSN:
      return base_cost;

    case JUMP_INSN:
      return base_cost - 10;

    default:
      return base_cost + 10;
    }
}
7431 /* Find the best place in the insn stream in the range
7432 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
7433 Create the barrier by inserting a jump and add a new fix entry for
7436 create_fix_barrier (Mfix
*fix
, HOST_WIDE_INT max_address
)
7438 HOST_WIDE_INT count
= 0;
7440 rtx from
= fix
->insn
;
7441 rtx selected
= from
;
7443 HOST_WIDE_INT selected_address
;
7445 HOST_WIDE_INT max_count
= max_address
- fix
->address
;
7446 rtx label
= gen_label_rtx ();
7448 selected_cost
= arm_barrier_cost (from
);
7449 selected_address
= fix
->address
;
7451 while (from
&& count
< max_count
)
7456 /* This code shouldn't have been called if there was a natural barrier
7458 gcc_assert (GET_CODE (from
) != BARRIER
);
7460 /* Count the length of this insn. */
7461 count
+= get_attr_length (from
);
7463 /* If there is a jump table, add its length. */
7464 tmp
= is_jump_table (from
);
7467 count
+= get_jump_table_size (tmp
);
7469 /* Jump tables aren't in a basic block, so base the cost on
7470 the dispatch insn. If we select this location, we will
7471 still put the pool after the table. */
7472 new_cost
= arm_barrier_cost (from
);
7474 if (count
< max_count
&& new_cost
<= selected_cost
)
7477 selected_cost
= new_cost
;
7478 selected_address
= fix
->address
+ count
;
7481 /* Continue after the dispatch table. */
7482 from
= NEXT_INSN (tmp
);
7486 new_cost
= arm_barrier_cost (from
);
7488 if (count
< max_count
&& new_cost
<= selected_cost
)
7491 selected_cost
= new_cost
;
7492 selected_address
= fix
->address
+ count
;
7495 from
= NEXT_INSN (from
);
7498 /* Create a new JUMP_INSN that branches around a barrier. */
7499 from
= emit_jump_insn_after (gen_jump (label
), selected
);
7500 JUMP_LABEL (from
) = label
;
7501 barrier
= emit_barrier_after (from
);
7502 emit_label_after (label
, barrier
);
7504 /* Create a minipool barrier entry for the new barrier. */
7505 new_fix
= (Mfix
*) obstack_alloc (&minipool_obstack
, sizeof (* new_fix
));
7506 new_fix
->insn
= barrier
;
7507 new_fix
->address
= selected_address
;
7508 new_fix
->next
= fix
->next
;
7509 fix
->next
= new_fix
;
/* Record that there is a natural barrier in the insn stream at
   ADDRESS.  */

static void
push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

  fix->insn = insn;
  fix->address = address;

  fix->next = NULL;
  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
/* Record INSN, which will need fixing up to load a value from the
   minipool.  ADDRESS is the offset of the insn since the start of the
   function; LOC is a pointer to the part of the insn which requires
   fixing; VALUE is the constant that must be loaded, which is of type
   MODE.  */

static void
push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
		   enum machine_mode mode, rtx value)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

#ifdef AOF_ASSEMBLER
  /* PIC symbol references need to be converted into offsets into the
     based area.  */
  /* XXX This shouldn't be done here.  */
  if (flag_pic && GET_CODE (value) == SYMBOL_REF)
    value = aof_pic_entry (value);
#endif /* AOF_ASSEMBLER */

  fix->insn = insn;
  fix->address = address;
  fix->loc = loc;
  fix->mode = mode;
  fix->fix_size = MINIPOOL_FIX_SIZE (mode);
  fix->value = value;
  fix->forwards = get_attr_pool_range (insn);
  fix->backwards = get_attr_neg_pool_range (insn);
  fix->minipool = NULL;

  /* If an insn doesn't have a range defined for it, then it isn't
     expecting to be reworked by this code.  Better to stop now than
     to generate duff assembly code.  */
  gcc_assert (fix->forwards || fix->backwards);

  /* With AAPCS/iWMMXt enabled, the pool is aligned to an 8-byte boundary.
     So there might be an empty word before the start of the pool.
     Hence we reduce the forward range by 4 to allow for this
     possibility.  */
  if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
    fix->forwards -= 4;

  if (dump_file)
    {
      fprintf (dump_file,
	       ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
	       GET_MODE_NAME (mode),
	       INSN_UID (insn), (unsigned long) address,
	       -1 * (long)fix->backwards, (long)fix->forwards);
      arm_print_value (dump_file, fix->value);
      fprintf (dump_file, "\n");
    }

  /* Add it to the chain of fixes.  */
  fix->next = NULL;

  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
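
/* Illustration only (kept out of the build): how the FORWARDS and
   BACKWARDS ranges recorded above constrain pool placement for one
   fix.  A pool dumped at address POOL_ADDRESS can serve this fix only
   if it lies within the insn's pc-relative load range.  The predicate
   name is invented for exposition.  */
#if 0
static bool
fix_can_reach_pool_example (Mfix *fix, HOST_WIDE_INT pool_address)
{
  return (pool_address >= fix->address - fix->backwards
	  && pool_address <= fix->address + fix->forwards);
}
#endif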
/* Return the cost of synthesizing a 64-bit constant VAL inline.
   Returns the number of insns needed, or 99 if we don't know how to
   do it.  */

int
arm_const_double_inline_cost (rtx val)
{
  rtx lowpart, highpart;
  enum machine_mode mode;

  mode = GET_MODE (val);

  if (mode == VOIDmode)
    mode = DImode;

  gcc_assert (GET_MODE_SIZE (mode) == 8);

  lowpart = gen_lowpart (SImode, val);
  highpart = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (lowpart) == CONST_INT);
  gcc_assert (GET_CODE (highpart) == CONST_INT);

  return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
			    NULL_RTX, NULL_RTX, 0, 0)
	  + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
			      NULL_RTX, NULL_RTX, 0, 0));
}
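
/* Worked example (illustration only, kept out of the build): a DImode
   constant whose two halves are both valid 8-bit-rotated ARM
   immediates should cost one insn per half, i.e. 2 in total.  The
   exact figure depends on arm_gen_constant, so treat the assertion as
   an expectation rather than a guarantee.  */
#if 0
static void
const_double_cost_example (void)
{
  /* Low word 0x0000ff00 and high word 0x00000001: each loadable with
     a single MOV.  */
  rtx val = immed_double_const (0x0000ff00, 0x00000001, DImode);

  gcc_assert (arm_const_double_inline_cost (val) == 2);
}
#endif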
/* Return true if it is worthwhile to split a 64-bit constant into two
   32-bit operations.  This is the case if optimizing for size, or
   if we have load delay slots, or if one 32-bit part can be done with
   a single data operation.  */

bool
arm_const_double_by_parts (rtx val)
{
  enum machine_mode mode = GET_MODE (val);
  rtx part;

  if (optimize_size || arm_ld_sched)
    return true;

  if (mode == VOIDmode)
    mode = DImode;

  part = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  part = gen_lowpart (SImode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  return false;
}
7659 /* Scan INSN and note any of its operands that need fixing.
7660 If DO_PUSHES is false we do not actually push any of the fixups
7661 needed. The function returns TRUE if any fixups were needed/pushed.
7662 This is used by arm_memory_load_p() which needs to know about loads
7663 of constants that will be converted into minipool loads. */
7665 note_invalid_constants (rtx insn
, HOST_WIDE_INT address
, int do_pushes
)
7667 bool result
= false;
7670 extract_insn (insn
);
7672 if (!constrain_operands (1))
7673 fatal_insn_not_found (insn
);
7675 if (recog_data
.n_alternatives
== 0)
7678 /* Fill in recog_op_alt with information about the constraints of
7680 preprocess_constraints ();
7682 for (opno
= 0; opno
< recog_data
.n_operands
; opno
++)
7684 /* Things we need to fix can only occur in inputs. */
7685 if (recog_data
.operand_type
[opno
] != OP_IN
)
7688 /* If this alternative is a memory reference, then any mention
7689 of constants in this alternative is really to fool reload
7690 into allowing us to accept one there. We need to fix them up
7691 now so that we output the right code. */
7692 if (recog_op_alt
[opno
][which_alternative
].memory_ok
)
7694 rtx op
= recog_data
.operand
[opno
];
7696 if (CONSTANT_P (op
))
7699 push_minipool_fix (insn
, address
, recog_data
.operand_loc
[opno
],
7700 recog_data
.operand_mode
[opno
], op
);
7703 else if (GET_CODE (op
) == MEM
7704 && GET_CODE (XEXP (op
, 0)) == SYMBOL_REF
7705 && CONSTANT_POOL_ADDRESS_P (XEXP (op
, 0)))
7709 rtx cop
= avoid_constant_pool_reference (op
);
7711 /* Casting the address of something to a mode narrower
7712 than a word can cause avoid_constant_pool_reference()
7713 to return the pool reference itself. That's no good to
7714 us here. Lets just hope that we can use the
7715 constant pool value directly. */
7717 cop
= get_pool_constant (XEXP (op
, 0));
7719 push_minipool_fix (insn
, address
,
7720 recog_data
.operand_loc
[opno
],
7721 recog_data
.operand_mode
[opno
], cop
);
7732 /* Gcc puts the pool in the wrong place for ARM, since we can only
7733 load addresses a limited distance around the pc. We do some
7734 special munging to move the constant pool values to the correct
7735 point in the code. */
7740 HOST_WIDE_INT address
= 0;
7743 minipool_fix_head
= minipool_fix_tail
= NULL
;
7745 /* The first insn must always be a note, or the code below won't
7746 scan it properly. */
7747 insn
= get_insns ();
7748 gcc_assert (GET_CODE (insn
) == NOTE
);
7750 /* Scan all the insns and record the operands that will need fixing. */
7751 for (insn
= next_nonnote_insn (insn
); insn
; insn
= next_nonnote_insn (insn
))
7753 if (TARGET_CIRRUS_FIX_INVALID_INSNS
7754 && (arm_cirrus_insn_p (insn
)
7755 || GET_CODE (insn
) == JUMP_INSN
7756 || arm_memory_load_p (insn
)))
7757 cirrus_reorg (insn
);
7759 if (GET_CODE (insn
) == BARRIER
)
7760 push_minipool_barrier (insn
, address
);
7761 else if (INSN_P (insn
))
7765 note_invalid_constants (insn
, address
, true);
7766 address
+= get_attr_length (insn
);
7768 /* If the insn is a vector jump, add the size of the table
7769 and skip the table. */
7770 if ((table
= is_jump_table (insn
)) != NULL
)
7772 address
+= get_jump_table_size (table
);
7778 fix
= minipool_fix_head
;
7780 /* Now scan the fixups and perform the required changes. */
7785 Mfix
* last_added_fix
;
7786 Mfix
* last_barrier
= NULL
;
7789 /* Skip any further barriers before the next fix. */
7790 while (fix
&& GET_CODE (fix
->insn
) == BARRIER
)
7793 /* No more fixes. */
7797 last_added_fix
= NULL
;
7799 for (ftmp
= fix
; ftmp
; ftmp
= ftmp
->next
)
7801 if (GET_CODE (ftmp
->insn
) == BARRIER
)
7803 if (ftmp
->address
>= minipool_vector_head
->max_address
)
7806 last_barrier
= ftmp
;
7808 else if ((ftmp
->minipool
= add_minipool_forward_ref (ftmp
)) == NULL
)
7811 last_added_fix
= ftmp
; /* Keep track of the last fix added. */
7814 /* If we found a barrier, drop back to that; any fixes that we
7815 could have reached but come after the barrier will now go in
7816 the next mini-pool. */
7817 if (last_barrier
!= NULL
)
7819 /* Reduce the refcount for those fixes that won't go into this
7821 for (fdel
= last_barrier
->next
;
7822 fdel
&& fdel
!= ftmp
;
7825 fdel
->minipool
->refcount
--;
7826 fdel
->minipool
= NULL
;
7829 ftmp
= last_barrier
;
7833 /* ftmp is first fix that we can't fit into this pool and
7834 there no natural barriers that we could use. Insert a
7835 new barrier in the code somewhere between the previous
7836 fix and this one, and arrange to jump around it. */
7837 HOST_WIDE_INT max_address
;
7839 /* The last item on the list of fixes must be a barrier, so
7840 we can never run off the end of the list of fixes without
7841 last_barrier being set. */
7844 max_address
= minipool_vector_head
->max_address
;
7845 /* Check that there isn't another fix that is in range that
7846 we couldn't fit into this pool because the pool was
7847 already too large: we need to put the pool before such an
7849 if (ftmp
->address
< max_address
)
7850 max_address
= ftmp
->address
;
7852 last_barrier
= create_fix_barrier (last_added_fix
, max_address
);
7855 assign_minipool_offsets (last_barrier
);
7859 if (GET_CODE (ftmp
->insn
) != BARRIER
7860 && ((ftmp
->minipool
= add_minipool_backward_ref (ftmp
))
7867 /* Scan over the fixes we have identified for this pool, fixing them
7868 up and adding the constants to the pool itself. */
7869 for (this_fix
= fix
; this_fix
&& ftmp
!= this_fix
;
7870 this_fix
= this_fix
->next
)
7871 if (GET_CODE (this_fix
->insn
) != BARRIER
)
7874 = plus_constant (gen_rtx_LABEL_REF (VOIDmode
,
7875 minipool_vector_label
),
7876 this_fix
->minipool
->offset
);
7877 *this_fix
->loc
= gen_rtx_MEM (this_fix
->mode
, addr
);
7880 dump_minipool (last_barrier
->insn
);
7884 /* From now on we must synthesize any constants that we can't handle
7885 directly. This can happen if the RTL gets split during final
7886 instruction generation. */
7887 after_arm_reorg
= 1;
7889 /* Free the minipool memory. */
7890 obstack_free (&minipool_obstack
, minipool_startobj
);
7893 /* Routines to output assembly language. */
7895 /* If the rtx is the correct value then return the string of the number.
7896 In this way we can ensure that valid double constants are generated even
7897 when cross compiling. */
7899 fp_immediate_constant (rtx x
)
7904 if (!fp_consts_inited
)
7907 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
7908 for (i
= 0; i
< 8; i
++)
7909 if (REAL_VALUES_EQUAL (r
, values_fp
[i
]))
7910 return strings_fp
[i
];
7915 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
7917 fp_const_from_val (REAL_VALUE_TYPE
*r
)
7921 if (!fp_consts_inited
)
7924 for (i
= 0; i
< 8; i
++)
7925 if (REAL_VALUES_EQUAL (*r
, values_fp
[i
]))
7926 return strings_fp
[i
];
7931 /* Output the operands of a LDM/STM instruction to STREAM.
7932 MASK is the ARM register set mask of which only bits 0-15 are important.
7933 REG is the base register, either the frame pointer or the stack pointer,
7934 INSTR is the possibly suffixed load or store instruction. */
7937 print_multi_reg (FILE *stream
, const char *instr
, unsigned reg
,
7941 bool not_first
= FALSE
;
7943 fputc ('\t', stream
);
7944 asm_fprintf (stream
, instr
, reg
);
7945 fputs (", {", stream
);
7947 for (i
= 0; i
<= LAST_ARM_REGNUM
; i
++)
7948 if (mask
& (1 << i
))
7951 fprintf (stream
, ", ");
7953 asm_fprintf (stream
, "%r", i
);
7957 fprintf (stream
, "}\n");
7961 /* Output a FLDMX instruction to STREAM.
7962 BASE if the register containing the address.
7963 REG and COUNT specify the register range.
7964 Extra registers may be added to avoid hardware bugs. */
7967 arm_output_fldmx (FILE * stream
, unsigned int base
, int reg
, int count
)
7971 /* Workaround ARM10 VFPr1 bug. */
7972 if (count
== 2 && !arm_arch6
)
7979 fputc ('\t', stream
);
7980 asm_fprintf (stream
, "fldmfdx\t%r!, {", base
);
7982 for (i
= reg
; i
< reg
+ count
; i
++)
7985 fputs (", ", stream
);
7986 asm_fprintf (stream
, "d%d", i
);
7988 fputs ("}\n", stream
);
7993 /* Output the assembly for a store multiple. */
7996 vfp_output_fstmx (rtx
* operands
)
8003 strcpy (pattern
, "fstmfdx\t%m0!, {%P1");
8004 p
= strlen (pattern
);
8006 gcc_assert (GET_CODE (operands
[1]) == REG
);
8008 base
= (REGNO (operands
[1]) - FIRST_VFP_REGNUM
) / 2;
8009 for (i
= 1; i
< XVECLEN (operands
[2], 0); i
++)
8011 p
+= sprintf (&pattern
[p
], ", d%d", base
+ i
);
8013 strcpy (&pattern
[p
], "}");
8015 output_asm_insn (pattern
, operands
);
8020 /* Emit RTL to save block of VFP register pairs to the stack. Returns the
8021 number of bytes pushed. */
8024 vfp_emit_fstmx (int base_reg
, int count
)
8031 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8032 register pairs are stored by a store multiple insn. We avoid this
8033 by pushing an extra pair. */
8034 if (count
== 2 && !arm_arch6
)
8036 if (base_reg
== LAST_VFP_REGNUM
- 3)
8041 /* ??? The frame layout is implementation defined. We describe
8042 standard format 1 (equivalent to a FSTMD insn and unused pad word).
8043 We really need some way of representing the whole block so that the
8044 unwinder can figure it out at runtime. */
8045 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (count
));
8046 dwarf
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (count
+ 1));
8048 reg
= gen_rtx_REG (DFmode
, base_reg
);
8052 = gen_rtx_SET (VOIDmode
,
8053 gen_rtx_MEM (BLKmode
,
8054 gen_rtx_PRE_DEC (BLKmode
, stack_pointer_rtx
)),
8055 gen_rtx_UNSPEC (BLKmode
,
8059 tmp
= gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
8060 gen_rtx_PLUS (SImode
, stack_pointer_rtx
,
8061 GEN_INT (-(count
* 8 + 4))));
8062 RTX_FRAME_RELATED_P (tmp
) = 1;
8063 XVECEXP (dwarf
, 0, 0) = tmp
;
8065 tmp
= gen_rtx_SET (VOIDmode
,
8066 gen_rtx_MEM (DFmode
, stack_pointer_rtx
),
8068 RTX_FRAME_RELATED_P (tmp
) = 1;
8069 XVECEXP (dwarf
, 0, 1) = tmp
;
8071 for (i
= 1; i
< count
; i
++)
8073 reg
= gen_rtx_REG (DFmode
, base_reg
);
8075 XVECEXP (par
, 0, i
) = gen_rtx_USE (VOIDmode
, reg
);
8077 tmp
= gen_rtx_SET (VOIDmode
,
8078 gen_rtx_MEM (DFmode
,
8079 gen_rtx_PLUS (SImode
,
8083 RTX_FRAME_RELATED_P (tmp
) = 1;
8084 XVECEXP (dwarf
, 0, i
+ 1) = tmp
;
8087 par
= emit_insn (par
);
8088 REG_NOTES (par
) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, dwarf
,
8090 RTX_FRAME_RELATED_P (par
) = 1;
8092 return count
* 8 + 4;
8096 /* Output a 'call' insn. */
8098 output_call (rtx
*operands
)
8100 gcc_assert (!arm_arch5
); /* Patterns should call blx <reg> directly. */
8102 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8103 if (REGNO (operands
[0]) == LR_REGNUM
)
8105 operands
[0] = gen_rtx_REG (SImode
, IP_REGNUM
);
8106 output_asm_insn ("mov%?\t%0, %|lr", operands
);
8109 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
8111 if (TARGET_INTERWORK
|| arm_arch4t
)
8112 output_asm_insn ("bx%?\t%0", operands
);
8114 output_asm_insn ("mov%?\t%|pc, %0", operands
);
8119 /* Output a 'call' insn that is a reference in memory. */
8121 output_call_mem (rtx
*operands
)
8123 if (TARGET_INTERWORK
&& !arm_arch5
)
8125 output_asm_insn ("ldr%?\t%|ip, %0", operands
);
8126 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
8127 output_asm_insn ("bx%?\t%|ip", operands
);
8129 else if (regno_use_in (LR_REGNUM
, operands
[0]))
8131 /* LR is used in the memory address. We load the address in the
8132 first instruction. It's safe to use IP as the target of the
8133 load since the call will kill it anyway. */
8134 output_asm_insn ("ldr%?\t%|ip, %0", operands
);
8136 output_asm_insn ("blx%?\t%|ip", operands
);
8139 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
8141 output_asm_insn ("bx%?\t%|ip", operands
);
8143 output_asm_insn ("mov%?\t%|pc, %|ip", operands
);
8148 output_asm_insn ("mov%?\t%|lr, %|pc", operands
);
8149 output_asm_insn ("ldr%?\t%|pc, %0", operands
);
8156 /* Output a move from arm registers to an fpa registers.
8157 OPERANDS[0] is an fpa register.
8158 OPERANDS[1] is the first registers of an arm register pair. */
8160 output_mov_long_double_fpa_from_arm (rtx
*operands
)
8162 int arm_reg0
= REGNO (operands
[1]);
8165 gcc_assert (arm_reg0
!= IP_REGNUM
);
8167 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
8168 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
8169 ops
[2] = gen_rtx_REG (SImode
, 2 + arm_reg0
);
8171 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops
);
8172 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands
);
8177 /* Output a move from an fpa register to arm registers.
8178 OPERANDS[0] is the first registers of an arm register pair.
8179 OPERANDS[1] is an fpa register. */
8181 output_mov_long_double_arm_from_fpa (rtx
*operands
)
8183 int arm_reg0
= REGNO (operands
[0]);
8186 gcc_assert (arm_reg0
!= IP_REGNUM
);
8188 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
8189 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
8190 ops
[2] = gen_rtx_REG (SImode
, 2 + arm_reg0
);
8192 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands
);
8193 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops
);
8197 /* Output a move from arm registers to arm registers of a long double
8198 OPERANDS[0] is the destination.
8199 OPERANDS[1] is the source. */
8201 output_mov_long_double_arm_from_arm (rtx
*operands
)
8203 /* We have to be careful here because the two might overlap. */
8204 int dest_start
= REGNO (operands
[0]);
8205 int src_start
= REGNO (operands
[1]);
8209 if (dest_start
< src_start
)
8211 for (i
= 0; i
< 3; i
++)
8213 ops
[0] = gen_rtx_REG (SImode
, dest_start
+ i
);
8214 ops
[1] = gen_rtx_REG (SImode
, src_start
+ i
);
8215 output_asm_insn ("mov%?\t%0, %1", ops
);
8220 for (i
= 2; i
>= 0; i
--)
8222 ops
[0] = gen_rtx_REG (SImode
, dest_start
+ i
);
8223 ops
[1] = gen_rtx_REG (SImode
, src_start
+ i
);
8224 output_asm_insn ("mov%?\t%0, %1", ops
);
8232 /* Output a move from arm registers to an fpa registers.
8233 OPERANDS[0] is an fpa register.
8234 OPERANDS[1] is the first registers of an arm register pair. */
8236 output_mov_double_fpa_from_arm (rtx
*operands
)
8238 int arm_reg0
= REGNO (operands
[1]);
8241 gcc_assert (arm_reg0
!= IP_REGNUM
);
8243 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
8244 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
8245 output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops
);
8246 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands
);
8250 /* Output a move from an fpa register to arm registers.
8251 OPERANDS[0] is the first registers of an arm register pair.
8252 OPERANDS[1] is an fpa register. */
8254 output_mov_double_arm_from_fpa (rtx
*operands
)
8256 int arm_reg0
= REGNO (operands
[0]);
8259 gcc_assert (arm_reg0
!= IP_REGNUM
);
8261 ops
[0] = gen_rtx_REG (SImode
, arm_reg0
);
8262 ops
[1] = gen_rtx_REG (SImode
, 1 + arm_reg0
);
8263 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands
);
8264 output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops
);
8268 /* Output a move between double words.
8269 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
8270 or MEM<-REG and all MEMs must be offsettable addresses. */
8272 output_move_double (rtx
*operands
)
8274 enum rtx_code code0
= GET_CODE (operands
[0]);
8275 enum rtx_code code1
= GET_CODE (operands
[1]);
8280 int reg0
= REGNO (operands
[0]);
8282 otherops
[0] = gen_rtx_REG (SImode
, 1 + reg0
);
8284 gcc_assert (code1
== MEM
); /* Constraints should ensure this. */
8286 switch (GET_CODE (XEXP (operands
[1], 0)))
8289 output_asm_insn ("ldm%?ia\t%m1, %M0", operands
);
8293 gcc_assert (TARGET_LDRD
);
8294 output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands
);
8298 output_asm_insn ("ldm%?db\t%m1!, %M0", operands
);
8302 output_asm_insn ("ldm%?ia\t%m1!, %M0", operands
);
8306 gcc_assert (TARGET_LDRD
);
8307 output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands
);
8312 otherops
[0] = operands
[0];
8313 otherops
[1] = XEXP (XEXP (XEXP (operands
[1], 0), 1), 0);
8314 otherops
[2] = XEXP (XEXP (XEXP (operands
[1], 0), 1), 1);
8316 if (GET_CODE (XEXP (operands
[1], 0)) == PRE_MODIFY
)
8318 if (reg_overlap_mentioned_p (otherops
[0], otherops
[2]))
8320 /* Registers overlap so split out the increment. */
8321 output_asm_insn ("add%?\t%1, %1, %2", otherops
);
8322 output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops
);
8325 output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops
);
8329 /* We only allow constant increments, so this is safe. */
8330 output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops
);
8336 output_asm_insn ("adr%?\t%0, %1", operands
);
8337 output_asm_insn ("ldm%?ia\t%0, %M0", operands
);
8341 if (arm_add_operand (XEXP (XEXP (operands
[1], 0), 1),
8342 GET_MODE (XEXP (XEXP (operands
[1], 0), 1))))
8344 otherops
[0] = operands
[0];
8345 otherops
[1] = XEXP (XEXP (operands
[1], 0), 0);
8346 otherops
[2] = XEXP (XEXP (operands
[1], 0), 1);
8348 if (GET_CODE (XEXP (operands
[1], 0)) == PLUS
)
8350 if (GET_CODE (otherops
[2]) == CONST_INT
)
8352 switch ((int) INTVAL (otherops
[2]))
8355 output_asm_insn ("ldm%?db\t%1, %M0", otherops
);
8358 output_asm_insn ("ldm%?da\t%1, %M0", otherops
);
8361 output_asm_insn ("ldm%?ib\t%1, %M0", otherops
);
8366 && (GET_CODE (otherops
[2]) == REG
8367 || (GET_CODE (otherops
[2]) == CONST_INT
8368 && INTVAL (otherops
[2]) > -256
8369 && INTVAL (otherops
[2]) < 256)))
8371 if (reg_overlap_mentioned_p (otherops
[0],
8374 /* Swap base and index registers over to
8375 avoid a conflict. */
8376 otherops
[1] = XEXP (XEXP (operands
[1], 0), 1);
8377 otherops
[2] = XEXP (XEXP (operands
[1], 0), 0);
8380 /* If both registers conflict, it will usually
8381 have been fixed by a splitter. */
8382 if (reg_overlap_mentioned_p (otherops
[0], otherops
[2]))
8384 output_asm_insn ("add%?\t%1, %1, %2", otherops
);
8385 output_asm_insn ("ldr%?d\t%0, [%1]",
8389 output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops
);
8393 if (GET_CODE (otherops
[2]) == CONST_INT
)
8395 if (!(const_ok_for_arm (INTVAL (otherops
[2]))))
8396 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops
);
8398 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
8401 output_asm_insn ("add%?\t%0, %1, %2", otherops
);
8404 output_asm_insn ("sub%?\t%0, %1, %2", otherops
);
8406 return "ldm%?ia\t%0, %M0";
8410 otherops
[1] = adjust_address (operands
[1], SImode
, 4);
8411 /* Take care of overlapping base/data reg. */
8412 if (reg_mentioned_p (operands
[0], operands
[1]))
8414 output_asm_insn ("ldr%?\t%0, %1", otherops
);
8415 output_asm_insn ("ldr%?\t%0, %1", operands
);
8419 output_asm_insn ("ldr%?\t%0, %1", operands
);
8420 output_asm_insn ("ldr%?\t%0, %1", otherops
);
8427 /* Constraints should ensure this. */
8428 gcc_assert (code0
== MEM
&& code1
== REG
);
8429 gcc_assert (REGNO (operands
[1]) != IP_REGNUM
);
8431 switch (GET_CODE (XEXP (operands
[0], 0)))
8434 output_asm_insn ("stm%?ia\t%m0, %M1", operands
);
8438 gcc_assert (TARGET_LDRD
);
8439 output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands
);
8443 output_asm_insn ("stm%?db\t%m0!, %M1", operands
);
8447 output_asm_insn ("stm%?ia\t%m0!, %M1", operands
);
8451 gcc_assert (TARGET_LDRD
);
8452 output_asm_insn ("str%?d\t%1, [%m0], #-8", operands
);
8457 otherops
[0] = operands
[1];
8458 otherops
[1] = XEXP (XEXP (XEXP (operands
[0], 0), 1), 0);
8459 otherops
[2] = XEXP (XEXP (XEXP (operands
[0], 0), 1), 1);
8461 if (GET_CODE (XEXP (operands
[0], 0)) == PRE_MODIFY
)
8462 output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops
);
8464 output_asm_insn ("str%?d\t%0, [%1], %2", otherops
);
8468 otherops
[2] = XEXP (XEXP (operands
[0], 0), 1);
8469 if (GET_CODE (otherops
[2]) == CONST_INT
)
8471 switch ((int) INTVAL (XEXP (XEXP (operands
[0], 0), 1)))
8474 output_asm_insn ("stm%?db\t%m0, %M1", operands
);
8478 output_asm_insn ("stm%?da\t%m0, %M1", operands
);
8482 output_asm_insn ("stm%?ib\t%m0, %M1", operands
);
8487 && (GET_CODE (otherops
[2]) == REG
8488 || (GET_CODE (otherops
[2]) == CONST_INT
8489 && INTVAL (otherops
[2]) > -256
8490 && INTVAL (otherops
[2]) < 256)))
8492 otherops
[0] = operands
[1];
8493 otherops
[1] = XEXP (XEXP (operands
[0], 0), 0);
8494 output_asm_insn ("str%?d\t%0, [%1, %2]", otherops
);
8500 otherops
[0] = adjust_address (operands
[0], SImode
, 4);
8501 otherops
[1] = gen_rtx_REG (SImode
, 1 + REGNO (operands
[1]));
8502 output_asm_insn ("str%?\t%1, %0", operands
);
8503 output_asm_insn ("str%?\t%1, %0", otherops
);
/* Output an ADD r, s, #n where n may be too big for one instruction.
   If adding zero to one register, output nothing.  */

const char *
output_add_immediate (rtx *operands)
{
  HOST_WIDE_INT n = INTVAL (operands[2]);

  if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
    {
      if (n < 0)
	output_multi_immediate (operands,
				"sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
				-n);
      else
	output_multi_immediate (operands,
				"add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
				n);
    }

  return "";
}
/* Output a multiple immediate operation.
   OPERANDS is the vector of operands referred to in the output patterns.
   INSTR1 is the output pattern to use for the first constant.
   INSTR2 is the output pattern to use for subsequent constants.
   IMMED_OP is the index of the constant slot in OPERANDS.
   N is the constant value.  */

static const char *
output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
			int immed_op, HOST_WIDE_INT n)
{
#if HOST_BITS_PER_WIDE_INT > 32
  n &= 0xffffffff;
#endif

  if (n == 0)
    {
      /* Quick and easy output.  */
      operands[immed_op] = const0_rtx;
      output_asm_insn (instr1, operands);
    }
  else
    {
      int i;
      const char * instr = instr1;

      /* Note that n is never zero here (which would give no output).  */
      for (i = 0; i < 32; i += 2)
	{
	  if (n & (3 << i))
	    {
	      operands[immed_op] = GEN_INT (n & (255 << i));
	      output_asm_insn (instr, operands);
	      instr = instr2;
	      i += 6;
	    }
	}
    }

  return "";
}
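
/* Illustration only (kept out of the build): the chunking rule used by
   output_multi_immediate.  A 32-bit constant is emitted 8 bits at a
   time, each chunk starting on an even bit position, so 0x00ff00ff
   becomes two instructions, e.g.
       add r0, r1, #0x000000ff
       add r0, r0, #0x00ff0000
   The counting helper below is invented for exposition and mirrors the
   loop above.  */
#if 0
static int
count_immediate_chunks_example (unsigned HOST_WIDE_INT n)
{
  int i, chunks = 0;

  for (i = 0; i < 32; i += 2)
    if (n & ((unsigned HOST_WIDE_INT) 3 << i))
      {
	chunks++;	/* One instruction per 8-bit chunk.  */
	i += 6;		/* Skip the rest of this byte.  */
      }

  /* count_immediate_chunks_example (0x00ff00ff) == 2.  */
  return chunks;
}
#endif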
8573 /* Return the appropriate ARM instruction for the operation code.
8574 The returned result should not be overwritten. OP is the rtx of the
8575 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
8578 arithmetic_instr (rtx op
, int shift_first_arg
)
8580 switch (GET_CODE (op
))
8586 return shift_first_arg
? "rsb" : "sub";
8602 /* Ensure valid constant shifts and return the appropriate shift mnemonic
8603 for the operation code. The returned result should not be overwritten.
8604 OP is the rtx code of the shift.
8605 On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
8608 shift_op (rtx op
, HOST_WIDE_INT
*amountp
)
8611 enum rtx_code code
= GET_CODE (op
);
8613 switch (GET_CODE (XEXP (op
, 1)))
8621 *amountp
= INTVAL (XEXP (op
, 1));
8643 gcc_assert (*amountp
!= -1);
8644 *amountp
= 32 - *amountp
;
8653 /* We never have to worry about the amount being other than a
8654 power of 2, since this case can never be reloaded from a reg. */
8655 gcc_assert (*amountp
!= -1);
8656 *amountp
= int_log2 (*amountp
);
8665 /* This is not 100% correct, but follows from the desire to merge
8666 multiplication by a power of 2 with the recognizer for a
8667 shift. >=32 is not a valid shift for "asl", so we must try and
8668 output a shift that produces the correct arithmetical result.
8669 Using lsr #32 is identical except for the fact that the carry bit
8670 is not set correctly if we set the flags; but we never use the
8671 carry bit from such an operation, so we can ignore that. */
8672 if (code
== ROTATERT
)
8673 /* Rotate is just modulo 32. */
8675 else if (*amountp
!= (*amountp
& 31))
8682 /* Shifts of 0 are no-ops. */
/* Obtain the shift from the POWER of two.  */

static HOST_WIDE_INT
int_log2 (HOST_WIDE_INT power)
{
  HOST_WIDE_INT shift = 0;

  while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
    {
      gcc_assert (shift <= 31);
      shift++;
    }

  return shift;
}
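
/* Illustration only (kept out of the build): int_log2 recovers the
   shift count from a power of two, e.g. int_log2 (8) == 3.  It is used
   above when a multiply by a power of two has been matched as a
   shift.  */
#if 0
static void
int_log2_example (void)
{
  gcc_assert (int_log2 (1) == 0);
  gcc_assert (int_log2 (8) == 3);
  gcc_assert (int_log2 ((HOST_WIDE_INT) 1 << 20) == 20);
}
#endif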
8706 /* Output a .ascii pseudo-op, keeping track of lengths. This is
8707 because /bin/as is horribly restrictive. The judgement about
8708 whether or not each character is 'printable' (and can be output as
8709 is) or not (and must be printed with an octal escape) must be made
8710 with reference to the *host* character set -- the situation is
8711 similar to that discussed in the comments above pp_c_char in
8712 c-pretty-print.c. */
8714 #define MAX_ASCII_LEN 51
8717 output_ascii_pseudo_op (FILE *stream
, const unsigned char *p
, int len
)
8722 fputs ("\t.ascii\t\"", stream
);
8724 for (i
= 0; i
< len
; i
++)
8728 if (len_so_far
>= MAX_ASCII_LEN
)
8730 fputs ("\"\n\t.ascii\t\"", stream
);
8736 if (c
== '\\' || c
== '\"')
8738 putc ('\\', stream
);
8746 fprintf (stream
, "\\%03o", c
);
8751 fputs ("\"\n", stream
);
8754 /* Compute the register save mask for registers 0 through 12
8755 inclusive. This code is used by arm_compute_save_reg_mask. */
8757 static unsigned long
8758 arm_compute_save_reg0_reg12_mask (void)
8760 unsigned long func_type
= arm_current_func_type ();
8761 unsigned long save_reg_mask
= 0;
8764 if (IS_INTERRUPT (func_type
))
8766 unsigned int max_reg
;
8767 /* Interrupt functions must not corrupt any registers,
8768 even call clobbered ones. If this is a leaf function
8769 we can just examine the registers used by the RTL, but
8770 otherwise we have to assume that whatever function is
8771 called might clobber anything, and so we have to save
8772 all the call-clobbered registers as well. */
8773 if (ARM_FUNC_TYPE (func_type
) == ARM_FT_FIQ
)
8774 /* FIQ handlers have registers r8 - r12 banked, so
8775 we only need to check r0 - r7, Normal ISRs only
8776 bank r14 and r15, so we must check up to r12.
8777 r13 is the stack pointer which is always preserved,
8778 so we do not need to consider it here. */
8783 for (reg
= 0; reg
<= max_reg
; reg
++)
8784 if (regs_ever_live
[reg
]
8785 || (! current_function_is_leaf
&& call_used_regs
[reg
]))
8786 save_reg_mask
|= (1 << reg
);
8788 /* Also save the pic base register if necessary. */
8790 && !TARGET_SINGLE_PIC_BASE
8791 && current_function_uses_pic_offset_table
)
8792 save_reg_mask
|= 1 << PIC_OFFSET_TABLE_REGNUM
;
8796 /* In the normal case we only need to save those registers
8797 which are call saved and which are used by this function. */
8798 for (reg
= 0; reg
<= 10; reg
++)
8799 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
8800 save_reg_mask
|= (1 << reg
);
8802 /* Handle the frame pointer as a special case. */
8803 if (! TARGET_APCS_FRAME
8804 && ! frame_pointer_needed
8805 && regs_ever_live
[HARD_FRAME_POINTER_REGNUM
]
8806 && ! call_used_regs
[HARD_FRAME_POINTER_REGNUM
])
8807 save_reg_mask
|= 1 << HARD_FRAME_POINTER_REGNUM
;
8809 /* If we aren't loading the PIC register,
8810 don't stack it even though it may be live. */
8812 && !TARGET_SINGLE_PIC_BASE
8813 && (regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
]
8814 || current_function_uses_pic_offset_table
))
8815 save_reg_mask
|= 1 << PIC_OFFSET_TABLE_REGNUM
;
8818 /* Save registers so the exception handler can modify them. */
8819 if (current_function_calls_eh_return
)
8825 reg
= EH_RETURN_DATA_REGNO (i
);
8826 if (reg
== INVALID_REGNUM
)
8828 save_reg_mask
|= 1 << reg
;
8832 return save_reg_mask
;
/* Compute a bit mask of which registers need to be
   saved on the stack for the current function.  */

static unsigned long
arm_compute_save_reg_mask (void)
{
  unsigned int save_reg_mask = 0;
  unsigned long func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* This should never really happen.  */
    return 0;

  /* If we are creating a stack frame, then we must save the frame pointer,
     IP (which will hold the old stack pointer), LR and the PC.  */
  if (frame_pointer_needed)
    save_reg_mask |=
      (1 << ARM_HARD_FRAME_POINTER_REGNUM)
      | (1 << IP_REGNUM)
      | (1 << LR_REGNUM)
      | (1 << PC_REGNUM);

  /* Volatile functions do not return, so there
     is no need to save any other registers.  */
  if (IS_VOLATILE (func_type))
    return save_reg_mask;

  save_reg_mask |= arm_compute_save_reg0_reg12_mask ();

  /* Decide if we need to save the link register.
     Interrupt routines have their own banked link register,
     so they never need to save it.
     Otherwise if we do not use the link register we do not need to save
     it.  If we are pushing other registers onto the stack however, we
     can save an instruction in the epilogue by pushing the link register
     now and then popping it back into the PC.  This incurs extra memory
     accesses though, so we only do it when optimizing for size, and only
     if we know that we will not need a fancy return sequence.  */
  if (regs_ever_live[LR_REGNUM]
      || (save_reg_mask
	  && optimize_size
	  && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
	  && !current_function_calls_eh_return))
    save_reg_mask |= 1 << LR_REGNUM;

  if (cfun->machine->lr_save_eliminated)
    save_reg_mask &= ~ (1 << LR_REGNUM);

  if (TARGET_REALLY_IWMMXT
      && ((bit_count (save_reg_mask)
	   + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
    {
      unsigned int reg;

      /* The total number of registers that are going to be pushed
	 onto the stack is odd.  We need to ensure that the stack
	 is 64-bit aligned before we start to save iWMMXt registers,
	 and also before we start to create locals.  (A local variable
	 might be a double or long long which we will load/store using
	 an iWMMXt instruction).  Therefore we need to push another
	 ARM register, so that the stack will be 64-bit aligned.  We
	 try to avoid using the arg registers (r0 - r3) as they might be
	 used to pass values in a tail call.  */
      for (reg = 4; reg <= 12; reg++)
	if ((save_reg_mask & (1 << reg)) == 0)
	  break;

      if (reg <= 12)
	save_reg_mask |= (1 << reg);
      else
	{
	  cfun->machine->sibcall_blocked = 1;
	  save_reg_mask |= (1 << 3);
	}
    }

  return save_reg_mask;
}
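
/* Illustration only (kept out of the build): the 64-bit alignment rule
   applied above for iWMMXt.  If the number of registers to be pushed
   plus the words of pretend arguments is odd, one extra ARM register
   (preferably r4-r12, else r3) is pushed so that the save area stays
   8-byte aligned.  The helper restates the parity test; its name is
   invented for exposition.  */
#if 0
static int
needs_alignment_padding_example (unsigned long reg_mask,
				 int pretend_args_size)
{
  return ((bit_count (reg_mask)
	   + ARM_NUM_INTS (pretend_args_size)) % 2) != 0;
}
#endif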
8915 /* Compute a bit mask of which registers need to be
8916 saved on the stack for the current function. */
8917 static unsigned long
8918 thumb_compute_save_reg_mask (void)
8924 for (reg
= 0; reg
< 12; reg
++)
8925 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
8928 if (flag_pic
&& !TARGET_SINGLE_PIC_BASE
)
8929 mask
|= (1 << PIC_OFFSET_TABLE_REGNUM
);
8931 if (TARGET_SINGLE_PIC_BASE
)
8932 mask
&= ~(1 << arm_pic_register
);
8934 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
8935 if (!frame_pointer_needed
&& CALLER_INTERWORKING_SLOT_SIZE
> 0)
8936 mask
|= 1 << ARM_HARD_FRAME_POINTER_REGNUM
;
8938 /* LR will also be pushed if any lo regs are pushed. */
8939 if (mask
& 0xff || thumb_force_lr_save ())
8940 mask
|= (1 << LR_REGNUM
);
8942 /* Make sure we have a low work register if we need one.
8943 We will need one if we are going to push a high register,
8944 but we are not currently intending to push a low register. */
8945 if ((mask
& 0xff) == 0
8946 && ((mask
& 0x0f00) || TARGET_BACKTRACE
))
8948 /* Use thumb_find_work_register to choose which register
8949 we will use. If the register is live then we will
8950 have to push it. Use LAST_LO_REGNUM as our fallback
8951 choice for the register to select. */
8952 reg
= thumb_find_work_register (1 << LAST_LO_REGNUM
);
8954 if (! call_used_regs
[reg
])
/* Return the number of bytes required to save VFP registers.  */

static int
arm_get_vfp_saved_size (void)
{
  unsigned int regno;
  int count;
  int saved;

  saved = 0;
  /* Space for saved VFP registers.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    {
      count = 0;
      for (regno = FIRST_VFP_REGNUM;
	   regno < LAST_VFP_REGNUM;
	   regno += 2)
	{
	  if ((!regs_ever_live[regno] || call_used_regs[regno])
	      && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
	    {
	      if (count > 0)
		{
		  /* Workaround ARM10 VFPr1 bug.  */
		  if (count == 2 && !arm_arch6)
		    count++;
		  saved += count * 8 + 4;
		}
	      count = 0;
	    }
	  else
	    count++;
	}
      if (count > 0)
	{
	  if (count == 2 && !arm_arch6)
	    count++;
	  saved += count * 8 + 4;
	}
    }
  return saved;
}
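
/* Worked example (illustration only, kept out of the build): with the
   FSTMX block format used below, a contiguous run of COUNT double
   registers takes COUNT * 8 + 4 bytes (8 per register plus one format
   word), so saving d8-d11 costs 4 * 8 + 4 = 36 bytes.  On pre-ARMv6
   cores a run of exactly two registers is widened to three to avoid
   the ARM10 VFPr1 erratum, giving 28 bytes instead of 20.  */
#if 0
static int
vfp_block_size_example (int count, int pre_armv6)
{
  /* Workaround ARM10 VFPr1 bug, as in arm_get_vfp_saved_size above.  */
  if (count == 2 && pre_armv6)
    count++;

  return count * 8 + 4;
}
#endif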
/* Generate a function exit sequence.  If REALLY_RETURN is false, then do
   everything bar the final return instruction.  */
output_return_instruction (rtx operand, int really_return, int reverse)
  char conditional[10];
  unsigned long live_regs_mask;
  unsigned long func_type;
  arm_stack_offsets *offsets;

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
      /* If this function was declared non-returning, and we have
	 found a tail call, then we have to trust that the called
	 function won't return.  */

	  /* Otherwise, trap an attempted return by aborting.  */
	  ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
				       : "abort");
	  assemble_external_libcall (ops[1]);
	  output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);

  gcc_assert (!current_function_calls_alloca || really_return);

  sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');

  return_used_this_function = 1;

  live_regs_mask = arm_compute_save_reg_mask ();

      const char * return_reg;

      /* If we do not have any special requirements for function exit
	 (e.g. interworking, or ISR) then we can load the return address
	 directly into the PC.  Otherwise we must load it into LR.  */
	  && ! TARGET_INTERWORK)
	return_reg = reg_names[PC_REGNUM];
	return_reg = reg_names[LR_REGNUM];

      if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
	  /* There are three possible reasons for the IP register
	     being saved.  1) a stack frame was created, in which case
	     IP contains the old stack pointer, or 2) an ISR routine
	     corrupted it, or 3) it was saved to align the stack on
	     iWMMXt.  In case 1, restore IP into SP, otherwise just
	     ignore it.  */
	  if (frame_pointer_needed)
	      live_regs_mask &= ~ (1 << IP_REGNUM);
	      live_regs_mask |= (1 << SP_REGNUM);
	    gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);

      /* On some ARM architectures it is faster to use LDR rather than
	 LDM to load a single register.  On other architectures, the
	 cost is the same.  In 26 bit mode, or for exception handlers,
	 we have to use LDM to load the PC so that the CPSR is also
	 restored.  */
      for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
	if (live_regs_mask == (1U << reg))

      if (reg <= LAST_ARM_REGNUM
	  && (reg != LR_REGNUM
	      || ! IS_INTERRUPT (func_type)))
	  sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
		   (reg == LR_REGNUM) ? return_reg : reg_names[reg]);

	  /* Generate the load multiple instruction to restore the
	     registers.  Note we can get here, even if
	     frame_pointer_needed is true, but only if sp already
	     points to the base of the saved core registers.  */
	  if (live_regs_mask & (1 << SP_REGNUM))
	      unsigned HOST_WIDE_INT stack_adjust;

	      offsets = arm_get_frame_offsets ();
	      stack_adjust = offsets->outgoing_args - offsets->saved_regs;
	      gcc_assert (stack_adjust == 0 || stack_adjust == 4);

	      if (stack_adjust && arm_arch5)
		sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
		  /* If we can't use ldmib (SA110 bug),
		     then try to pop r3 instead.  */
		    live_regs_mask |= 1 << 3;
		  sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
	    sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);

	  p = instr + strlen (instr);

	  for (reg = 0; reg <= SP_REGNUM; reg++)
	    if (live_regs_mask & (1 << reg))
		int l = strlen (reg_names[reg]);
		  memcpy (p, ", ", 2);
		memcpy (p, "%|", 2);
		memcpy (p + 2, reg_names[reg], l);

	  if (live_regs_mask & (1 << LR_REGNUM))
	      sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
	      /* If returning from an interrupt, restore the CPSR.  */
	      if (IS_INTERRUPT (func_type))

      output_asm_insn (instr, & operand);

      /* See if we need to generate an extra instruction to
	 perform the actual function return.  */
	  && func_type != ARM_FT_INTERWORKED
	  && (live_regs_mask & (1 << LR_REGNUM)) != 0)
	  /* The return has already been handled
	     by loading the LR into the PC.  */

      switch ((int) ARM_FUNC_TYPE (func_type))
	  sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);

	case ARM_FT_INTERWORKED:
	  sprintf (instr, "bx%s\t%%|lr", conditional);

	case ARM_FT_EXCEPTION:
	  sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);

	  /* Use bx if it's available.  */
	  if (arm_arch5 || arm_arch4t)
	    sprintf (instr, "bx%s\t%%|lr", conditional);
	    sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);

      output_asm_insn (instr, & operand);
/* Write the function name into the code section, directly preceding
   the function prologue.

   Code will be output similar to this:

	.ascii "arm_poke_function_name", 0
	.word 0xff000000 + (t1 - t0)
     arm_poke_function_name
	stmfd	sp!, {fp, ip, lr, pc}

   When performing a stack backtrace, code can inspect the value
   of 'pc' stored at 'fp' + 0.  If the trace function then looks
   at location pc - 12 and the top 8 bits are set, then we know
   that there is a function name embedded immediately preceding this
   location and has length ((pc[-3]) & 0xff000000).

   We assume that pc is declared as a pointer to an unsigned long.

   It is of no benefit to output the function name if we are assembling
   a leaf function.  These function types will not contain a stack
   backtrace structure, therefore it is not possible to determine the
   function name.  */
arm_poke_function_name (FILE *stream, const char *name)
  unsigned long alignlength;
  unsigned long length;

  length = strlen (name) + 1;
  alignlength = ROUND_UP_WORD (length);

  ASM_OUTPUT_ASCII (stream, name, length);
  ASM_OUTPUT_ALIGN (stream, 2);
  x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
  assemble_aligned_integer (UNITS_PER_WORD, x);
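
/* Illustrative sketch (not part of this file): how a backtrace routine
   could use the layout described above to recover the embedded name.
   It assumes a 32-bit unsigned long, that PC points at the saved 'pc'
   value, and that the marker word stores the padded name length in its
   low 24 bits (matching the 0xff000000 + alignlength value emitted
   above); the helper name is hypothetical.  */
#if 0
static const char *
find_embedded_function_name (const unsigned long *pc)
{
  unsigned long marker = pc[-3];	/* The word at pc - 12.  */

  if ((marker & 0xff000000) != 0xff000000)
    return 0;				/* No name embedded here.  */

  /* The NUL-terminated, word-padded name immediately precedes the
     marker word.  */
  return (const char *) &pc[-3] - (marker & 0x00ffffff);
}
#endif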
/* Place some comments into the assembler stream
   describing the current function.  */
arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
  unsigned long func_type;

      thumb_output_function_prologue (f, frame_size);

  gcc_assert (!arm_ccfsm_state && !arm_target_insn);

  func_type = arm_current_func_type ();

  switch ((int) ARM_FUNC_TYPE (func_type))
    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\t%@ Function supports interworking.\n");
      asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
      asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\t%@ ARM Exception Handler.\n");

  if (IS_NAKED (func_type))
    asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");

  if (IS_VOLATILE (func_type))
    asm_fprintf (f, "\t%@ Volatile: function does not return.\n");

  if (IS_NESTED (func_type))
    asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");

  asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
	       current_function_args_size,
	       current_function_pretend_args_size, frame_size);

  asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
	       frame_pointer_needed,
	       cfun->machine->uses_anonymous_args);

  if (cfun->machine->lr_save_eliminated)
    asm_fprintf (f, "\t%@ link register save eliminated.\n");

  if (current_function_calls_eh_return)
    asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");

#ifdef AOF_ASSEMBLER
    asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);

  return_used_this_function = 0;
arm_output_epilogue (rtx sibling)
  unsigned long saved_regs_mask;
  unsigned long func_type;
  /* Floats_offset is the offset from the "virtual" frame.  In an APCS
     frame that is $fp + 4 for a non-variadic function.  */
  int floats_offset = 0;
  FILE * f = asm_out_file;
  unsigned int lrm_count = 0;
  int really_return = (sibling == NULL);
  arm_stack_offsets *offsets;

  /* If we have already generated the return instruction
     then it is futile to generate anything else.  */
  if (use_return_insn (FALSE, sibling) && return_used_this_function)

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* Naked functions don't have epilogues.  */

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
      /* A volatile function should never return.  Call abort.  */
      op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
      assemble_external_libcall (op);
      output_asm_insn ("bl\t%a0", &op);

  /* If we are throwing an exception, then we really must be doing a
     return, so we can't tail-call.  */
  gcc_assert (!current_function_calls_eh_return || really_return);

  offsets = arm_get_frame_offsets ();
  saved_regs_mask = arm_compute_save_reg_mask ();

    lrm_count = bit_count (saved_regs_mask);

  floats_offset = offsets->saved_args;
  /* Compute how far away the floats will be.  */
  for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
    if (saved_regs_mask & (1 << reg))

  if (frame_pointer_needed)
      /* This variable is for the Virtual Frame Pointer, not VFP regs.  */
      int vfp_offset = offsets->frame;

      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
	  for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
		floats_offset += 12;
		asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
			     reg, FP_REGNUM, floats_offset - vfp_offset);

	  start_reg = LAST_FPA_REGNUM;

	  for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
	      if (regs_ever_live[reg] && !call_used_regs[reg])
		  floats_offset += 12;

		  /* We can't unstack more than four registers at once.  */
		  if (start_reg - reg == 3)
		      asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
				   reg, FP_REGNUM, floats_offset - vfp_offset);
		      start_reg = reg - 1;
		  if (reg != start_reg)
		    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
				 reg + 1, start_reg - reg,
				 FP_REGNUM, floats_offset - vfp_offset);
		  start_reg = reg - 1;

	  /* Just in case the last register checked also needs unstacking.  */
	  if (reg != start_reg)
	    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
			 reg + 1, start_reg - reg,
			 FP_REGNUM, floats_offset - vfp_offset);

      if (TARGET_HARD_FLOAT && TARGET_VFP)
	  /* The fldmx insn does not have base+offset addressing modes,
	     so we use IP to hold the address.  */
	  saved_size = arm_get_vfp_saved_size ();

	      floats_offset += saved_size;
	      asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
			   FP_REGNUM, floats_offset - vfp_offset);

	  start_reg = FIRST_VFP_REGNUM;
	  for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
	      if ((!regs_ever_live[reg] || call_used_regs[reg])
		  && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
		  if (start_reg != reg)
		    arm_output_fldmx (f, IP_REGNUM,
				      (start_reg - FIRST_VFP_REGNUM) / 2,
				      (reg - start_reg) / 2);
		  start_reg = reg + 2;

	  if (start_reg != reg)
	    arm_output_fldmx (f, IP_REGNUM,
			      (start_reg - FIRST_VFP_REGNUM) / 2,
			      (reg - start_reg) / 2);

	  /* The frame pointer is guaranteed to be non-double-word aligned.
	     This is because it is set to (old_stack_pointer - 4) and the
	     old_stack_pointer was double word aligned.  Thus the offset to
	     the iWMMXt registers to be loaded must also be non-double-word
	     sized, so that the resultant address *is* double-word aligned.
	     We can ignore floats_offset since that was already included in
	     the live_regs_mask.  */
	  lrm_count += (lrm_count % 2 ? 2 : 1);

	  for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
		asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
			     reg, FP_REGNUM, lrm_count * 4);

      /* saved_regs_mask should contain the IP, which at the time of stack
	 frame generation actually contains the old stack pointer.  So a
	 quick way to unwind the stack is just pop the IP register directly
	 into the stack pointer.  */
      gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
      saved_regs_mask &= ~ (1 << IP_REGNUM);
      saved_regs_mask |= (1 << SP_REGNUM);

      /* There are two registers left in saved_regs_mask - LR and PC.  We
	 only need to restore the LR register (the return address), but to
	 save time we can load it directly into the PC, unless we need a
	 special function exit sequence, or we are not really returning.  */
	  && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
	  && !current_function_calls_eh_return)
	/* Delete the LR from the register mask, so that the LR on
	   the stack is loaded into the PC in the register mask.  */
	saved_regs_mask &= ~ (1 << LR_REGNUM);
	saved_regs_mask &= ~ (1 << PC_REGNUM);

      /* We must use SP as the base register, because SP is one of the
	 registers being restored.  If an interrupt or page fault
	 happens in the ldm instruction, the SP might or might not
	 have been restored.  That would be bad, as then SP will no
	 longer indicate the safe area of stack, and we can get stack
	 corruption.  Using SP as the base register means that it will
	 be reset correctly to the original value, should an interrupt
	 occur.  If the stack pointer already points at the right
	 place, then omit the subtraction.  */
      if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
	  || current_function_calls_alloca)
	asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
		     4 * bit_count (saved_regs_mask));
      print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);

      if (IS_INTERRUPT (func_type))
	/* Interrupt handlers will have pushed the
	   IP onto the stack, so restore it now.  */
	print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, 1 << IP_REGNUM);

      /* Restore stack pointer if necessary.  */
      if (offsets->outgoing_args != offsets->saved_regs)
	  operands[0] = operands[1] = stack_pointer_rtx;
	  operands[2] = GEN_INT (offsets->outgoing_args - offsets->saved_regs);
	  output_add_immediate (operands);

      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
	  for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
	      asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
			   reg, SP_REGNUM);

	  start_reg = FIRST_FPA_REGNUM;

	  for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
	      if (regs_ever_live[reg] && !call_used_regs[reg])
		  if (reg - start_reg == 3)
		      asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
				   start_reg, SP_REGNUM);
		      start_reg = reg + 1;
		  if (reg != start_reg)
		    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
				 start_reg, reg - start_reg, SP_REGNUM);
		  start_reg = reg + 1;

	  /* Just in case the last register checked also needs unstacking.  */
	  if (reg != start_reg)
	    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
			 start_reg, reg - start_reg, SP_REGNUM);

      if (TARGET_HARD_FLOAT && TARGET_VFP)
	  start_reg = FIRST_VFP_REGNUM;
	  for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
	      if ((!regs_ever_live[reg] || call_used_regs[reg])
		  && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
		  if (start_reg != reg)
		    arm_output_fldmx (f, SP_REGNUM,
				      (start_reg - FIRST_VFP_REGNUM) / 2,
				      (reg - start_reg) / 2);
		  start_reg = reg + 2;

	  if (start_reg != reg)
	    arm_output_fldmx (f, SP_REGNUM,
			      (start_reg - FIRST_VFP_REGNUM) / 2,
			      (reg - start_reg) / 2);

	for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
	  if (regs_ever_live[reg] && !call_used_regs[reg])
	    asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);

      /* If we can, restore the LR into the PC.  */
      if (ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
	  && current_function_pretend_args_size == 0
	  && saved_regs_mask & (1 << LR_REGNUM)
	  && !current_function_calls_eh_return)
	  saved_regs_mask &= ~ (1 << LR_REGNUM);
	  saved_regs_mask |= (1 << PC_REGNUM);

      /* Load the registers off the stack.  If we only have one register
	 to load use the LDR instruction - it is faster.  */
      if (saved_regs_mask == (1 << LR_REGNUM))
	  asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
      else if (saved_regs_mask)
	  if (saved_regs_mask & (1 << SP_REGNUM))
	    /* Note - write back to the stack register is not enabled
	       (i.e. "ldmfd sp!...").  We know that the stack pointer is
	       in the list of registers and if we add writeback the
	       instruction becomes UNPREDICTABLE.  */
	    print_multi_reg (f, "ldmfd\t%r", SP_REGNUM, saved_regs_mask);
	    print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM, saved_regs_mask);

      if (current_function_pretend_args_size)
	  /* Unwind the pre-pushed regs.  */
	  operands[0] = operands[1] = stack_pointer_rtx;
	  operands[2] = GEN_INT (current_function_pretend_args_size);
	  output_add_immediate (operands);

  /* We may have already restored PC directly from the stack.  */
  if (!really_return || saved_regs_mask & (1 << PC_REGNUM))

  /* Stack adjustment for exception handler.  */
  if (current_function_calls_eh_return)
    asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
		 ARM_EH_STACKADJ_REGNUM);

  /* Generate the return instruction.  */
  switch ((int) ARM_FUNC_TYPE (func_type))
      asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);

    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);

    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);

      if (arm_arch5 || arm_arch4t)
	asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
	asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
			      HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
  arm_stack_offsets *offsets;

      /* Emit any call-via-reg trampolines that are needed for v4t support
	 of call_reg and call_value_reg type insns.  */
      for (regno = 0; regno < LR_REGNUM; regno++)
	  rtx label = cfun->machine->call_via[regno];

	      function_section (current_function_decl);
	      targetm.asm_out.internal_label (asm_out_file, "L",
					      CODE_LABEL_NUMBER (label));
	      asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);

      /* ??? Probably not safe to set this here, since it assumes that a
	 function will be emitted as assembly immediately after we generate
	 RTL for it.  This does not happen for inline functions.  */
      return_used_this_function = 0;

      /* We need to take into account any stack-frame rounding.  */
      offsets = arm_get_frame_offsets ();

      gcc_assert (!use_return_insn (FALSE, NULL)
		  || !return_used_this_function
		  || offsets->saved_regs == offsets->outgoing_args
		  || frame_pointer_needed);

      /* Reset the ARM-specific per-function variables.  */
      after_arm_reorg = 0;
/* Generate and emit an insn that we will recognize as a push_multi.
   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  */
emit_multi_reg_push (unsigned long mask)
  int dwarf_par_index;

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    if (mask & (1 << i))

  gcc_assert (num_regs && num_regs <= 16);

  /* We don't record the PC in the dwarf frame information.  */
  num_dwarf_regs = num_regs;
  if (mask & (1 << PC_REGNUM))

  /* For the body of the insn we are going to generate an UNSPEC in
     parallel with several USEs.  This allows the insn to be recognized
     by the push_multi pattern in the arm.md file.  The insn looks
     something like this:

       (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
	    (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
       (use (reg:SI 11 fp))
       (use (reg:SI 12 ip))
       (use (reg:SI 14 lr))
       (use (reg:SI 15 pc))

     For the frame note however, we try to be more explicit and actually
     show each register being stored into the stack frame, plus a (single)
     decrement of the stack pointer.  We do it this way in order to be
     friendly to the stack unwinding code, which only wants to see a single
     stack decrement per instruction.  The RTL we generate for the note looks
     something like this:

      (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
      (set (mem:SI (reg:SI sp)) (reg:SI r4))
      (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
      (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
      (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))

     This sequence is used both by the code to support stack unwinding for
     exceptions handlers and the code to generate dwarf2 frame debugging.  */

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
  dwarf_par_index = 1;

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
      if (mask & (1 << i))
	  reg = gen_rtx_REG (SImode, i);

	    = gen_rtx_SET (VOIDmode,
			   gen_rtx_MEM (BLKmode,
					gen_rtx_PRE_DEC (BLKmode,
							 stack_pointer_rtx)),
			   gen_rtx_UNSPEC (BLKmode,

	      tmp = gen_rtx_SET (VOIDmode,
				 gen_rtx_MEM (SImode, stack_pointer_rtx),
	      RTX_FRAME_RELATED_P (tmp) = 1;
	      XVECEXP (dwarf, 0, dwarf_par_index) = tmp;

  for (j = 1, i++; j < num_regs; i++)
      if (mask & (1 << i))
	  reg = gen_rtx_REG (SImode, i);

	  XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);

	      tmp = gen_rtx_SET (VOIDmode,
				 gen_rtx_MEM (SImode,
					      plus_constant (stack_pointer_rtx,
	      RTX_FRAME_RELATED_P (tmp) = 1;
	      XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;

  par = emit_insn (par);

  tmp = gen_rtx_SET (SImode,
		     gen_rtx_PLUS (SImode,
				   GEN_INT (-4 * num_regs)));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
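
/* Illustrative usage (a sketch, not code from this file): to push r4-r7
   together with LR in a prologue one could write

     insn = emit_multi_reg_push (0xf0 | (1 << LR_REGNUM));
     RTX_FRAME_RELATED_P (insn) = 1;

   where 0xf0 selects r4-r7.  The prologue code in this file builds the
   mask with arm_compute_save_reg_mask () rather than a hard-wired
   constant; the literal mask here is only for illustration.  */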
/* Calculate the size of the return value that is passed in registers.  */
arm_size_return_regs (void)
  enum machine_mode mode;

  if (current_function_return_rtx != 0)
    mode = GET_MODE (current_function_return_rtx);
    mode = DECL_MODE (DECL_RESULT (current_function_decl));

  return GET_MODE_SIZE (mode);
emit_sfm (int base_reg, int count)

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));

  reg = gen_rtx_REG (XFmode, base_reg++);

    = gen_rtx_SET (VOIDmode,
		   gen_rtx_MEM (BLKmode,
				gen_rtx_PRE_DEC (BLKmode, stack_pointer_rtx)),
		   gen_rtx_UNSPEC (BLKmode,

  tmp = gen_rtx_SET (VOIDmode,
		     gen_rtx_MEM (XFmode, stack_pointer_rtx), reg);
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 1) = tmp;

  for (i = 1; i < count; i++)
      reg = gen_rtx_REG (XFmode, base_reg++);
      XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);

      tmp = gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (XFmode,
				      plus_constant (stack_pointer_rtx,
      RTX_FRAME_RELATED_P (tmp) = 1;
      XVECEXP (dwarf, 0, i + 1) = tmp;

  tmp = gen_rtx_SET (VOIDmode,
		     gen_rtx_PLUS (SImode,
				   GEN_INT (-12 * count)));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  par = emit_insn (par);
  REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
/* Return true if the current function needs to save/restore LR.  */
thumb_force_lr_save (void)
  return !cfun->machine->lr_save_eliminated
	 && (!leaf_function_p ()
	     || thumb_far_jump_used_p ()
	     || regs_ever_live[LR_REGNUM]);
/* Compute the distance from register FROM to register TO.
   These can be the arg pointer (26), the soft frame pointer (25),
   the stack pointer (13) or the hard frame pointer (11).
   In thumb mode r7 is used as the soft frame pointer, if needed.
   Typical stack layout looks like this:

       old stack pointer -> |    |
			    |    | saved arguments for
			    |    | vararg functions
   hard FP & arg pointer -> |    | \
	soft frame pointer -> |    | /
     current stack pointer -> |    | /

   For a given function some or all of these stack components
   may not be needed, giving rise to the possibility of
   eliminating some of the registers.

   The values returned by this function must reflect the behavior
   of arm_expand_prologue() and arm_compute_save_reg_mask().

   The sign of the number returned reflects the direction of stack
   growth, so the values are positive for all eliminations except
   from the soft frame pointer to the hard frame pointer.

   SFP may point just inside the local variables block to ensure correct
   alignment.  */

/* Calculate stack offsets.  These are used to calculate register elimination
   offsets and in prologue/epilogue code.  */
static arm_stack_offsets *
arm_get_frame_offsets (void)
  struct arm_stack_offsets *offsets;
  unsigned long func_type;
  HOST_WIDE_INT frame_size;

  offsets = &cfun->machine->stack_offsets;

  /* We need to know if we are a leaf function.  Unfortunately, it
     is possible to be called after start_sequence has been called,
     which causes get_insns to return the insns for the sequence,
     not the function, which will cause leaf_function_p to return
     the incorrect result.

     to know about leaf functions once reload has completed, and the
     frame size cannot be changed after that time, so we can safely
     use the cached value.  */

  if (reload_completed)

  /* Initially this is the size of the local variables.  It will translated
     into an offset once we have determined the size of preceding data.  */
  frame_size = ROUND_UP_WORD (get_frame_size ());

  leaf = leaf_function_p ();

  /* Space for variadic functions.  */
  offsets->saved_args = current_function_pretend_args_size;

  offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);

      unsigned int regno;

      saved = bit_count (arm_compute_save_reg_mask ()) * 4;

      /* We know that SP will be doubleword aligned on entry, and we must
	 preserve that condition at any subroutine call.  We also require the
	 soft frame pointer to be doubleword aligned.  */

      if (TARGET_REALLY_IWMMXT)
	  /* Check for the call-saved iWMMXt registers.  */
	  for (regno = FIRST_IWMMXT_REGNUM;
	       regno <= LAST_IWMMXT_REGNUM;
	    if (regs_ever_live[regno] && ! call_used_regs[regno])

      func_type = arm_current_func_type ();
      if (! IS_VOLATILE (func_type))
	  /* Space for saved FPA registers.  */
	  for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
	    if (regs_ever_live[regno] && ! call_used_regs[regno])

	  /* Space for saved VFP registers.  */
	  if (TARGET_HARD_FLOAT && TARGET_VFP)
	    saved += arm_get_vfp_saved_size ();
  else /* TARGET_THUMB */
      saved = bit_count (thumb_compute_save_reg_mask ()) * 4;
      if (TARGET_BACKTRACE)

  /* Saved registers include the stack frame.  */
  offsets->saved_regs = offsets->saved_args + saved;
  offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
  /* A leaf function does not need any stack alignment if it has nothing
     on the stack.  */
  if (leaf && frame_size == 0)
      offsets->outgoing_args = offsets->soft_frame;

  /* Ensure SFP has the correct alignment.  */
  if (ARM_DOUBLEWORD_ALIGN
      && (offsets->soft_frame & 7))
    offsets->soft_frame += 4;

  offsets->outgoing_args = offsets->soft_frame + frame_size
			   + current_function_outgoing_args_size;

  if (ARM_DOUBLEWORD_ALIGN)
      /* Ensure SP remains doubleword aligned.  */
      if (offsets->outgoing_args & 7)
	offsets->outgoing_args += 4;
      gcc_assert (!(offsets->outgoing_args & 7));
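
/* Illustrative example (not from this file): for a hypothetical
   non-variadic ARM function that saves {r4, r5, r6, lr}, has 12 bytes of
   locals, no outgoing arguments and no frame pointer, and assuming
   CALLER_INTERWORKING_SLOT_SIZE is 0:

     saved_args    = 0			(no pretend args)
     frame         = 0
     saved_regs    = 0 + 4 * 4		= 16
     soft_frame    = 16			(already 8-byte aligned)
     frame_size    = ROUND_UP_WORD (12)	= 12
     outgoing_args = 16 + 12 + 0	= 28, padded to 32 when
					  ARM_DOUBLEWORD_ALIGN holds.  */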
/* Calculate the relative offsets for the different stack pointers.  Positive
   offsets are in the direction of stack growth.  */
arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
  arm_stack_offsets *offsets;

  offsets = arm_get_frame_offsets ();

  /* OK, now we have enough information to compute the distances.
     There must be an entry in these switch tables for each pair
     of registers in ELIMINABLE_REGS, even if some of the entries
     seem to be redundant or useless.  */

    case ARG_POINTER_REGNUM:
	case THUMB_HARD_FRAME_POINTER_REGNUM:

	case FRAME_POINTER_REGNUM:
	  /* This is the reverse of the soft frame pointer
	     to hard frame pointer elimination below.  */
	  return offsets->soft_frame - offsets->saved_args;

	case ARM_HARD_FRAME_POINTER_REGNUM:
	  /* If there is no stack frame then the hard
	     frame pointer and the arg pointer coincide.  */
	  if (offsets->frame == offsets->saved_regs)
	  /* FIXME:  Not sure about this.  Maybe we should always return 0 ?  */
	  return (frame_pointer_needed
		  && cfun->static_chain_decl != NULL
		  && ! cfun->machine->uses_anonymous_args) ? 4 : 0;

	case STACK_POINTER_REGNUM:
	  /* If nothing has been pushed on the stack at all
	     then this will return -4.  This *is* correct!  */
	  return offsets->outgoing_args - (offsets->saved_args + 4);

	  gcc_unreachable ();
      gcc_unreachable ();

    case FRAME_POINTER_REGNUM:
	case THUMB_HARD_FRAME_POINTER_REGNUM:

	case ARM_HARD_FRAME_POINTER_REGNUM:
	  /* The hard frame pointer points to the top entry in the
	     stack frame.  The soft frame pointer to the bottom entry
	     in the stack frame.  If there is no stack frame at all,
	     then they are identical.  */
	  return offsets->frame - offsets->soft_frame;

	case STACK_POINTER_REGNUM:
	  return offsets->outgoing_args - offsets->soft_frame;

	  gcc_unreachable ();
      gcc_unreachable ();

      /* You cannot eliminate from the stack pointer.
	 In theory you could eliminate from the hard frame
	 pointer to the stack pointer, but this will never
	 happen, since if a stack frame is not needed the
	 hard frame pointer will never be used.  */
      gcc_unreachable ();
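
/* Continuing the illustrative layout sketched after arm_get_frame_offsets
   (not from this file): eliminating ARG_POINTER_REGNUM into
   STACK_POINTER_REGNUM there would yield
   outgoing_args - (saved_args + 4) = 32 - (0 + 4) = 28, i.e. the argument
   pointer sits 28 bytes above the final stack pointer.  */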
/* Generate the prologue instructions for entry into an ARM function.  */
arm_expand_prologue (void)
  unsigned long live_regs_mask;
  unsigned long func_type;
  int saved_pretend_args = 0;
  int saved_regs = 0;
  unsigned HOST_WIDE_INT args_to_push;
  arm_stack_offsets *offsets;

  func_type = arm_current_func_type ();

  /* Naked functions don't have prologues.  */
  if (IS_NAKED (func_type))

  /* Make a copy of c_f_p_a_s as we may need to modify it locally.  */
  args_to_push = current_function_pretend_args_size;

  /* Compute which register we will have to save onto the stack.  */
  live_regs_mask = arm_compute_save_reg_mask ();

  ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);

  if (frame_pointer_needed)
      if (IS_INTERRUPT (func_type))
	  /* Interrupt functions must not corrupt any registers.
	     Creating a frame pointer however, corrupts the IP
	     register, so we must push it first.  */
	  insn = emit_multi_reg_push (1 << IP_REGNUM);

	  /* Do not set RTX_FRAME_RELATED_P on this insn.
	     The dwarf stack unwinding code only wants to see one
	     stack decrement per function, and this is not it.  If
	     this instruction is labeled as being part of the frame
	     creation sequence then dwarf2out_frame_debug_expr will
	     die when it encounters the assignment of IP to FP
	     later on, since the use of SP here establishes SP as
	     the CFA register and not IP.

	     Anyway this instruction is not really part of the stack
	     frame creation although it is part of the prologue.  */
      else if (IS_NESTED (func_type))
	  /* The Static chain register is the same as the IP register
	     used as a scratch register during stack frame creation.
	     To get around this need to find somewhere to store IP
	     whilst the frame is being created.  We try the following

	     1. The last argument register.
	     2. A slot on the stack above the frame.  (This only
		works if the function is not a varargs function).
	     3. Register r3, after pushing the argument registers

	     Note - we only need to tell the dwarf2 backend about the SP
	     adjustment in the second variant; the static chain register
	     doesn't need to be unwound, as it doesn't contain a value
	     inherited from the caller.  */

	  if (regs_ever_live[3] == 0)
	      insn = gen_rtx_REG (SImode, 3);
	      insn = gen_rtx_SET (SImode, insn, ip_rtx);
	      insn = emit_insn (insn);
	  else if (args_to_push == 0)
	      insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
	      insn = gen_rtx_MEM (SImode, insn);
	      insn = gen_rtx_SET (VOIDmode, insn, ip_rtx);
	      insn = emit_insn (insn);

	      /* Just tell the dwarf backend that we adjusted SP.  */
	      dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				   gen_rtx_PLUS (SImode, stack_pointer_rtx,
						 GEN_INT (-fp_offset)));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
						    dwarf, REG_NOTES (insn));

	      /* Store the args on the stack.  */
	      if (cfun->machine->uses_anonymous_args)
		insn = emit_multi_reg_push
		  ((0xf0 >> (args_to_push / 4)) & 0xf);
		  (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			       GEN_INT (- args_to_push)));

	      RTX_FRAME_RELATED_P (insn) = 1;

	      saved_pretend_args = 1;
	      fp_offset = args_to_push;

	      /* Now reuse r3 to preserve IP.  */
	      insn = gen_rtx_REG (SImode, 3);
	      insn = gen_rtx_SET (SImode, insn, ip_rtx);
	      (void) emit_insn (insn);

	  insn = gen_rtx_PLUS (SImode, stack_pointer_rtx, GEN_INT (fp_offset));
	  insn = gen_rtx_SET (SImode, ip_rtx, insn);
	  insn = gen_movsi (ip_rtx, stack_pointer_rtx);
	  insn = emit_insn (insn);
	  RTX_FRAME_RELATED_P (insn) = 1;

      /* Push the argument registers, or reserve space for them.  */
      if (cfun->machine->uses_anonymous_args)
	insn = emit_multi_reg_push
	  ((0xf0 >> (args_to_push / 4)) & 0xf);
	  (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
		       GEN_INT (- args_to_push)));
      RTX_FRAME_RELATED_P (insn) = 1;

  /* If this is an interrupt service routine, and the link register
     is going to be pushed, and we are not creating a stack frame,
     (which would involve an extra push of IP and a pop in the epilogue)
     subtracting four from LR now will mean that the function return
     can be done with a single instruction.  */
  if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
      && (live_regs_mask & (1 << LR_REGNUM)) != 0
      && ! frame_pointer_needed)
    emit_insn (gen_rtx_SET (SImode,
			    gen_rtx_REG (SImode, LR_REGNUM),
			    gen_rtx_PLUS (SImode,
					  gen_rtx_REG (SImode, LR_REGNUM),

  if (live_regs_mask)
      insn = emit_multi_reg_push (live_regs_mask);
      saved_regs += bit_count (live_regs_mask) * 4;
      RTX_FRAME_RELATED_P (insn) = 1;

    for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
      if (regs_ever_live[reg] && ! call_used_regs[reg])
	  insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
	  insn = gen_rtx_MEM (V2SImode, insn);
	  insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
					 gen_rtx_REG (V2SImode, reg)));
	  RTX_FRAME_RELATED_P (insn) = 1;

  if (! IS_VOLATILE (func_type))
      /* Save any floating point call-saved registers used by this
	 function.  */
      if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
	  for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
	    if (regs_ever_live[reg] && !call_used_regs[reg])
		insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
		insn = gen_rtx_MEM (XFmode, insn);
		insn = emit_insn (gen_rtx_SET (VOIDmode, insn,
					       gen_rtx_REG (XFmode, reg)));
		RTX_FRAME_RELATED_P (insn) = 1;

	  start_reg = LAST_FPA_REGNUM;

	  for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
	      if (regs_ever_live[reg] && !call_used_regs[reg])
		  if (start_reg - reg == 3)
		      insn = emit_sfm (reg, 4);
		      RTX_FRAME_RELATED_P (insn) = 1;
		      start_reg = reg - 1;
		  if (start_reg != reg)
		      insn = emit_sfm (reg + 1, start_reg - reg);
		      RTX_FRAME_RELATED_P (insn) = 1;
		      saved_regs += (start_reg - reg) * 12;
		  start_reg = reg - 1;

	  if (start_reg != reg)
	      insn = emit_sfm (reg + 1, start_reg - reg);
	      saved_regs += (start_reg - reg) * 12;
	      RTX_FRAME_RELATED_P (insn) = 1;

      if (TARGET_HARD_FLOAT && TARGET_VFP)
	  start_reg = FIRST_VFP_REGNUM;

	  for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
	      if ((!regs_ever_live[reg] || call_used_regs[reg])
		  && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
		  if (start_reg != reg)
		    saved_regs += vfp_emit_fstmx (start_reg,
						  (reg - start_reg) / 2);
		  start_reg = reg + 2;
	  if (start_reg != reg)
	    saved_regs += vfp_emit_fstmx (start_reg,
					  (reg - start_reg) / 2);

  if (frame_pointer_needed)
      /* Create the new frame pointer.  */
      insn = GEN_INT (-(4 + args_to_push + fp_offset));
      insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (IS_NESTED (func_type))
	  /* Recover the static chain register.  */
	  if (regs_ever_live[3] == 0
	      || saved_pretend_args)
	    insn = gen_rtx_REG (SImode, 3);
	  else /* if (current_function_pretend_args_size == 0) */
	      insn = gen_rtx_PLUS (SImode, hard_frame_pointer_rtx,
	      insn = gen_rtx_MEM (SImode, insn);
	  emit_insn (gen_rtx_SET (SImode, ip_rtx, insn));
	  /* Add a USE to stop propagate_one_insn() from barfing.  */
	  emit_insn (gen_prologue_use (ip_rtx));

  offsets = arm_get_frame_offsets ();
  if (offsets->outgoing_args != offsets->saved_args + saved_regs)
      /* This add can produce multiple insns for a large constant, so we
	 need to get tricky.  */
      rtx last = get_last_insn ();

      amount = GEN_INT (offsets->saved_args + saved_regs
			- offsets->outgoing_args);

      insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,

	  last = last ? NEXT_INSN (last) : get_insns ();
	  RTX_FRAME_RELATED_P (last) = 1;
      while (last != insn);

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      if (frame_pointer_needed)
	insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
					 hard_frame_pointer_rtx));

    arm_load_pic_register (INVALID_REGNUM);

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if the user has requested no
     scheduling in the prolog.  */
  if (current_function_profile || !TARGET_SCHED_PROLOG)
    emit_insn (gen_blockage ());

  /* If the link register is being kept alive, with the return address in it,
     then make sure that it does not get reused by the ce2 pass.  */
  if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
      emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
      cfun->machine->lr_save_eliminated = 1;
/* If CODE is 'd', then the X is a condition operand and the instruction
   should only be executed if the condition is true.
   if CODE is 'D', then the X is a condition operand and the instruction
   should only be executed if the condition is false: however, if the mode
   of the comparison is CCFPEmode, then always execute the instruction -- we
   do this because in these circumstances !GE does not necessarily imply LT;
   in these cases the instruction pattern will take care to make sure that
   an instruction containing %d will follow, thereby undoing the effects of
   doing this instruction unconditionally.
   If CODE is 'N' then X is a floating point operand that must be negated
   If CODE is 'B' then output a bitwise inverted value of X (a const int).
   If X is a REG and CODE is `M', output a ldm/stm style multi-reg.  */
arm_print_operand (FILE *stream, rtx x, int code)
      fputs (ASM_COMMENT_START, stream);

      fputs (user_label_prefix, stream);

      fputs (REGISTER_PREFIX, stream);

      if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
	      output_operand_lossage ("predicated Thumb instruction");
	  if (current_insn_predicate != NULL)
	      output_operand_lossage
		("predicated instruction in conditional sequence");

	  fputs (arm_condition_codes[arm_current_cc], stream);
      else if (current_insn_predicate)
	  enum arm_cond_code code;

	      output_operand_lossage ("predicated Thumb instruction");

	  code = get_arm_condition_code (current_insn_predicate);
	  fputs (arm_condition_codes[code], stream);

	REAL_VALUE_FROM_CONST_DOUBLE (r, x);
	r = REAL_VALUE_NEGATE (r);
	fprintf (stream, "%s", fp_const_from_val (&r));

      if (GET_CODE (x) == CONST_INT)
	  val = ARM_SIGN_EXTEND (~INTVAL (x));
	  fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
	  putc ('~', stream);
	  output_addr_const (stream, x);

      fprintf (stream, "%s", arithmetic_instr (x, 1));

    /* Truncate Cirrus shift counts.  */
      if (GET_CODE (x) == CONST_INT)
	  fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
      arm_print_operand (stream, x, 0);

      fprintf (stream, "%s", arithmetic_instr (x, 0));

	const char * shift = shift_op (x, &val);
	    fprintf (stream, ", %s ", shift_op (x, &val));
	      arm_print_operand (stream, XEXP (x, 1), 0);
	      fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);

      /* An explanation of the 'Q', 'R' and 'H' register operands:

	 In a pair of registers containing a DI or DF value the 'Q'
	 operand returns the register number of the register containing
	 the least significant part of the value.  The 'R' operand returns
	 the register number of the register containing the most
	 significant part of the value.

	 The 'H' operand returns the higher of the two register numbers.
	 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
	 same as the 'Q' operand, since the most significant part of the
	 value is held in the lower number register.  The reverse is true
	 on systems where WORDS_BIG_ENDIAN is false.

	 The purpose of these operands is to distinguish between cases
	 where the endian-ness of the values is important (for example
	 when they are added together), and cases where the endian-ness
	 is irrelevant, but the order of register operations is important.
	 For example when loading a value from memory into a register
	 pair, the endian-ness does not matter.  Provided that the value
	 from the lower memory address is put into the lower numbered
	 register, and the value from the higher address is put into the
	 higher numbered register, the load will work regardless of whether
	 the value being loaded is big-wordian or little-wordian.  The
	 order of the two register loads can matter however, if the address
	 of the memory location is actually held in one of the registers
	 being overwritten by the load.  */
      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
	  output_operand_lossage ("invalid operand for code '%c'", code);

      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));

      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
	  output_operand_lossage ("invalid operand for code '%c'", code);

      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));

      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
	  output_operand_lossage ("invalid operand for code '%c'", code);

      asm_fprintf (stream, "%r", REGNO (x) + 1);

      asm_fprintf (stream, "%r",
		   GET_CODE (XEXP (x, 0)) == REG
		   ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));

      asm_fprintf (stream, "{%r-%r}",
		   REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);

      /* CONST_TRUE_RTX means always -- that's the default.  */
      if (x == const_true_rtx)

      if (!COMPARISON_P (x))
	  output_operand_lossage ("invalid operand for code '%c'", code);

      fputs (arm_condition_codes[get_arm_condition_code (x)],

      /* CONST_TRUE_RTX means not always -- i.e. never.  We shouldn't ever
	 want to do that.  */
      if (x == const_true_rtx)
	  output_operand_lossage ("instruction never executed");
      if (!COMPARISON_P (x))
	  output_operand_lossage ("invalid operand for code '%c'", code);

      fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
				 (get_arm_condition_code (x))],

    /* Cirrus registers can be accessed in a variety of ways:
	 single floating point (f)
	 double floating point (d)
	 64bit integer (dx).  */
    case 'W':			/* Cirrus register in F mode.  */
    case 'X':			/* Cirrus register in D mode.  */
    case 'Y':			/* Cirrus register in FX mode.  */
    case 'Z':			/* Cirrus register in DX mode.  */
      gcc_assert (GET_CODE (x) == REG
		  && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);

      fprintf (stream, "mv%s%s",
	       : code == 'X' ? "d"
	       : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);

    /* Print cirrus register in the mode specified by the register's mode.  */
	int mode = GET_MODE (x);

	if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
	    output_operand_lossage ("invalid operand for code '%c'", code);

	fprintf (stream, "mv%s%s",
		 mode == DFmode ? "d"
		 : mode == SImode ? "fx"
		 : mode == DImode ? "dx"
		 : "f", reg_names[REGNO (x)] + 2);

      if (GET_CODE (x) != REG
	  || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
	  || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
	/* Bad value for wCG register number.  */
	  output_operand_lossage ("invalid operand for code '%c'", code);

	fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);

      /* Print an iWMMXt control register name.  */
      if (GET_CODE (x) != CONST_INT
	  || INTVAL (x) >= 16)
	/* Bad value for wC register number.  */
	  output_operand_lossage ("invalid operand for code '%c'", code);

	  static const char * wc_reg_names [16] =
	      "wCID",  "wCon",  "wCSSF", "wCASF",
	      "wC4",   "wC5",   "wC6",   "wC7",
	      "wCGR0", "wCGR1", "wCGR2", "wCGR3",
	      "wC12",  "wC13",  "wC14",  "wC15"

	  fprintf (stream, wc_reg_names [INTVAL (x)]);

      /* Print a VFP double precision register name.  */
	int mode = GET_MODE (x);

	if (mode != DImode && mode != DFmode)
	    output_operand_lossage ("invalid operand for code '%c'", code);

	if (GET_CODE (x) != REG
	    || !IS_VFP_REGNUM (REGNO (x)))
	    output_operand_lossage ("invalid operand for code '%c'", code);

	num = REGNO(x) - FIRST_VFP_REGNUM;
	    output_operand_lossage ("invalid operand for code '%c'", code);

	fprintf (stream, "d%d", num >> 1);

	  output_operand_lossage ("missing operand");

      switch (GET_CODE (x))
	  asm_fprintf (stream, "%r", REGNO (x));

	  output_memory_reference_mode = GET_MODE (x);
	  output_address (XEXP (x, 0));

	  fprintf (stream, "#%s", fp_immediate_constant (x));

	  gcc_assert (GET_CODE (x) != NEG);
	  fputc ('#', stream);
	  output_addr_const (stream, x);
#ifndef AOF_ASSEMBLER
/* Target hook for assembling integer objects.  The ARM version needs to
   handle word-sized values specially.  */
arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
  if (size == UNITS_PER_WORD && aligned_p)
      fputs ("\t.word\t", asm_out_file);
      output_addr_const (asm_out_file, x);

      /* Mark symbols as position independent.  We only do this in the
	 .text segment, not in the .data segment.  */
      if (NEED_GOT_RELOC && flag_pic && making_const_table &&
	  (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
	  if (GET_CODE (x) == SYMBOL_REF
	      && (CONSTANT_POOL_ADDRESS_P (x)
		  || SYMBOL_REF_LOCAL_P (x)))
	    fputs ("(GOTOFF)", asm_out_file);
	  else if (GET_CODE (x) == LABEL_REF)
	    fputs ("(GOTOFF)", asm_out_file);
	    fputs ("(GOT)", asm_out_file);
      fputc ('\n', asm_out_file);

  if (arm_vector_mode_supported_p (GET_MODE (x)))
      gcc_assert (GET_CODE (x) == CONST_VECTOR);

      units = CONST_VECTOR_NUNITS (x);

      switch (GET_MODE (x))
	case V2SImode: size = 4; break;
	case V4HImode: size = 2; break;
	case V8QImode: size = 1; break;
	  gcc_unreachable ();

      for (i = 0; i < units; i++)
	  elt = CONST_VECTOR_ELT (x, i);
	    (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);

  return default_assemble_integer (x, size, aligned_p);
/* Add a function to the list of static constructors.  */
arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
  if (!TARGET_AAPCS_BASED)
      default_named_section_asm_out_constructor (symbol, priority);

  /* Put these in the .init_array section, using a special relocation.  */
  assemble_align (POINTER_SIZE);
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, symbol);
  fputs ("(target1)\n", asm_out_file);
/* A finite state machine takes care of noticing whether or not instructions
   can be conditionally executed, and thus decrease execution time and code
   size by deleting branch instructions.  The fsm is controlled by
   final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */

/* The state of the fsm controlling condition codes are:
   0: normal, do nothing special
   1: make ASM_OUTPUT_OPCODE not output this instruction
   2: make ASM_OUTPUT_OPCODE not output this instruction
   3: make instructions conditional
   4: make instructions conditional

   State transitions (state->state by whom under condition):
   0 -> 1 final_prescan_insn if the `target' is a label
   0 -> 2 final_prescan_insn if the `target' is an unconditional branch
   1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
   2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
   3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
	  (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
   4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
	  (the target insn is arm_target_insn).

   If the jump clobbers the conditions then we use states 2 and 4.

   A similar thing can be done with conditional return insns.

   XXX In case the `target' is an unconditional branch, this conditionalising
   of the instructions always reduces code size, but not always execution
   time.  But then, I want to reduce the code size to somewhere near what
   /bin/cc produces.  */
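
/* Illustrative example (a sketch, not from this file) of the transformation
   the fsm enables.  A short forward branch such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
     .L1:

   can instead be emitted with the skipped instruction predicated on the
   inverse condition, removing the branch entirely:

	cmp	r0, #0
	addne	r1, r1, #1  */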
/* Returns the index of the ARM condition code string in
   `arm_condition_codes'.  COMPARISON should be an rtx like
   `(eq (...) (...))'.  */
static enum arm_cond_code
get_arm_condition_code (rtx comparison)
  enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
  enum rtx_code comp_code = GET_CODE (comparison);

  if (GET_MODE_CLASS (mode) != MODE_CC)
    mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
			   XEXP (comparison, 1));

    case CC_DNEmode: code = ARM_NE; goto dominance;
    case CC_DEQmode: code = ARM_EQ; goto dominance;
    case CC_DGEmode: code = ARM_GE; goto dominance;
    case CC_DGTmode: code = ARM_GT; goto dominance;
    case CC_DLEmode: code = ARM_LE; goto dominance;
    case CC_DLTmode: code = ARM_LT; goto dominance;
    case CC_DGEUmode: code = ARM_CS; goto dominance;
    case CC_DGTUmode: code = ARM_HI; goto dominance;
    case CC_DLEUmode: code = ARM_LS; goto dominance;
    case CC_DLTUmode: code = ARM_CC;

      gcc_assert (comp_code == EQ || comp_code == NE);

      if (comp_code == EQ)
	return ARM_INVERSE_CONDITION_CODE (code);

      case NE: return ARM_NE;
      case EQ: return ARM_EQ;
      case GE: return ARM_PL;
      case LT: return ARM_MI;
      default: gcc_unreachable ();

      case NE: return ARM_NE;
      case EQ: return ARM_EQ;
      default: gcc_unreachable ();

      case NE: return ARM_MI;
      case EQ: return ARM_PL;
      default: gcc_unreachable ();

      /* These encodings assume that AC=1 in the FPA system control
	 byte.  This allows us to handle all cases except UNEQ and
	 LTGT.  */
      case GE: return ARM_GE;
      case GT: return ARM_GT;
      case LE: return ARM_LS;
      case LT: return ARM_MI;
      case NE: return ARM_NE;
      case EQ: return ARM_EQ;
      case ORDERED: return ARM_VC;
      case UNORDERED: return ARM_VS;
      case UNLT: return ARM_LT;
      case UNLE: return ARM_LE;
      case UNGT: return ARM_HI;
      case UNGE: return ARM_PL;
      /* UNEQ and LTGT do not have a representation.  */
      case UNEQ: /* Fall through.  */
      case LTGT: /* Fall through.  */
      default: gcc_unreachable ();

      case NE: return ARM_NE;
      case EQ: return ARM_EQ;
      case GE: return ARM_LE;
      case GT: return ARM_LT;
      case LE: return ARM_GE;
      case LT: return ARM_GT;
      case GEU: return ARM_LS;
      case GTU: return ARM_CC;
      case LEU: return ARM_CS;
      case LTU: return ARM_HI;
      default: gcc_unreachable ();

      case LTU: return ARM_CS;
      case GEU: return ARM_CC;
      default: gcc_unreachable ();

      case NE: return ARM_NE;
      case EQ: return ARM_EQ;
      case GE: return ARM_GE;
      case GT: return ARM_GT;
      case LE: return ARM_LE;
      case LT: return ARM_LT;
      case GEU: return ARM_CS;
      case GTU: return ARM_HI;
      case LEU: return ARM_LS;
      case LTU: return ARM_CC;
      default: gcc_unreachable ();

    default: gcc_unreachable ();
11081 arm_final_prescan_insn (rtx insn
)
11083 /* BODY will hold the body of INSN. */
11084 rtx body
= PATTERN (insn
);
11086 /* This will be 1 if trying to repeat the trick, and things need to be
11087 reversed if it appears to fail. */
11090 /* JUMP_CLOBBERS will be one implies that the conditions if a branch is
11091 taken are clobbered, even if the rtl suggests otherwise. It also
11092 means that we have to grub around within the jump expression to find
11093 out what the conditions are when the jump isn't taken. */
11094 int jump_clobbers
= 0;
11096 /* If we start with a return insn, we only succeed if we find another one. */
11097 int seeking_return
= 0;
11099 /* START_INSN will hold the insn from where we start looking. This is the
11100 first insn after the following code_label if REVERSE is true. */
11101 rtx start_insn
= insn
;
11103 /* If in state 4, check if the target branch is reached, in order to
11104 change back to state 0. */
11105 if (arm_ccfsm_state
== 4)
11107 if (insn
== arm_target_insn
)
11109 arm_target_insn
= NULL
;
11110 arm_ccfsm_state
= 0;
11115 /* If in state 3, it is possible to repeat the trick, if this insn is an
11116 unconditional branch to a label, and immediately following this branch
11117 is the previous target label which is only used once, and the label this
11118 branch jumps to is not too far off. */
11119 if (arm_ccfsm_state
== 3)
11121 if (simplejump_p (insn
))
11123 start_insn
= next_nonnote_insn (start_insn
);
11124 if (GET_CODE (start_insn
) == BARRIER
)
11126 /* XXX Isn't this always a barrier? */
11127 start_insn
= next_nonnote_insn (start_insn
);
11129 if (GET_CODE (start_insn
) == CODE_LABEL
11130 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
11131 && LABEL_NUSES (start_insn
) == 1)
11136 else if (GET_CODE (body
) == RETURN
)
11138 start_insn
= next_nonnote_insn (start_insn
);
11139 if (GET_CODE (start_insn
) == BARRIER
)
11140 start_insn
= next_nonnote_insn (start_insn
);
11141 if (GET_CODE (start_insn
) == CODE_LABEL
11142 && CODE_LABEL_NUMBER (start_insn
) == arm_target_label
11143 && LABEL_NUSES (start_insn
) == 1)
11146 seeking_return
= 1;
11155 gcc_assert (!arm_ccfsm_state
|| reverse
);
11156 if (GET_CODE (insn
) != JUMP_INSN
)
11159 /* This jump might be paralleled with a clobber of the condition codes
11160 the jump should always come first */
11161 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
11162 body
= XVECEXP (body
, 0, 0);
11165 || (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == PC
11166 && GET_CODE (SET_SRC (body
)) == IF_THEN_ELSE
))
11169 int fail
= FALSE
, succeed
= FALSE
;
11170 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
11171 int then_not_else
= TRUE
;
11172 rtx this_insn
= start_insn
, label
= 0;
11174 /* If the jump cannot be done with one instruction, we cannot
11175 conditionally execute the instruction in the inverse case. */
11176 if (get_attr_conds (insn
) == CONDS_JUMP_CLOB
)
11182 /* Register the insn jumped to. */
11185 if (!seeking_return
)
11186 label
= XEXP (SET_SRC (body
), 0);
11188 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == LABEL_REF
)
11189 label
= XEXP (XEXP (SET_SRC (body
), 1), 0);
11190 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == LABEL_REF
)
11192 label
= XEXP (XEXP (SET_SRC (body
), 2), 0);
11193 then_not_else
= FALSE
;
11195 else if (GET_CODE (XEXP (SET_SRC (body
), 1)) == RETURN
)
11196 seeking_return
= 1;
11197 else if (GET_CODE (XEXP (SET_SRC (body
), 2)) == RETURN
)
11199 seeking_return
= 1;
11200 then_not_else
= FALSE
;
11203 gcc_unreachable ();
11205 /* See how many insns this branch skips, and what kind of insns. If all
11206 insns are okay, and the label or unconditional branch to the same
11207 label is not too far away, succeed. */
11208 for (insns_skipped
= 0;
11209 !fail
&& !succeed
&& insns_skipped
++ < max_insns_skipped
;)
11213 this_insn
= next_nonnote_insn (this_insn
);
11217 switch (GET_CODE (this_insn
))
11220 /* Succeed if it is the target label, otherwise fail since
11221 control falls in from somewhere else. */
11222 if (this_insn
== label
)
11226 arm_ccfsm_state
= 2;
11227 this_insn
= next_nonnote_insn (this_insn
);
11230 arm_ccfsm_state
= 1;
11238 /* Succeed if the following insn is the target label.
11240 If return insns are used then the last insn in a function
11241 will be a barrier. */
11242 this_insn
= next_nonnote_insn (this_insn
);
11243 if (this_insn
&& this_insn
== label
)
11247 arm_ccfsm_state
= 2;
11248 this_insn
= next_nonnote_insn (this_insn
);
11251 arm_ccfsm_state
= 1;
11259 /* The AAPCS says that conditional calls should not be
11260 used since they make interworking inefficient (the
11261 linker can't transform BL<cond> into BLX). That's
11262 only a problem if the machine has BLX. */
11269 /* Succeed if the following insn is the target label, or
11270 if the following two insns are a barrier and the
11272 this_insn
= next_nonnote_insn (this_insn
);
11273 if (this_insn
&& GET_CODE (this_insn
) == BARRIER
)
11274 this_insn
= next_nonnote_insn (this_insn
);
11276 if (this_insn
&& this_insn
== label
11277 && insns_skipped
< max_insns_skipped
)
11281 arm_ccfsm_state
= 2;
11282 this_insn
= next_nonnote_insn (this_insn
);
11285 arm_ccfsm_state
= 1;
11293 /* If this is an unconditional branch to the same label, succeed.
11294 If it is to another label, do nothing. If it is conditional,
11296 /* XXX Probably, the tests for SET and the PC are
11299 scanbody
= PATTERN (this_insn
);
11300 if (GET_CODE (scanbody
) == SET
11301 && GET_CODE (SET_DEST (scanbody
)) == PC
)
11303 if (GET_CODE (SET_SRC (scanbody
)) == LABEL_REF
11304 && XEXP (SET_SRC (scanbody
), 0) == label
&& !reverse
)
11306 arm_ccfsm_state
= 2;
11309 else if (GET_CODE (SET_SRC (scanbody
)) == IF_THEN_ELSE
)
11312 /* Fail if a conditional return is undesirable (e.g. on a
11313 StrongARM), but still allow this if optimizing for size. */
11314 else if (GET_CODE (scanbody
) == RETURN
11315 && !use_return_insn (TRUE
, NULL
)
11318 else if (GET_CODE (scanbody
) == RETURN
11321 arm_ccfsm_state
= 2;
11324 else if (GET_CODE (scanbody
) == PARALLEL
)
11326 switch (get_attr_conds (this_insn
))
11336 fail
= TRUE
; /* Unrecognized jump (e.g. epilogue). */
11341 /* Instructions using or affecting the condition codes make it
11343 scanbody
= PATTERN (this_insn
);
11344 if (!(GET_CODE (scanbody
) == SET
11345 || GET_CODE (scanbody
) == PARALLEL
)
11346 || get_attr_conds (this_insn
) != CONDS_NOCOND
)
/* A conditional Cirrus instruction must be followed by
   a non-Cirrus instruction.  However, since we
   conditionalize instructions in this function, and since
   by the time we get here we can no longer add instructions
   (nops) because shorten_branches() has already been
   called, we simply disable conditionalizing Cirrus
   instructions to be safe.  */
11356 if (GET_CODE (scanbody
) != USE
11357 && GET_CODE (scanbody
) != CLOBBER
11358 && get_attr_cirrus (this_insn
) != CIRRUS_NOT
)
11368 if ((!seeking_return
) && (arm_ccfsm_state
== 1 || reverse
))
11369 arm_target_label
= CODE_LABEL_NUMBER (label
);
11372 gcc_assert (seeking_return
|| arm_ccfsm_state
== 2);
11374 while (this_insn
&& GET_CODE (PATTERN (this_insn
)) == USE
)
11376 this_insn
= next_nonnote_insn (this_insn
);
11377 gcc_assert (!this_insn
11378 || (GET_CODE (this_insn
) != BARRIER
11379 && GET_CODE (this_insn
) != CODE_LABEL
));
/* Oh dear!  We ran off the end; give up.  */
11384 recog (PATTERN (insn
), insn
, NULL
);
11385 arm_ccfsm_state
= 0;
11386 arm_target_insn
= NULL
;
11389 arm_target_insn
= this_insn
;
11393 gcc_assert (!reverse
);
11395 get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body
),
11397 if (GET_CODE (XEXP (XEXP (SET_SRC (body
), 0), 0)) == AND
)
11398 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
11399 if (GET_CODE (XEXP (SET_SRC (body
), 0)) == NE
)
11400 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
11404 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
11407 arm_current_cc
= get_arm_condition_code (XEXP (SET_SRC (body
),
11411 if (reverse
|| then_not_else
)
11412 arm_current_cc
= ARM_INVERSE_CONDITION_CODE (arm_current_cc
);
11415 /* Restore recog_data (getting the attributes of other insns can
11416 destroy this array, but final.c assumes that it remains intact
11417 across this call; since the insn has been recognized already we
11418 call recog direct). */
11419 recog (PATTERN (insn
), insn
, NULL
);
11423 /* Returns true if REGNO is a valid register
11424 for holding a quantity of type MODE. */
11426 arm_hard_regno_mode_ok (unsigned int regno
, enum machine_mode mode
)
11428 if (GET_MODE_CLASS (mode
) == MODE_CC
)
11429 return regno
== CC_REGNUM
|| regno
== VFPCC_REGNUM
;
/* For the Thumb we only allow values bigger than SImode in
   registers 0 - 6, so that there is always a second low
   register available to hold the upper part of the value.
   We probably ought to ensure that the register is the
   start of an even-numbered register pair.  */
11437 return (ARM_NUM_REGS (mode
) < 2) || (regno
< LAST_LO_REGNUM
);
11439 if (IS_CIRRUS_REGNUM (regno
))
/* We have outlawed SI values in Cirrus registers because they
   reside in the lower 32 bits, but SF values reside in the
   upper 32 bits.  This causes gcc all sorts of grief.  We can't
   even split the registers into pairs because Cirrus SI values
   get sign-extended to 64 bits.  -- aldyh.  */
11445 return (GET_MODE_CLASS (mode
) == MODE_FLOAT
) || (mode
== DImode
);
11447 if (IS_VFP_REGNUM (regno
))
11449 if (mode
== SFmode
|| mode
== SImode
)
11452 /* DFmode values are only valid in even register pairs. */
11453 if (mode
== DFmode
)
11454 return ((regno
- FIRST_VFP_REGNUM
) & 1) == 0;
11458 if (IS_IWMMXT_GR_REGNUM (regno
))
11459 return mode
== SImode
;
11461 if (IS_IWMMXT_REGNUM (regno
))
11462 return VALID_IWMMXT_REG_MODE (mode
);
11464 /* We allow any value to be stored in the general registers.
11465 Restrict doubleword quantities to even register pairs so that we can
if (regno <= LAST_ARM_REGNUM)
  return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
11470 if ( regno
== FRAME_POINTER_REGNUM
11471 || regno
== ARG_POINTER_REGNUM
)
11472 /* We only allow integers in the fake hard registers. */
11473 return GET_MODE_CLASS (mode
) == MODE_INT
;
11475 /* The only registers left are the FPA registers
11476 which we only allow to hold FP values. */
11477 return GET_MODE_CLASS (mode
) == MODE_FLOAT
11478 && regno
>= FIRST_FPA_REGNUM
11479 && regno
<= LAST_FPA_REGNUM
;
11483 arm_regno_class (int regno
)
11487 if (regno
== STACK_POINTER_REGNUM
)
11489 if (regno
== CC_REGNUM
)
11496 if ( regno
<= LAST_ARM_REGNUM
11497 || regno
== FRAME_POINTER_REGNUM
11498 || regno
== ARG_POINTER_REGNUM
)
11499 return GENERAL_REGS
;
11501 if (regno
== CC_REGNUM
|| regno
== VFPCC_REGNUM
)
11504 if (IS_CIRRUS_REGNUM (regno
))
11505 return CIRRUS_REGS
;
11507 if (IS_VFP_REGNUM (regno
))
11510 if (IS_IWMMXT_REGNUM (regno
))
11511 return IWMMXT_REGS
;
11513 if (IS_IWMMXT_GR_REGNUM (regno
))
11514 return IWMMXT_GR_REGS
;
11519 /* Handle a special case when computing the offset
11520 of an argument from the frame pointer. */
11522 arm_debugger_arg_offset (int value
, rtx addr
)
11526 /* We are only interested if dbxout_parms() failed to compute the offset. */
11530 /* We can only cope with the case where the address is held in a register. */
11531 if (GET_CODE (addr
) != REG
)
11534 /* If we are using the frame pointer to point at the argument, then
11535 an offset of 0 is correct. */
11536 if (REGNO (addr
) == (unsigned) HARD_FRAME_POINTER_REGNUM
)
11539 /* If we are using the stack pointer to point at the
11540 argument, then an offset of 0 is correct. */
11541 if ((TARGET_THUMB
|| !frame_pointer_needed
)
11542 && REGNO (addr
) == SP_REGNUM
)
11545 /* Oh dear. The argument is pointed to by a register rather
11546 than being held in a register, or being stored at a known
11547 offset from the frame pointer. Since GDB only understands
11548 those two kinds of argument we must translate the address
11549 held in the register into an offset from the frame pointer.
11550 We do this by searching through the insns for the function
11551 looking to see where this register gets its value. If the
11552 register is initialized from the frame pointer plus an offset
11553 then we are in luck and we can continue, otherwise we give up.
11555 This code is exercised by producing debugging information
11556 for a function with arguments like this:
11558 double func (double a, double b, int c, double d) {return d;}
11560 Without this code the stab for parameter 'd' will be set to
11561 an offset of 0 from the frame pointer, rather than 8. */
11563 /* The if() statement says:
11565 If the insn is a normal instruction
11566 and if the insn is setting the value in a register
11567 and if the register being set is the register holding the address of the argument
and if the address is computed by an addition
11569 that involves adding to a register
11570 which is the frame pointer
11575 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
11577 if ( GET_CODE (insn
) == INSN
11578 && GET_CODE (PATTERN (insn
)) == SET
11579 && REGNO (XEXP (PATTERN (insn
), 0)) == REGNO (addr
)
11580 && GET_CODE (XEXP (PATTERN (insn
), 1)) == PLUS
11581 && GET_CODE (XEXP (XEXP (PATTERN (insn
), 1), 0)) == REG
11582 && REGNO (XEXP (XEXP (PATTERN (insn
), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
11583 && GET_CODE (XEXP (XEXP (PATTERN (insn
), 1), 1)) == CONST_INT
11586 value
= INTVAL (XEXP (XEXP (PATTERN (insn
), 1), 1));
11595 warning (0, "unable to compute real location of stacked parameter");
11596 value
= 8; /* XXX magic hack */
11602 #define def_mbuiltin(MASK, NAME, TYPE, CODE) \
11605 if ((MASK) & insn_flags) \
11606 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), \
11607 BUILT_IN_MD, NULL, NULL_TREE); \
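/* Illustrative use of def_mbuiltin, mirroring the calls made further
   down in this file:

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero",
		   di_ftype_void, ARM_BUILTIN_WZERO);

   The builtin is only registered with the language hooks when the
   corresponding FL_* bit is set in insn_flags for the selected CPU.  */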
11611 struct builtin_description
11613 const unsigned int mask
;
11614 const enum insn_code icode
;
11615 const char * const name
;
11616 const enum arm_builtins code
;
11617 const enum rtx_code comparison
;
11618 const unsigned int flag
;
11621 static const struct builtin_description bdesc_2arg
[] =
11623 #define IWMMXT_BUILTIN(code, string, builtin) \
11624 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
11625 ARM_BUILTIN_##builtin, 0, 0 },
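/* For example, the first entry below,

     IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)

   expands to the initializer

     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },

   pairing the named insn pattern with its user-visible builtin.  */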
11627 IWMMXT_BUILTIN (addv8qi3
, "waddb", WADDB
)
11628 IWMMXT_BUILTIN (addv4hi3
, "waddh", WADDH
)
11629 IWMMXT_BUILTIN (addv2si3
, "waddw", WADDW
)
11630 IWMMXT_BUILTIN (subv8qi3
, "wsubb", WSUBB
)
11631 IWMMXT_BUILTIN (subv4hi3
, "wsubh", WSUBH
)
11632 IWMMXT_BUILTIN (subv2si3
, "wsubw", WSUBW
)
11633 IWMMXT_BUILTIN (ssaddv8qi3
, "waddbss", WADDSSB
)
11634 IWMMXT_BUILTIN (ssaddv4hi3
, "waddhss", WADDSSH
)
11635 IWMMXT_BUILTIN (ssaddv2si3
, "waddwss", WADDSSW
)
11636 IWMMXT_BUILTIN (sssubv8qi3
, "wsubbss", WSUBSSB
)
11637 IWMMXT_BUILTIN (sssubv4hi3
, "wsubhss", WSUBSSH
)
11638 IWMMXT_BUILTIN (sssubv2si3
, "wsubwss", WSUBSSW
)
11639 IWMMXT_BUILTIN (usaddv8qi3
, "waddbus", WADDUSB
)
11640 IWMMXT_BUILTIN (usaddv4hi3
, "waddhus", WADDUSH
)
11641 IWMMXT_BUILTIN (usaddv2si3
, "waddwus", WADDUSW
)
11642 IWMMXT_BUILTIN (ussubv8qi3
, "wsubbus", WSUBUSB
)
11643 IWMMXT_BUILTIN (ussubv4hi3
, "wsubhus", WSUBUSH
)
11644 IWMMXT_BUILTIN (ussubv2si3
, "wsubwus", WSUBUSW
)
11645 IWMMXT_BUILTIN (mulv4hi3
, "wmulul", WMULUL
)
11646 IWMMXT_BUILTIN (smulv4hi3_highpart
, "wmulsm", WMULSM
)
11647 IWMMXT_BUILTIN (umulv4hi3_highpart
, "wmulum", WMULUM
)
11648 IWMMXT_BUILTIN (eqv8qi3
, "wcmpeqb", WCMPEQB
)
11649 IWMMXT_BUILTIN (eqv4hi3
, "wcmpeqh", WCMPEQH
)
11650 IWMMXT_BUILTIN (eqv2si3
, "wcmpeqw", WCMPEQW
)
11651 IWMMXT_BUILTIN (gtuv8qi3
, "wcmpgtub", WCMPGTUB
)
11652 IWMMXT_BUILTIN (gtuv4hi3
, "wcmpgtuh", WCMPGTUH
)
11653 IWMMXT_BUILTIN (gtuv2si3
, "wcmpgtuw", WCMPGTUW
)
11654 IWMMXT_BUILTIN (gtv8qi3
, "wcmpgtsb", WCMPGTSB
)
11655 IWMMXT_BUILTIN (gtv4hi3
, "wcmpgtsh", WCMPGTSH
)
11656 IWMMXT_BUILTIN (gtv2si3
, "wcmpgtsw", WCMPGTSW
)
11657 IWMMXT_BUILTIN (umaxv8qi3
, "wmaxub", WMAXUB
)
11658 IWMMXT_BUILTIN (smaxv8qi3
, "wmaxsb", WMAXSB
)
11659 IWMMXT_BUILTIN (umaxv4hi3
, "wmaxuh", WMAXUH
)
11660 IWMMXT_BUILTIN (smaxv4hi3
, "wmaxsh", WMAXSH
)
11661 IWMMXT_BUILTIN (umaxv2si3
, "wmaxuw", WMAXUW
)
11662 IWMMXT_BUILTIN (smaxv2si3
, "wmaxsw", WMAXSW
)
11663 IWMMXT_BUILTIN (uminv8qi3
, "wminub", WMINUB
)
11664 IWMMXT_BUILTIN (sminv8qi3
, "wminsb", WMINSB
)
11665 IWMMXT_BUILTIN (uminv4hi3
, "wminuh", WMINUH
)
11666 IWMMXT_BUILTIN (sminv4hi3
, "wminsh", WMINSH
)
11667 IWMMXT_BUILTIN (uminv2si3
, "wminuw", WMINUW
)
11668 IWMMXT_BUILTIN (sminv2si3
, "wminsw", WMINSW
)
11669 IWMMXT_BUILTIN (iwmmxt_anddi3
, "wand", WAND
)
11670 IWMMXT_BUILTIN (iwmmxt_nanddi3
, "wandn", WANDN
)
11671 IWMMXT_BUILTIN (iwmmxt_iordi3
, "wor", WOR
)
11672 IWMMXT_BUILTIN (iwmmxt_xordi3
, "wxor", WXOR
)
11673 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3
, "wavg2b", WAVG2B
)
11674 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3
, "wavg2h", WAVG2H
)
11675 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3
, "wavg2br", WAVG2BR
)
11676 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3
, "wavg2hr", WAVG2HR
)
11677 IWMMXT_BUILTIN (iwmmxt_wunpckilb
, "wunpckilb", WUNPCKILB
)
11678 IWMMXT_BUILTIN (iwmmxt_wunpckilh
, "wunpckilh", WUNPCKILH
)
11679 IWMMXT_BUILTIN (iwmmxt_wunpckilw
, "wunpckilw", WUNPCKILW
)
11680 IWMMXT_BUILTIN (iwmmxt_wunpckihb
, "wunpckihb", WUNPCKIHB
)
11681 IWMMXT_BUILTIN (iwmmxt_wunpckihh
, "wunpckihh", WUNPCKIHH
)
11682 IWMMXT_BUILTIN (iwmmxt_wunpckihw
, "wunpckihw", WUNPCKIHW
)
11683 IWMMXT_BUILTIN (iwmmxt_wmadds
, "wmadds", WMADDS
)
11684 IWMMXT_BUILTIN (iwmmxt_wmaddu
, "wmaddu", WMADDU
)
11686 #define IWMMXT_BUILTIN2(code, builtin) \
11687 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
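/* For example, IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS) expands to

     { FL_IWMMXT, CODE_FOR_iwmmxt_wpackhss, NULL,
       ARM_BUILTIN_WPACKHSS, 0, 0 },

   i.e. an entry with no name string; the corresponding
   __builtin_arm_* functions are declared by the explicit def_mbuiltin
   calls in arm_init_iwmmxt_builtins below.  */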
11689 IWMMXT_BUILTIN2 (iwmmxt_wpackhss
, WPACKHSS
)
11690 IWMMXT_BUILTIN2 (iwmmxt_wpackwss
, WPACKWSS
)
11691 IWMMXT_BUILTIN2 (iwmmxt_wpackdss
, WPACKDSS
)
11692 IWMMXT_BUILTIN2 (iwmmxt_wpackhus
, WPACKHUS
)
11693 IWMMXT_BUILTIN2 (iwmmxt_wpackwus
, WPACKWUS
)
11694 IWMMXT_BUILTIN2 (iwmmxt_wpackdus
, WPACKDUS
)
11695 IWMMXT_BUILTIN2 (ashlv4hi3_di
, WSLLH
)
11696 IWMMXT_BUILTIN2 (ashlv4hi3
, WSLLHI
)
11697 IWMMXT_BUILTIN2 (ashlv2si3_di
, WSLLW
)
11698 IWMMXT_BUILTIN2 (ashlv2si3
, WSLLWI
)
11699 IWMMXT_BUILTIN2 (ashldi3_di
, WSLLD
)
11700 IWMMXT_BUILTIN2 (ashldi3_iwmmxt
, WSLLDI
)
11701 IWMMXT_BUILTIN2 (lshrv4hi3_di
, WSRLH
)
11702 IWMMXT_BUILTIN2 (lshrv4hi3
, WSRLHI
)
11703 IWMMXT_BUILTIN2 (lshrv2si3_di
, WSRLW
)
11704 IWMMXT_BUILTIN2 (lshrv2si3
, WSRLWI
)
11705 IWMMXT_BUILTIN2 (lshrdi3_di
, WSRLD
)
11706 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt
, WSRLDI
)
11707 IWMMXT_BUILTIN2 (ashrv4hi3_di
, WSRAH
)
11708 IWMMXT_BUILTIN2 (ashrv4hi3
, WSRAHI
)
11709 IWMMXT_BUILTIN2 (ashrv2si3_di
, WSRAW
)
11710 IWMMXT_BUILTIN2 (ashrv2si3
, WSRAWI
)
11711 IWMMXT_BUILTIN2 (ashrdi3_di
, WSRAD
)
11712 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt
, WSRADI
)
11713 IWMMXT_BUILTIN2 (rorv4hi3_di
, WRORH
)
11714 IWMMXT_BUILTIN2 (rorv4hi3
, WRORHI
)
11715 IWMMXT_BUILTIN2 (rorv2si3_di
, WRORW
)
11716 IWMMXT_BUILTIN2 (rorv2si3
, WRORWI
)
11717 IWMMXT_BUILTIN2 (rordi3_di
, WRORD
)
11718 IWMMXT_BUILTIN2 (rordi3
, WRORDI
)
11719 IWMMXT_BUILTIN2 (iwmmxt_wmacuz
, WMACUZ
)
11720 IWMMXT_BUILTIN2 (iwmmxt_wmacsz
, WMACSZ
)
11723 static const struct builtin_description bdesc_1arg
[] =
11725 IWMMXT_BUILTIN (iwmmxt_tmovmskb
, "tmovmskb", TMOVMSKB
)
11726 IWMMXT_BUILTIN (iwmmxt_tmovmskh
, "tmovmskh", TMOVMSKH
)
11727 IWMMXT_BUILTIN (iwmmxt_tmovmskw
, "tmovmskw", TMOVMSKW
)
11728 IWMMXT_BUILTIN (iwmmxt_waccb
, "waccb", WACCB
)
11729 IWMMXT_BUILTIN (iwmmxt_wacch
, "wacch", WACCH
)
11730 IWMMXT_BUILTIN (iwmmxt_waccw
, "waccw", WACCW
)
11731 IWMMXT_BUILTIN (iwmmxt_wunpckehub
, "wunpckehub", WUNPCKEHUB
)
11732 IWMMXT_BUILTIN (iwmmxt_wunpckehuh
, "wunpckehuh", WUNPCKEHUH
)
11733 IWMMXT_BUILTIN (iwmmxt_wunpckehuw
, "wunpckehuw", WUNPCKEHUW
)
11734 IWMMXT_BUILTIN (iwmmxt_wunpckehsb
, "wunpckehsb", WUNPCKEHSB
)
11735 IWMMXT_BUILTIN (iwmmxt_wunpckehsh
, "wunpckehsh", WUNPCKEHSH
)
11736 IWMMXT_BUILTIN (iwmmxt_wunpckehsw
, "wunpckehsw", WUNPCKEHSW
)
11737 IWMMXT_BUILTIN (iwmmxt_wunpckelub
, "wunpckelub", WUNPCKELUB
)
11738 IWMMXT_BUILTIN (iwmmxt_wunpckeluh
, "wunpckeluh", WUNPCKELUH
)
11739 IWMMXT_BUILTIN (iwmmxt_wunpckeluw
, "wunpckeluw", WUNPCKELUW
)
11740 IWMMXT_BUILTIN (iwmmxt_wunpckelsb
, "wunpckelsb", WUNPCKELSB
)
11741 IWMMXT_BUILTIN (iwmmxt_wunpckelsh
, "wunpckelsh", WUNPCKELSH
)
11742 IWMMXT_BUILTIN (iwmmxt_wunpckelsw
, "wunpckelsw", WUNPCKELSW
)
11745 /* Set up all the iWMMXt builtins. This is
11746 not called if TARGET_IWMMXT is zero. */
11749 arm_init_iwmmxt_builtins (void)
11751 const struct builtin_description
* d
;
11753 tree endlink
= void_list_node
;
11755 tree V2SI_type_node
= build_vector_type_for_mode (intSI_type_node
, V2SImode
);
11756 tree V4HI_type_node
= build_vector_type_for_mode (intHI_type_node
, V4HImode
);
11757 tree V8QI_type_node
= build_vector_type_for_mode (intQI_type_node
, V8QImode
);
11760 = build_function_type (integer_type_node
,
11761 tree_cons (NULL_TREE
, integer_type_node
, endlink
));
11762 tree v8qi_ftype_v8qi_v8qi_int
11763 = build_function_type (V8QI_type_node
,
11764 tree_cons (NULL_TREE
, V8QI_type_node
,
11765 tree_cons (NULL_TREE
, V8QI_type_node
,
11766 tree_cons (NULL_TREE
,
11769 tree v4hi_ftype_v4hi_int
11770 = build_function_type (V4HI_type_node
,
11771 tree_cons (NULL_TREE
, V4HI_type_node
,
11772 tree_cons (NULL_TREE
, integer_type_node
,
11774 tree v2si_ftype_v2si_int
11775 = build_function_type (V2SI_type_node
,
11776 tree_cons (NULL_TREE
, V2SI_type_node
,
11777 tree_cons (NULL_TREE
, integer_type_node
,
11779 tree v2si_ftype_di_di
11780 = build_function_type (V2SI_type_node
,
11781 tree_cons (NULL_TREE
, long_long_integer_type_node
,
11782 tree_cons (NULL_TREE
, long_long_integer_type_node
,
11784 tree di_ftype_di_int
11785 = build_function_type (long_long_integer_type_node
,
11786 tree_cons (NULL_TREE
, long_long_integer_type_node
,
11787 tree_cons (NULL_TREE
, integer_type_node
,
11789 tree di_ftype_di_int_int
11790 = build_function_type (long_long_integer_type_node
,
11791 tree_cons (NULL_TREE
, long_long_integer_type_node
,
11792 tree_cons (NULL_TREE
, integer_type_node
,
11793 tree_cons (NULL_TREE
,
11796 tree int_ftype_v8qi
11797 = build_function_type (integer_type_node
,
11798 tree_cons (NULL_TREE
, V8QI_type_node
,
11800 tree int_ftype_v4hi
11801 = build_function_type (integer_type_node
,
11802 tree_cons (NULL_TREE
, V4HI_type_node
,
11804 tree int_ftype_v2si
11805 = build_function_type (integer_type_node
,
11806 tree_cons (NULL_TREE
, V2SI_type_node
,
11808 tree int_ftype_v8qi_int
11809 = build_function_type (integer_type_node
,
11810 tree_cons (NULL_TREE
, V8QI_type_node
,
11811 tree_cons (NULL_TREE
, integer_type_node
,
11813 tree int_ftype_v4hi_int
11814 = build_function_type (integer_type_node
,
11815 tree_cons (NULL_TREE
, V4HI_type_node
,
11816 tree_cons (NULL_TREE
, integer_type_node
,
11818 tree int_ftype_v2si_int
11819 = build_function_type (integer_type_node
,
11820 tree_cons (NULL_TREE
, V2SI_type_node
,
11821 tree_cons (NULL_TREE
, integer_type_node
,
11823 tree v8qi_ftype_v8qi_int_int
11824 = build_function_type (V8QI_type_node
,
11825 tree_cons (NULL_TREE
, V8QI_type_node
,
11826 tree_cons (NULL_TREE
, integer_type_node
,
11827 tree_cons (NULL_TREE
,
11830 tree v4hi_ftype_v4hi_int_int
11831 = build_function_type (V4HI_type_node
,
11832 tree_cons (NULL_TREE
, V4HI_type_node
,
11833 tree_cons (NULL_TREE
, integer_type_node
,
11834 tree_cons (NULL_TREE
,
11837 tree v2si_ftype_v2si_int_int
11838 = build_function_type (V2SI_type_node
,
11839 tree_cons (NULL_TREE
, V2SI_type_node
,
11840 tree_cons (NULL_TREE
, integer_type_node
,
11841 tree_cons (NULL_TREE
,
11844 /* Miscellaneous. */
11845 tree v8qi_ftype_v4hi_v4hi
11846 = build_function_type (V8QI_type_node
,
11847 tree_cons (NULL_TREE
, V4HI_type_node
,
11848 tree_cons (NULL_TREE
, V4HI_type_node
,
11850 tree v4hi_ftype_v2si_v2si
11851 = build_function_type (V4HI_type_node
,
11852 tree_cons (NULL_TREE
, V2SI_type_node
,
11853 tree_cons (NULL_TREE
, V2SI_type_node
,
11855 tree v2si_ftype_v4hi_v4hi
11856 = build_function_type (V2SI_type_node
,
11857 tree_cons (NULL_TREE
, V4HI_type_node
,
11858 tree_cons (NULL_TREE
, V4HI_type_node
,
11860 tree v2si_ftype_v8qi_v8qi
11861 = build_function_type (V2SI_type_node
,
11862 tree_cons (NULL_TREE
, V8QI_type_node
,
11863 tree_cons (NULL_TREE
, V8QI_type_node
,
11865 tree v4hi_ftype_v4hi_di
11866 = build_function_type (V4HI_type_node
,
11867 tree_cons (NULL_TREE
, V4HI_type_node
,
11868 tree_cons (NULL_TREE
,
11869 long_long_integer_type_node
,
11871 tree v2si_ftype_v2si_di
11872 = build_function_type (V2SI_type_node
,
11873 tree_cons (NULL_TREE
, V2SI_type_node
,
11874 tree_cons (NULL_TREE
,
11875 long_long_integer_type_node
,
11877 tree void_ftype_int_int
11878 = build_function_type (void_type_node
,
11879 tree_cons (NULL_TREE
, integer_type_node
,
11880 tree_cons (NULL_TREE
, integer_type_node
,
11883 = build_function_type (long_long_unsigned_type_node
, endlink
);
11885 = build_function_type (long_long_integer_type_node
,
11886 tree_cons (NULL_TREE
, V8QI_type_node
,
11889 = build_function_type (long_long_integer_type_node
,
11890 tree_cons (NULL_TREE
, V4HI_type_node
,
11893 = build_function_type (long_long_integer_type_node
,
11894 tree_cons (NULL_TREE
, V2SI_type_node
,
11896 tree v2si_ftype_v4hi
11897 = build_function_type (V2SI_type_node
,
11898 tree_cons (NULL_TREE
, V4HI_type_node
,
11900 tree v4hi_ftype_v8qi
11901 = build_function_type (V4HI_type_node
,
11902 tree_cons (NULL_TREE
, V8QI_type_node
,
11905 tree di_ftype_di_v4hi_v4hi
11906 = build_function_type (long_long_unsigned_type_node
,
11907 tree_cons (NULL_TREE
,
11908 long_long_unsigned_type_node
,
11909 tree_cons (NULL_TREE
, V4HI_type_node
,
11910 tree_cons (NULL_TREE
,
11914 tree di_ftype_v4hi_v4hi
11915 = build_function_type (long_long_unsigned_type_node
,
11916 tree_cons (NULL_TREE
, V4HI_type_node
,
11917 tree_cons (NULL_TREE
, V4HI_type_node
,
11920 /* Normal vector binops. */
11921 tree v8qi_ftype_v8qi_v8qi
11922 = build_function_type (V8QI_type_node
,
11923 tree_cons (NULL_TREE
, V8QI_type_node
,
11924 tree_cons (NULL_TREE
, V8QI_type_node
,
11926 tree v4hi_ftype_v4hi_v4hi
11927 = build_function_type (V4HI_type_node
,
11928 tree_cons (NULL_TREE
, V4HI_type_node
,
11929 tree_cons (NULL_TREE
, V4HI_type_node
,
11931 tree v2si_ftype_v2si_v2si
11932 = build_function_type (V2SI_type_node
,
11933 tree_cons (NULL_TREE
, V2SI_type_node
,
11934 tree_cons (NULL_TREE
, V2SI_type_node
,
11936 tree di_ftype_di_di
11937 = build_function_type (long_long_unsigned_type_node
,
11938 tree_cons (NULL_TREE
, long_long_unsigned_type_node
,
11939 tree_cons (NULL_TREE
,
11940 long_long_unsigned_type_node
,
/* Add all builtins that are more or less simple operations on two
   operands.  */
11945 for (i
= 0, d
= bdesc_2arg
; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
11947 /* Use one of the operands; the target can have a different mode for
11948 mask-generating compares. */
11949 enum machine_mode mode
;
11955 mode
= insn_data
[d
->icode
].operand
[1].mode
;
11960 type
= v8qi_ftype_v8qi_v8qi
;
11963 type
= v4hi_ftype_v4hi_v4hi
;
11966 type
= v2si_ftype_v2si_v2si
;
11969 type
= di_ftype_di_di
;
11973 gcc_unreachable ();
11976 def_mbuiltin (d
->mask
, d
->name
, type
, d
->code
);
11979 /* Add the remaining MMX insns with somewhat more complicated types. */
11980 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wzero", di_ftype_void
, ARM_BUILTIN_WZERO
);
11981 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_setwcx", void_ftype_int_int
, ARM_BUILTIN_SETWCX
);
11982 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_getwcx", int_ftype_int
, ARM_BUILTIN_GETWCX
);
11984 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WSLLH
);
11985 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllw", v2si_ftype_v2si_di
, ARM_BUILTIN_WSLLW
);
11986 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wslld", di_ftype_di_di
, ARM_BUILTIN_WSLLD
);
11987 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSLLHI
);
11988 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllwi", v2si_ftype_v2si_int
, ARM_BUILTIN_WSLLWI
);
11989 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wslldi", di_ftype_di_int
, ARM_BUILTIN_WSLLDI
);
11991 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WSRLH
);
11992 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlw", v2si_ftype_v2si_di
, ARM_BUILTIN_WSRLW
);
11993 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrld", di_ftype_di_di
, ARM_BUILTIN_WSRLD
);
11994 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSRLHI
);
11995 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int
, ARM_BUILTIN_WSRLWI
);
11996 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrldi", di_ftype_di_int
, ARM_BUILTIN_WSRLDI
);
11998 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WSRAH
);
11999 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsraw", v2si_ftype_v2si_di
, ARM_BUILTIN_WSRAW
);
12000 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrad", di_ftype_di_di
, ARM_BUILTIN_WSRAD
);
12001 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSRAHI
);
12002 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrawi", v2si_ftype_v2si_int
, ARM_BUILTIN_WSRAWI
);
12003 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsradi", di_ftype_di_int
, ARM_BUILTIN_WSRADI
);
12005 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WRORH
);
12006 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorw", v2si_ftype_v2si_di
, ARM_BUILTIN_WRORW
);
12007 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrord", di_ftype_di_di
, ARM_BUILTIN_WRORD
);
12008 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WRORHI
);
12009 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorwi", v2si_ftype_v2si_int
, ARM_BUILTIN_WRORWI
);
12010 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrordi", di_ftype_di_int
, ARM_BUILTIN_WRORDI
);
12012 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSHUFH
);
12014 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi
, ARM_BUILTIN_WSADB
);
12015 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi
, ARM_BUILTIN_WSADH
);
12016 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi
, ARM_BUILTIN_WSADBZ
);
12017 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi
, ARM_BUILTIN_WSADHZ
);
12019 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmsb", int_ftype_v8qi_int
, ARM_BUILTIN_TEXTRMSB
);
12020 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmsh", int_ftype_v4hi_int
, ARM_BUILTIN_TEXTRMSH
);
12021 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmsw", int_ftype_v2si_int
, ARM_BUILTIN_TEXTRMSW
);
12022 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmub", int_ftype_v8qi_int
, ARM_BUILTIN_TEXTRMUB
);
12023 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmuh", int_ftype_v4hi_int
, ARM_BUILTIN_TEXTRMUH
);
12024 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmuw", int_ftype_v2si_int
, ARM_BUILTIN_TEXTRMUW
);
12025 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int
, ARM_BUILTIN_TINSRB
);
12026 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int
, ARM_BUILTIN_TINSRH
);
12027 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int
, ARM_BUILTIN_TINSRW
);
12029 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_waccb", di_ftype_v8qi
, ARM_BUILTIN_WACCB
);
12030 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wacch", di_ftype_v4hi
, ARM_BUILTIN_WACCH
);
12031 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_waccw", di_ftype_v2si
, ARM_BUILTIN_WACCW
);
12033 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmovmskb", int_ftype_v8qi
, ARM_BUILTIN_TMOVMSKB
);
12034 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmovmskh", int_ftype_v4hi
, ARM_BUILTIN_TMOVMSKH
);
12035 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmovmskw", int_ftype_v2si
, ARM_BUILTIN_TMOVMSKW
);
12037 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi
, ARM_BUILTIN_WPACKHSS
);
12038 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi
, ARM_BUILTIN_WPACKHUS
);
12039 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si
, ARM_BUILTIN_WPACKWUS
);
12040 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si
, ARM_BUILTIN_WPACKWSS
);
12041 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackdus", v2si_ftype_di_di
, ARM_BUILTIN_WPACKDUS
);
12042 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackdss", v2si_ftype_di_di
, ARM_BUILTIN_WPACKDSS
);
12044 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKEHUB
);
12045 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKEHUH
);
12046 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehuw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKEHUW
);
12047 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKEHSB
);
12048 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKEHSH
);
12049 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehsw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKEHSW
);
12050 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKELUB
);
12051 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKELUH
);
12052 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckeluw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKELUW
);
12053 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKELSB
);
12054 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKELSH
);
12055 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelsw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKELSW
);
12057 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi
, ARM_BUILTIN_WMACS
);
12058 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi
, ARM_BUILTIN_WMACSZ
);
12059 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi
, ARM_BUILTIN_WMACU
);
12060 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi
, ARM_BUILTIN_WMACUZ
);
12062 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int
, ARM_BUILTIN_WALIGN
);
12063 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmia", di_ftype_di_int_int
, ARM_BUILTIN_TMIA
);
12064 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiaph", di_ftype_di_int_int
, ARM_BUILTIN_TMIAPH
);
12065 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiabb", di_ftype_di_int_int
, ARM_BUILTIN_TMIABB
);
12066 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiabt", di_ftype_di_int_int
, ARM_BUILTIN_TMIABT
);
12067 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiatb", di_ftype_di_int_int
, ARM_BUILTIN_TMIATB
);
12068 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiatt", di_ftype_di_int_int
, ARM_BUILTIN_TMIATT
);
12072 arm_init_builtins (void)
12074 if (TARGET_REALLY_IWMMXT
)
12075 arm_init_iwmmxt_builtins ();
12078 /* Errors in the source file can cause expand_expr to return const0_rtx
12079 where we expect a vector. To avoid crashing, use one of the vector
12080 clear instructions. */
12083 safe_vector_operand (rtx x
, enum machine_mode mode
)
12085 if (x
!= const0_rtx
)
12087 x
= gen_reg_rtx (mode
);
12089 emit_insn (gen_iwmmxt_clrdi (mode
== DImode
? x
12090 : gen_rtx_SUBREG (DImode
, x
, 0)));
12094 /* Subroutine of arm_expand_builtin to take care of binop insns. */
12097 arm_expand_binop_builtin (enum insn_code icode
,
12098 tree arglist
, rtx target
)
12101 tree arg0
= TREE_VALUE (arglist
);
12102 tree arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12103 rtx op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12104 rtx op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12105 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
12106 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
12107 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
12109 if (VECTOR_MODE_P (mode0
))
12110 op0
= safe_vector_operand (op0
, mode0
);
12111 if (VECTOR_MODE_P (mode1
))
12112 op1
= safe_vector_operand (op1
, mode1
);
12115 || GET_MODE (target
) != tmode
12116 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12117 target
= gen_reg_rtx (tmode
);
12119 gcc_assert (GET_MODE (op0
) == mode0
&& GET_MODE (op1
) == mode1
);
12121 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12122 op0
= copy_to_mode_reg (mode0
, op0
);
12123 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12124 op1
= copy_to_mode_reg (mode1
, op1
);
12126 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
12133 /* Subroutine of arm_expand_builtin to take care of unop insns. */
12136 arm_expand_unop_builtin (enum insn_code icode
,
12137 tree arglist
, rtx target
, int do_load
)
12140 tree arg0
= TREE_VALUE (arglist
);
12141 rtx op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12142 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
12143 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
12146 || GET_MODE (target
) != tmode
12147 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12148 target
= gen_reg_rtx (tmode
);
12150 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
12153 if (VECTOR_MODE_P (mode0
))
12154 op0
= safe_vector_operand (op0
, mode0
);
12156 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12157 op0
= copy_to_mode_reg (mode0
, op0
);
12160 pat
= GEN_FCN (icode
) (target
, op0
);
12167 /* Expand an expression EXP that calls a built-in function,
12168 with result going to TARGET if that's convenient
12169 (and in mode MODE if that's convenient).
12170 SUBTARGET may be used as the target for computing one of EXP's operands.
12171 IGNORE is nonzero if the value is to be ignored. */
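/* Illustration (hypothetical user code, not part of this file): a
   source-level call such as

     __builtin_arm_waddb (a, b)

   on two 8x8-bit vector operands arrives here with FCODE equal to
   ARM_BUILTIN_WADDB; it is not handled by the switch below and so
   falls through to the bdesc_2arg scan, which dispatches it to
   arm_expand_binop_builtin.  */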
12174 arm_expand_builtin (tree exp
,
12176 rtx subtarget ATTRIBUTE_UNUSED
,
12177 enum machine_mode mode ATTRIBUTE_UNUSED
,
12178 int ignore ATTRIBUTE_UNUSED
)
12180 const struct builtin_description
* d
;
12181 enum insn_code icode
;
12182 tree fndecl
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
12183 tree arglist
= TREE_OPERAND (exp
, 1);
12191 int fcode
= DECL_FUNCTION_CODE (fndecl
);
12193 enum machine_mode tmode
;
12194 enum machine_mode mode0
;
12195 enum machine_mode mode1
;
12196 enum machine_mode mode2
;
12200 case ARM_BUILTIN_TEXTRMSB
:
12201 case ARM_BUILTIN_TEXTRMUB
:
12202 case ARM_BUILTIN_TEXTRMSH
:
12203 case ARM_BUILTIN_TEXTRMUH
:
12204 case ARM_BUILTIN_TEXTRMSW
:
12205 case ARM_BUILTIN_TEXTRMUW
:
12206 icode
= (fcode
== ARM_BUILTIN_TEXTRMSB
? CODE_FOR_iwmmxt_textrmsb
12207 : fcode
== ARM_BUILTIN_TEXTRMUB
? CODE_FOR_iwmmxt_textrmub
12208 : fcode
== ARM_BUILTIN_TEXTRMSH
? CODE_FOR_iwmmxt_textrmsh
12209 : fcode
== ARM_BUILTIN_TEXTRMUH
? CODE_FOR_iwmmxt_textrmuh
12210 : CODE_FOR_iwmmxt_textrmw
);
12212 arg0
= TREE_VALUE (arglist
);
12213 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12214 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12215 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12216 tmode
= insn_data
[icode
].operand
[0].mode
;
12217 mode0
= insn_data
[icode
].operand
[1].mode
;
12218 mode1
= insn_data
[icode
].operand
[2].mode
;
12220 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12221 op0
= copy_to_mode_reg (mode0
, op0
);
12222 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12224 /* @@@ better error message */
12225 error ("selector must be an immediate");
12226 return gen_reg_rtx (tmode
);
12229 || GET_MODE (target
) != tmode
12230 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12231 target
= gen_reg_rtx (tmode
);
12232 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
12238 case ARM_BUILTIN_TINSRB
:
12239 case ARM_BUILTIN_TINSRH
:
12240 case ARM_BUILTIN_TINSRW
:
12241 icode
= (fcode
== ARM_BUILTIN_TINSRB
? CODE_FOR_iwmmxt_tinsrb
12242 : fcode
== ARM_BUILTIN_TINSRH
? CODE_FOR_iwmmxt_tinsrh
12243 : CODE_FOR_iwmmxt_tinsrw
);
12244 arg0
= TREE_VALUE (arglist
);
12245 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12246 arg2
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist
)));
12247 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12248 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12249 op2
= expand_expr (arg2
, NULL_RTX
, VOIDmode
, 0);
12250 tmode
= insn_data
[icode
].operand
[0].mode
;
12251 mode0
= insn_data
[icode
].operand
[1].mode
;
12252 mode1
= insn_data
[icode
].operand
[2].mode
;
12253 mode2
= insn_data
[icode
].operand
[3].mode
;
12255 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12256 op0
= copy_to_mode_reg (mode0
, op0
);
12257 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12258 op1
= copy_to_mode_reg (mode1
, op1
);
12259 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
12261 /* @@@ better error message */
12262 error ("selector must be an immediate");
12266 || GET_MODE (target
) != tmode
12267 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12268 target
= gen_reg_rtx (tmode
);
12269 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
12275 case ARM_BUILTIN_SETWCX
:
12276 arg0
= TREE_VALUE (arglist
);
12277 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12278 op0
= force_reg (SImode
, expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0));
12279 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12280 emit_insn (gen_iwmmxt_tmcr (op1
, op0
));
12283 case ARM_BUILTIN_GETWCX
:
12284 arg0
= TREE_VALUE (arglist
);
12285 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12286 target
= gen_reg_rtx (SImode
);
12287 emit_insn (gen_iwmmxt_tmrc (target
, op0
));
12290 case ARM_BUILTIN_WSHUFH
:
12291 icode
= CODE_FOR_iwmmxt_wshufh
;
12292 arg0
= TREE_VALUE (arglist
);
12293 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12294 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12295 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12296 tmode
= insn_data
[icode
].operand
[0].mode
;
12297 mode1
= insn_data
[icode
].operand
[1].mode
;
12298 mode2
= insn_data
[icode
].operand
[2].mode
;
12300 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode1
))
12301 op0
= copy_to_mode_reg (mode1
, op0
);
12302 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode2
))
12304 /* @@@ better error message */
12305 error ("mask must be an immediate");
12309 || GET_MODE (target
) != tmode
12310 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12311 target
= gen_reg_rtx (tmode
);
12312 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
12318 case ARM_BUILTIN_WSADB
:
12319 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb
, arglist
, target
);
12320 case ARM_BUILTIN_WSADH
:
12321 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh
, arglist
, target
);
12322 case ARM_BUILTIN_WSADBZ
:
12323 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz
, arglist
, target
);
12324 case ARM_BUILTIN_WSADHZ
:
12325 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz
, arglist
, target
);
12327 /* Several three-argument builtins. */
12328 case ARM_BUILTIN_WMACS
:
12329 case ARM_BUILTIN_WMACU
:
12330 case ARM_BUILTIN_WALIGN
:
12331 case ARM_BUILTIN_TMIA
:
12332 case ARM_BUILTIN_TMIAPH
:
12333 case ARM_BUILTIN_TMIATT
:
12334 case ARM_BUILTIN_TMIATB
:
12335 case ARM_BUILTIN_TMIABT
:
12336 case ARM_BUILTIN_TMIABB
:
12337 icode
= (fcode
== ARM_BUILTIN_WMACS
? CODE_FOR_iwmmxt_wmacs
12338 : fcode
== ARM_BUILTIN_WMACU
? CODE_FOR_iwmmxt_wmacu
12339 : fcode
== ARM_BUILTIN_TMIA
? CODE_FOR_iwmmxt_tmia
12340 : fcode
== ARM_BUILTIN_TMIAPH
? CODE_FOR_iwmmxt_tmiaph
12341 : fcode
== ARM_BUILTIN_TMIABB
? CODE_FOR_iwmmxt_tmiabb
12342 : fcode
== ARM_BUILTIN_TMIABT
? CODE_FOR_iwmmxt_tmiabt
12343 : fcode
== ARM_BUILTIN_TMIATB
? CODE_FOR_iwmmxt_tmiatb
12344 : fcode
== ARM_BUILTIN_TMIATT
? CODE_FOR_iwmmxt_tmiatt
12345 : CODE_FOR_iwmmxt_walign
);
12346 arg0
= TREE_VALUE (arglist
);
12347 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12348 arg2
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist
)));
12349 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12350 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12351 op2
= expand_expr (arg2
, NULL_RTX
, VOIDmode
, 0);
12352 tmode
= insn_data
[icode
].operand
[0].mode
;
12353 mode0
= insn_data
[icode
].operand
[1].mode
;
12354 mode1
= insn_data
[icode
].operand
[2].mode
;
12355 mode2
= insn_data
[icode
].operand
[3].mode
;
12357 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12358 op0
= copy_to_mode_reg (mode0
, op0
);
12359 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12360 op1
= copy_to_mode_reg (mode1
, op1
);
12361 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
12362 op2
= copy_to_mode_reg (mode2
, op2
);
12364 || GET_MODE (target
) != tmode
12365 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12366 target
= gen_reg_rtx (tmode
);
12367 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
12373 case ARM_BUILTIN_WZERO
:
12374 target
= gen_reg_rtx (DImode
);
12375 emit_insn (gen_iwmmxt_clrdi (target
));
12382 for (i
= 0, d
= bdesc_2arg
; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
12383 if (d
->code
== (const enum arm_builtins
) fcode
)
12384 return arm_expand_binop_builtin (d
->icode
, arglist
, target
);
12386 for (i
= 0, d
= bdesc_1arg
; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
12387 if (d
->code
== (const enum arm_builtins
) fcode
)
12388 return arm_expand_unop_builtin (d
->icode
, arglist
, target
, 0);
12390 /* @@@ Should really do something sensible here. */
12394 /* Return the number (counting from 0) of
12395 the least significant set bit in MASK. */
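/* Example (not from the original sources): for MASK == 0x14 (binary
   10100) the result is 2, the index of the lowest set bit.  Callers
   such as thumb_exit use this to pick the first register named in a
   register mask.  */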
12398 number_of_first_bit_set (unsigned mask
)
12403 (mask
& (1 << bit
)) == 0;
12410 /* Emit code to push or pop registers to or from the stack. F is the
12411 assembly file. MASK is the registers to push or pop. PUSH is
12412 nonzero if we should push, and zero if we should pop. For debugging
12413 output, if pushing, adjust CFA_OFFSET by the amount of space added
12414 to the stack. REAL_REGS should have the same number of bits set as
12415 MASK, and will be used instead (in the same order) to describe which
12416 registers were saved - this is used to mark the save slots when we
12417 push high registers after moving them to low registers. */
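/* Illustration (assumed operands, not from the original sources):
   with PUSH nonzero and MASK == 0x4007 (r0-r2 plus lr) this routine
   emits

	push	{r0, r1, r2, lr}

   together with a matching .save directive when EABI unwind tables
   are enabled and CFI notes when dwarf2 frame output is requested.  */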
12419 thumb_pushpop (FILE *f
, unsigned long mask
, int push
, int *cfa_offset
,
12420 unsigned long real_regs
)
12423 int lo_mask
= mask
& 0xFF;
12424 int pushed_words
= 0;
12428 if (lo_mask
== 0 && !push
&& (mask
& (1 << PC_REGNUM
)))
/* Special case.  Do not generate a POP PC statement here; do it in
   thumb_exit.  */
12432 thumb_exit (f
, -1);
12436 if (ARM_EABI_UNWIND_TABLES
&& push
)
12438 fprintf (f
, "\t.save\t{");
12439 for (regno
= 0; regno
< 15; regno
++)
12441 if (real_regs
& (1 << regno
))
12443 if (real_regs
& ((1 << regno
) -1))
12445 asm_fprintf (f
, "%r", regno
);
12448 fprintf (f
, "}\n");
12451 fprintf (f
, "\t%s\t{", push
? "push" : "pop");
12453 /* Look at the low registers first. */
12454 for (regno
= 0; regno
<= LAST_LO_REGNUM
; regno
++, lo_mask
>>= 1)
12458 asm_fprintf (f
, "%r", regno
);
12460 if ((lo_mask
& ~1) != 0)
12467 if (push
&& (mask
& (1 << LR_REGNUM
)))
12469 /* Catch pushing the LR. */
12473 asm_fprintf (f
, "%r", LR_REGNUM
);
12477 else if (!push
&& (mask
& (1 << PC_REGNUM
)))
12479 /* Catch popping the PC. */
12480 if (TARGET_INTERWORK
|| TARGET_BACKTRACE
12481 || current_function_calls_eh_return
)
/* The PC is never popped directly; instead
   it is popped into r3 and then BX is used.  */
12485 fprintf (f
, "}\n");
12487 thumb_exit (f
, -1);
12496 asm_fprintf (f
, "%r", PC_REGNUM
);
12500 fprintf (f
, "}\n");
12502 if (push
&& pushed_words
&& dwarf2out_do_frame ())
12504 char *l
= dwarf2out_cfi_label ();
12505 int pushed_mask
= real_regs
;
12507 *cfa_offset
+= pushed_words
* 4;
12508 dwarf2out_def_cfa (l
, SP_REGNUM
, *cfa_offset
);
12511 pushed_mask
= real_regs
;
12512 for (regno
= 0; regno
<= 14; regno
++, pushed_mask
>>= 1)
12514 if (pushed_mask
& 1)
12515 dwarf2out_reg_save (l
, regno
, 4 * pushed_words
++ - *cfa_offset
);
12520 /* Generate code to return from a thumb function.
12521 If 'reg_containing_return_addr' is -1, then the return address is
12522 actually on the stack, at the stack pointer. */
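/* Illustration (not part of the original sources): in the simplest
   case, where the return address is already in LR and nothing needs
   popping, this routine emits just

	bx	lr

   The longer paths below shuffle popped values through the argument
   registers when interworking, a backtrace structure, or a
   __builtin_eh_return sequence forces a slower return.  */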
12524 thumb_exit (FILE *f
, int reg_containing_return_addr
)
12526 unsigned regs_available_for_popping
;
12527 unsigned regs_to_pop
;
12529 unsigned available
;
12533 int restore_a4
= FALSE
;
12535 /* Compute the registers we need to pop. */
12539 if (reg_containing_return_addr
== -1)
12541 regs_to_pop
|= 1 << LR_REGNUM
;
12545 if (TARGET_BACKTRACE
)
12547 /* Restore the (ARM) frame pointer and stack pointer. */
12548 regs_to_pop
|= (1 << ARM_HARD_FRAME_POINTER_REGNUM
) | (1 << SP_REGNUM
);
12552 /* If there is nothing to pop then just emit the BX instruction and
12554 if (pops_needed
== 0)
12556 if (current_function_calls_eh_return
)
12557 asm_fprintf (f
, "\tadd\t%r, %r\n", SP_REGNUM
, ARM_EH_STACKADJ_REGNUM
);
12559 asm_fprintf (f
, "\tbx\t%r\n", reg_containing_return_addr
);
12562 /* Otherwise if we are not supporting interworking and we have not created
12563 a backtrace structure and the function was not entered in ARM mode then
12564 just pop the return address straight into the PC. */
12565 else if (!TARGET_INTERWORK
12566 && !TARGET_BACKTRACE
12567 && !is_called_in_ARM_mode (current_function_decl
)
12568 && !current_function_calls_eh_return
)
12570 asm_fprintf (f
, "\tpop\t{%r}\n", PC_REGNUM
);
12574 /* Find out how many of the (return) argument registers we can corrupt. */
12575 regs_available_for_popping
= 0;
12577 /* If returning via __builtin_eh_return, the bottom three registers
12578 all contain information needed for the return. */
12579 if (current_function_calls_eh_return
)
/* We can deduce the registers used from the function's
   return value.  This is more reliable than examining
   regs_ever_live[] because that will be set if the register is
   ever used in the function, not just if the register is used
   to hold a return value.  */
12589 if (current_function_return_rtx
!= 0)
12590 mode
= GET_MODE (current_function_return_rtx
);
12592 mode
= DECL_MODE (DECL_RESULT (current_function_decl
));
12594 size
= GET_MODE_SIZE (mode
);
12598 /* In a void function we can use any argument register.
12599 In a function that returns a structure on the stack
12600 we can use the second and third argument registers. */
12601 if (mode
== VOIDmode
)
12602 regs_available_for_popping
=
12603 (1 << ARG_REGISTER (1))
12604 | (1 << ARG_REGISTER (2))
12605 | (1 << ARG_REGISTER (3));
12607 regs_available_for_popping
=
12608 (1 << ARG_REGISTER (2))
12609 | (1 << ARG_REGISTER (3));
12611 else if (size
<= 4)
12612 regs_available_for_popping
=
12613 (1 << ARG_REGISTER (2))
12614 | (1 << ARG_REGISTER (3));
12615 else if (size
<= 8)
12616 regs_available_for_popping
=
12617 (1 << ARG_REGISTER (3));
12620 /* Match registers to be popped with registers into which we pop them. */
12621 for (available
= regs_available_for_popping
,
12622 required
= regs_to_pop
;
12623 required
!= 0 && available
!= 0;
12624 available
&= ~(available
& - available
),
12625 required
&= ~(required
& - required
))
12628 /* If we have any popping registers left over, remove them. */
12630 regs_available_for_popping
&= ~available
;
12632 /* Otherwise if we need another popping register we can use
12633 the fourth argument register. */
12634 else if (pops_needed
)
12636 /* If we have not found any free argument registers and
12637 reg a4 contains the return address, we must move it. */
12638 if (regs_available_for_popping
== 0
12639 && reg_containing_return_addr
== LAST_ARG_REGNUM
)
12641 asm_fprintf (f
, "\tmov\t%r, %r\n", LR_REGNUM
, LAST_ARG_REGNUM
);
12642 reg_containing_return_addr
= LR_REGNUM
;
12644 else if (size
> 12)
12646 /* Register a4 is being used to hold part of the return value,
12647 but we have dire need of a free, low register. */
12650 asm_fprintf (f
, "\tmov\t%r, %r\n",IP_REGNUM
, LAST_ARG_REGNUM
);
12653 if (reg_containing_return_addr
!= LAST_ARG_REGNUM
)
12655 /* The fourth argument register is available. */
12656 regs_available_for_popping
|= 1 << LAST_ARG_REGNUM
;
12662 /* Pop as many registers as we can. */
12663 thumb_pushpop (f
, regs_available_for_popping
, FALSE
, NULL
,
12664 regs_available_for_popping
);
12666 /* Process the registers we popped. */
12667 if (reg_containing_return_addr
== -1)
12669 /* The return address was popped into the lowest numbered register. */
12670 regs_to_pop
&= ~(1 << LR_REGNUM
);
12672 reg_containing_return_addr
=
12673 number_of_first_bit_set (regs_available_for_popping
);
/* Remove this register from the mask of available registers, so that
   the return address will not be corrupted by further pops.  */
12677 regs_available_for_popping
&= ~(1 << reg_containing_return_addr
);
12680 /* If we popped other registers then handle them here. */
12681 if (regs_available_for_popping
)
12685 /* Work out which register currently contains the frame pointer. */
12686 frame_pointer
= number_of_first_bit_set (regs_available_for_popping
);
12688 /* Move it into the correct place. */
12689 asm_fprintf (f
, "\tmov\t%r, %r\n",
12690 ARM_HARD_FRAME_POINTER_REGNUM
, frame_pointer
);
12692 /* (Temporarily) remove it from the mask of popped registers. */
12693 regs_available_for_popping
&= ~(1 << frame_pointer
);
12694 regs_to_pop
&= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM
);
12696 if (regs_available_for_popping
)
12700 /* We popped the stack pointer as well,
12701 find the register that contains it. */
12702 stack_pointer
= number_of_first_bit_set (regs_available_for_popping
);
12704 /* Move it into the stack register. */
12705 asm_fprintf (f
, "\tmov\t%r, %r\n", SP_REGNUM
, stack_pointer
);
12707 /* At this point we have popped all necessary registers, so
12708 do not worry about restoring regs_available_for_popping
12709 to its correct value:
12711 assert (pops_needed == 0)
12712 assert (regs_available_for_popping == (1 << frame_pointer))
12713 assert (regs_to_pop == (1 << STACK_POINTER)) */
/* Since we have just moved the popped value into the frame
   pointer, the popping register is available for reuse, and
   we know that we still have the stack pointer left to pop.  */
12720 regs_available_for_popping
|= (1 << frame_pointer
);
12724 /* If we still have registers left on the stack, but we no longer have
12725 any registers into which we can pop them, then we must move the return
12726 address into the link register and make available the register that
12728 if (regs_available_for_popping
== 0 && pops_needed
> 0)
12730 regs_available_for_popping
|= 1 << reg_containing_return_addr
;
12732 asm_fprintf (f
, "\tmov\t%r, %r\n", LR_REGNUM
,
12733 reg_containing_return_addr
);
12735 reg_containing_return_addr
= LR_REGNUM
;
12738 /* If we have registers left on the stack then pop some more.
12739 We know that at most we will want to pop FP and SP. */
12740 if (pops_needed
> 0)
12745 thumb_pushpop (f
, regs_available_for_popping
, FALSE
, NULL
,
12746 regs_available_for_popping
);
12748 /* We have popped either FP or SP.
12749 Move whichever one it is into the correct register. */
12750 popped_into
= number_of_first_bit_set (regs_available_for_popping
);
12751 move_to
= number_of_first_bit_set (regs_to_pop
);
12753 asm_fprintf (f
, "\tmov\t%r, %r\n", move_to
, popped_into
);
12755 regs_to_pop
&= ~(1 << move_to
);
12760 /* If we still have not popped everything then we must have only
12761 had one register available to us and we are now popping the SP. */
12762 if (pops_needed
> 0)
12766 thumb_pushpop (f
, regs_available_for_popping
, FALSE
, NULL
,
12767 regs_available_for_popping
);
12769 popped_into
= number_of_first_bit_set (regs_available_for_popping
);
12771 asm_fprintf (f
, "\tmov\t%r, %r\n", SP_REGNUM
, popped_into
);
12773 assert (regs_to_pop == (1 << STACK_POINTER))
12774 assert (pops_needed == 1)
12778 /* If necessary restore the a4 register. */
12781 if (reg_containing_return_addr
!= LR_REGNUM
)
12783 asm_fprintf (f
, "\tmov\t%r, %r\n", LR_REGNUM
, LAST_ARG_REGNUM
);
12784 reg_containing_return_addr
= LR_REGNUM
;
12787 asm_fprintf (f
, "\tmov\t%r, %r\n", LAST_ARG_REGNUM
, IP_REGNUM
);
12790 if (current_function_calls_eh_return
)
12791 asm_fprintf (f
, "\tadd\t%r, %r\n", SP_REGNUM
, ARM_EH_STACKADJ_REGNUM
);
12793 /* Return to caller. */
12794 asm_fprintf (f
, "\tbx\t%r\n", reg_containing_return_addr
);
12799 thumb_final_prescan_insn (rtx insn
)
12801 if (flag_print_asm_name
)
12802 asm_fprintf (asm_out_file
, "%@ 0x%04x\n",
12803 INSN_ADDRESSES (INSN_UID (insn
)));
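/* thumb_shiftable_const below tests whether VAL is an 8-bit constant
   shifted left by some amount.  Example (not from the original
   sources): 0x00ff0000 is 0xff shifted left by 16 and matches,
   whereas 0x00ff00ff needs more than eight significant bits and does
   not.  */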
12807 thumb_shiftable_const (unsigned HOST_WIDE_INT val
)
12809 unsigned HOST_WIDE_INT mask
= 0xff;
12812 if (val
== 0) /* XXX */
12815 for (i
= 0; i
< 25; i
++)
12816 if ((val
& (mask
<< i
)) == val
)
12822 /* Returns nonzero if the current function contains,
12823 or might contain a far jump. */
12825 thumb_far_jump_used_p (void)
12829 /* This test is only important for leaf functions. */
12830 /* assert (!leaf_function_p ()); */
/* If we have already decided that far jumps may be used,
   do not bother checking again, and always return true even if
   it turns out that they are not being used.  Once we have made
   the decision that far jumps are present (and hence that the link
   register will be pushed onto the stack) we cannot go back on it.  */
12837 if (cfun
->machine
->far_jump_used
)
12840 /* If this function is not being called from the prologue/epilogue
12841 generation code then it must be being called from the
12842 INITIAL_ELIMINATION_OFFSET macro. */
12843 if (!(ARM_DOUBLEWORD_ALIGN
|| reload_completed
))
/* In this case we know that we are being asked about the elimination
   of the arg pointer register.  If that register is not being used,
   then there are no arguments on the stack, and we do not have to
   worry that a far jump might force the prologue to push the link
   register, changing the stack offsets.  In this case we can just
   return false, since the presence of far jumps in the function will
   not affect stack offsets.

   If the arg pointer is live (or if it was live, but has now been
   eliminated and so set to dead) then we do have to test to see if
   the function might contain a far jump.  This test can lead to some
   false negatives, since before reload is completed the length of
   branch instructions is not known, so gcc defaults to returning their
   longest length, which in turn sets the far jump attribute to true.

   A false negative will not result in bad code being generated, but it
   will result in a needless push and pop of the link register.  We
   hope that this does not occur too often.

   If we need doubleword stack alignment this could affect the other
   elimination offsets so we can't risk getting it wrong.  */
12866 if (regs_ever_live
[ARG_POINTER_REGNUM
])
12867 cfun
->machine
->arg_pointer_live
= 1;
12868 else if (!cfun
->machine
->arg_pointer_live
)
12872 /* Check to see if the function contains a branch
12873 insn with the far jump attribute set. */
12874 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
12876 if (GET_CODE (insn
) == JUMP_INSN
12877 /* Ignore tablejump patterns. */
12878 && GET_CODE (PATTERN (insn
)) != ADDR_VEC
12879 && GET_CODE (PATTERN (insn
)) != ADDR_DIFF_VEC
12880 && get_attr_far_jump (insn
) == FAR_JUMP_YES
12883 /* Record the fact that we have decided that
12884 the function does use far jumps. */
12885 cfun
->machine
->far_jump_used
= 1;
12893 /* Return nonzero if FUNC must be entered in ARM mode. */
12895 is_called_in_ARM_mode (tree func
)
12897 gcc_assert (TREE_CODE (func
) == FUNCTION_DECL
);
12899 /* Ignore the problem about functions whose address is taken. */
12900 if (TARGET_CALLEE_INTERWORKING
&& TREE_PUBLIC (func
))
12904 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func
)) != NULL_TREE
;
12910 /* The bits which aren't usefully expanded as rtl. */
12912 thumb_unexpanded_epilogue (void)
12915 unsigned long live_regs_mask
= 0;
12916 int high_regs_pushed
= 0;
12917 int had_to_push_lr
;
12920 if (return_used_this_function
)
12923 if (IS_NAKED (arm_current_func_type ()))
12926 live_regs_mask
= thumb_compute_save_reg_mask ();
12927 high_regs_pushed
= bit_count (live_regs_mask
& 0x0f00);
/* We can deduce the registers used from the function's return value.
   This is more reliable than examining regs_ever_live[] because that
   will be set if the register is ever used in the function, not just if
   the register is used to hold a return value.  */
12933 size
= arm_size_return_regs ();
12935 /* The prolog may have pushed some high registers to use as
12936 work registers. e.g. the testsuite file:
12937 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
12938 compiles to produce:
12939 push {r4, r5, r6, r7, lr}
12943 as part of the prolog. We have to undo that pushing here. */
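/* Illustration (assumed register choice, not from the original
   sources): undoing a prologue that saved r8 via a low register, e.g.
   "mov r7, r8" followed by "push {r7}", means the code below must
   "pop {r7}" into a free low register and then "mov r8, r7" before
   returning.  */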
  if (high_regs_pushed)
    {
      unsigned long mask = live_regs_mask & 0xff;
      int next_hi_reg;

      /* The available low registers depend on the size of the value we are
	 returning.  */

      if (mask == 0)
	/* Oh dear!  We have no low registers into which we can pop
	   the high registers.  */
	internal_error
	  ("no low registers available for popping high registers");

      for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
	if (live_regs_mask & (1 << next_hi_reg))
	  break;

      while (high_regs_pushed)
	{
	  /* Find lo register(s) into which the high register(s) can
	     be popped.  */
	  for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
	    {
	      if (mask & (1 << regno))
		high_regs_pushed--;
	      if (high_regs_pushed == 0)
		break;
	    }

	  mask &= (2 << regno) - 1;	/* A noop if regno == 8 */

	  /* Pop the values into the low register(s).  */
	  thumb_pushpop (asm_out_file, mask, 0, NULL, mask);

	  /* Move the value(s) into the high registers.  */
	  for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
	    {
	      if (mask & (1 << regno))
		{
		  asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
			       regno);

		  for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
		    if (live_regs_mask & (1 << next_hi_reg))
		      break;
		}
	    }
	}
      live_regs_mask &= ~0x0f00;
    }

  had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
  live_regs_mask &= 0xff;

  if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
    {
      /* Pop the return address into the PC.  */
      if (had_to_push_lr)
	live_regs_mask |= 1 << PC_REGNUM;

      /* Either no argument registers were pushed or a backtrace
	 structure was created which includes an adjusted stack
	 pointer, so just pop everything.  */
      if (live_regs_mask)
	thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
		       live_regs_mask);

      /* We have either just popped the return address into the
	 PC or it was kept in LR for the entire function.  */
      if (!had_to_push_lr)
	thumb_exit (asm_out_file, LR_REGNUM);
    }
  else
    {
      /* Pop everything but the return address.  */
      if (live_regs_mask)
	thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
		       live_regs_mask);

      if (had_to_push_lr)
	{
	  /* We have no free low regs, so save one.  */
	  asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
		       LAST_ARG_REGNUM);

	  /* Get the return address into a temporary register.  */
	  thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
			 1 << LAST_ARG_REGNUM);

	  /* Move the return address to lr.  */
	  asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
		       LAST_ARG_REGNUM);

	  /* Restore the low register.  */
	  asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
		       IP_REGNUM);
	  regno = LR_REGNUM;
	}
      else
	regno = LAST_ARG_REGNUM;

      /* Remove the argument registers that were pushed onto the stack.  */
      asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
		   SP_REGNUM, SP_REGNUM,
		   current_function_pretend_args_size);

      thumb_exit (asm_out_file, regno);
    }

  return "";
}
/* Functions to save and restore machine-specific function data.  */
static struct machine_function *
arm_init_machine_status (void)
{
  struct machine_function *machine;
  machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));

#if ARM_FT_UNKNOWN != 0
  machine->func_type = ARM_FT_UNKNOWN;
#endif
  return machine;
}
/* Return an RTX indicating where the return address to the
   calling function can be found.  */
rtx
arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return NULL_RTX;

  return get_hard_reg_initial_val (Pmode, LR_REGNUM);
}
/* Do anything needed before RTL is emitted for each function.  */
void
arm_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.  */
  init_machine_status = arm_init_machine_status;

  /* This is to stop the combine pass optimizing away the alignment
     adjustment of va_arg.  */
  /* ??? It is claimed that this should not be necessary.  */
  if (cfun)
    mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
}
/* Like arm_compute_initial_elimination_offset.  Simpler because
   THUMB_HARD_FRAME_POINTER isn't actually the ABI specified frame pointer.  */
HOST_WIDE_INT
thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
{
  arm_stack_offsets *offsets;

  offsets = arm_get_frame_offsets ();

  switch (from)
    {
    case ARG_POINTER_REGNUM:
      switch (to)
	{
	case STACK_POINTER_REGNUM:
	  return offsets->outgoing_args - offsets->saved_args;

	case FRAME_POINTER_REGNUM:
	  return offsets->soft_frame - offsets->saved_args;

	case THUMB_HARD_FRAME_POINTER_REGNUM:
	case ARM_HARD_FRAME_POINTER_REGNUM:
	  return offsets->saved_regs - offsets->saved_args;

	default:
	  gcc_unreachable ();
	}
      break;

    case FRAME_POINTER_REGNUM:
      switch (to)
	{
	case STACK_POINTER_REGNUM:
	  return offsets->outgoing_args - offsets->soft_frame;

	case THUMB_HARD_FRAME_POINTER_REGNUM:
	case ARM_HARD_FRAME_POINTER_REGNUM:
	  return offsets->saved_regs - offsets->soft_frame;

	default:
	  gcc_unreachable ();
	}
      break;

    default:
      gcc_unreachable ();
    }
}
/* Generate the rest of a function's prologue.  */
void
thumb_expand_prologue (void)
{
  rtx insn, dwarf;

  HOST_WIDE_INT amount;
  arm_stack_offsets *offsets;
  unsigned long func_type;
  int regno;
  unsigned long live_regs_mask;

  func_type = arm_current_func_type ();

  /* Naked functions don't have prologues.  */
  if (IS_NAKED (func_type))
    return;

  if (IS_INTERRUPT (func_type))
    {
      error ("interrupt Service Routines cannot be coded in Thumb mode");
      return;
    }

  live_regs_mask = thumb_compute_save_reg_mask ();
  /* Load the pic register before setting the frame pointer,
     so we can use r7 as a temporary work register.  */
  if (flag_pic)
    arm_load_pic_register (thumb_find_work_register (live_regs_mask));

  offsets = arm_get_frame_offsets ();

  if (frame_pointer_needed)
    {
      insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
				   stack_pointer_rtx));
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else if (CALLER_INTERWORKING_SLOT_SIZE > 0)
    emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
		    stack_pointer_rtx);

  amount = offsets->outgoing_args - offsets->saved_regs;
  if (amount)
    {
      if (amount < 512)
	{
	  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
					GEN_INT (- amount)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	{
	  rtx reg;

	  /* The stack decrement is too big for an immediate value in a single
	     insn.  In theory we could issue multiple subtracts, but after
	     three of them it becomes more space efficient to place the full
	     value in the constant pool and load into a register.  (Also the
	     ARM debugger really likes to see only one stack decrement per
	     function).  So instead we look for a scratch register into which
	     we can load the decrement, and then we subtract this from the
	     stack pointer.  Unfortunately on the thumb the only available
	     scratch registers are the argument registers, and we cannot use
	     these as they may hold arguments to the function.  Instead we
	     attempt to locate a call preserved register which is used by this
	     function.  If we can find one, then we know that it will have
	     been pushed at the start of the prologue and so we can corrupt
	     it now.  */
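
	  /* Illustrative sketch only (register and constant are examples):
	     for a large frame the sequence below comes out roughly as

		ldr	r4, .Lframe	@ r4 was already saved by the prologue
		add	sp, sp, r4	@ .Lframe holds the negated frame size

	     with the constant placed in the literal pool rather than split
	     across several sub instructions.  */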
	  for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
	    if (live_regs_mask & (1 << regno)
		&& !(frame_pointer_needed
		     && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
	      break;

	  if (regno > LAST_LO_REGNUM) /* Very unlikely.  */
	    {
	      rtx spare = gen_rtx_REG (SImode, IP_REGNUM);

	      /* Choose an arbitrary, non-argument low register.  */
	      reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);

	      /* Save it by copying it into a high, scratch register.  */
	      emit_insn (gen_movsi (spare, reg));
	      /* Add a USE to stop propagate_one_insn() from barfing.  */
	      emit_insn (gen_prologue_use (spare));

	      /* Decrement the stack.  */
	      emit_insn (gen_movsi (reg, GEN_INT (- amount)));
	      insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
					    stack_pointer_rtx, reg));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
				   plus_constant (stack_pointer_rtx,
						  -amount));
	      RTX_FRAME_RELATED_P (dwarf) = 1;
	      REG_NOTES (insn)
		= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
				     REG_NOTES (insn));

	      /* Restore the low register's original value.  */
	      emit_insn (gen_movsi (reg, spare));

	      /* Emit a USE of the restored scratch register, so that flow
		 analysis will not consider the restore redundant.  The
		 register won't be used again in this function and isn't
		 restored by the epilogue.  */
	      emit_insn (gen_prologue_use (reg));
	    }
	  else
	    {
	      reg = gen_rtx_REG (SImode, regno);

	      emit_insn (gen_movsi (reg, GEN_INT (- amount)));

	      insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
					    stack_pointer_rtx, reg));
	      RTX_FRAME_RELATED_P (insn) = 1;
	      dwarf = gen_rtx_SET (SImode, stack_pointer_rtx,
				   plus_constant (stack_pointer_rtx,
						  -amount));
	      RTX_FRAME_RELATED_P (dwarf) = 1;
	      REG_NOTES (insn)
		= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
				     REG_NOTES (insn));
	    }
	}
    }

  /* If the frame pointer is needed, emit a special barrier that
     will prevent the scheduler from moving stores to the frame
     before the stack adjustment.  */
  if (frame_pointer_needed)
    emit_insn (gen_stack_tie (stack_pointer_rtx,
			      hard_frame_pointer_rtx));

  if (current_function_profile || !TARGET_SCHED_PROLOG)
    emit_insn (gen_blockage ());

  cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
  if (live_regs_mask & 0xff)
    cfun->machine->lr_save_eliminated = 0;

  /* If the link register is being kept alive, with the return address in it,
     then make sure that it does not get reused by the ce2 pass.  */
  if (cfun->machine->lr_save_eliminated)
    emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
}
void
thumb_expand_epilogue (void)
{
  HOST_WIDE_INT amount;
  arm_stack_offsets *offsets;
  int regno;

  /* Naked functions don't have epilogues.  */
  if (IS_NAKED (arm_current_func_type ()))
    return;

  offsets = arm_get_frame_offsets ();
  amount = offsets->outgoing_args - offsets->saved_regs;

  if (frame_pointer_needed)
    emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
  else if (amount)
    {
      if (amount < 512)
	emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
			       GEN_INT (amount)));
      else
	{
	  /* r3 is always free in the epilogue.  */
	  rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);

	  emit_insn (gen_movsi (reg, GEN_INT (amount)));
	  emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
	}
    }

  /* Emit a USE (stack_pointer_rtx), so that
     the stack adjustment will not be deleted.  */
  emit_insn (gen_prologue_use (stack_pointer_rtx));

  if (current_function_profile || !TARGET_SCHED_PROLOG)
    emit_insn (gen_blockage ());

  /* Emit a clobber for each insn that will be restored in the epilogue,
     so that flow2 will get register lifetimes correct.  */
  for (regno = 0; regno < 13; regno++)
    if (regs_ever_live[regno] && !call_used_regs[regno])
      emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));

  if (! regs_ever_live[LR_REGNUM])
    emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
}
static void
thumb_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  unsigned long live_regs_mask = 0;
  unsigned long l_mask;
  unsigned high_regs_pushed = 0;
  int cfa_offset = 0;
  int regno;

  if (IS_NAKED (arm_current_func_type ()))
    return;

  if (is_called_in_ARM_mode (current_function_decl))
    {
      const char * name;

      gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
      gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
		  == SYMBOL_REF);
      name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      /* Generate code sequence to switch us into Thumb mode.  */
      /* The .code 32 directive has already been emitted by
	 ASM_DECLARE_FUNCTION_NAME.  */
      asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
      asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);

      /* Generate a label, so that the debugger will notice the
	 change in instruction sets.  This label is also used by
	 the assembler to bypass the ARM code when this function
	 is called from a Thumb encoded function elsewhere in the
	 same file.  Hence the definition of STUB_NAME here must
	 agree with the definition in gas/config/tc-arm.c.  */
#define STUB_NAME ".real_start_of"
      fprintf (f, "\t.code\t16\n");

      if (arm_dllexport_name_p (name))
	name = arm_strip_name_encoding (name);

      asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
      fprintf (f, "\t.thumb_func\n");
      asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
    }
  if (current_function_pretend_args_size)
    {
      /* Output unwind directive for the stack adjustment.  */
      if (ARM_EABI_UNWIND_TABLES)
	fprintf (f, "\t.pad #%d\n",
		 current_function_pretend_args_size);

      if (cfun->machine->uses_anonymous_args)
	{
	  int num_pushes;

	  fprintf (f, "\tpush\t{");

	  num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);

	  for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
	       regno <= LAST_ARG_REGNUM;
	       regno++)
	    asm_fprintf (f, "%r%s", regno,
			 regno == LAST_ARG_REGNUM ? "" : ", ");

	  fprintf (f, "}\n");
	}
      else
	asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
		     SP_REGNUM, SP_REGNUM,
		     current_function_pretend_args_size);

      /* We don't need to record the stores for unwinding (would it
	 help the debugger any if we did?), but record the change in
	 the stack pointer.  */
      if (dwarf2out_do_frame ())
	{
	  char *l = dwarf2out_cfi_label ();

	  cfa_offset = cfa_offset + current_function_pretend_args_size;
	  dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
	}
    }

  /* Get the registers we are going to push.  */
  live_regs_mask = thumb_compute_save_reg_mask ();
  /* Extract a mask of the ones we can give to the Thumb's push instruction.  */
  l_mask = live_regs_mask & 0x40ff;
  /* Then count how many other high registers will need to be pushed.  */
  high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
  if (TARGET_BACKTRACE)
    {
      unsigned offset;
      unsigned work_register;

      /* We have been asked to create a stack backtrace structure.
	 The code looks like this:

	 0   sub   SP, #16	    Reserve space for 4 registers.
	 2   push  {R7}		    Push low registers.
	 4   add   R7, SP, #20	    Get the stack pointer before the push.
	 6   str   R7, [SP, #8]	    Store the stack pointer (before reserving the space).
	 8   mov   R7, PC	    Get hold of the start of this code plus 12.
	 10  str   R7, [SP, #16]    Store it.
	 12  mov   R7, FP	    Get hold of the current frame pointer.
	 14  str   R7, [SP, #4]	    Store it.
	 16  mov   R7, LR	    Get hold of the current return address.
	 18  str   R7, [SP, #12]    Store it.
	 20  add   R7, SP, #16	    Point at the start of the backtrace structure.
	 22  mov   FP, R7	    Put this value into the frame pointer.  */

      work_register = thumb_find_work_register (live_regs_mask);

      if (ARM_EABI_UNWIND_TABLES)
	asm_fprintf (f, "\t.pad #16\n");

      asm_fprintf
	(f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
	 SP_REGNUM, SP_REGNUM);

      if (dwarf2out_do_frame ())
	{
	  char *l = dwarf2out_cfi_label ();

	  cfa_offset = cfa_offset + 16;
	  dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
	}

      if (l_mask)
	{
	  thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
	  offset = bit_count (l_mask);
	}
      else
	offset = 0;

      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
		   offset + 16 + current_function_pretend_args_size);

      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		   offset + 4);

      /* Make sure that the instruction fetching the PC is in the right place
	 to calculate "start of backtrace creation code + 12".  */
      if (l_mask)
	{
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset + 12);
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
		       ARM_HARD_FRAME_POINTER_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset);
	}
      else
	{
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
		       ARM_HARD_FRAME_POINTER_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset);
	  asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
	  asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		       offset + 12);
	}

      asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
		   offset + 8);
      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
		   offset + 12);
      asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
		   ARM_HARD_FRAME_POINTER_REGNUM, work_register);
    }
  /* Optimization: If we are not pushing any low registers but we are going
     to push some high registers then delay our first push.  This will just
     be a push of LR and we can combine it with the push of the first high
     register.  */
  else if ((l_mask & 0xff) != 0
	   || (high_regs_pushed == 0 && l_mask))
    thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);

  if (high_regs_pushed)
    {
      unsigned pushable_regs;
      unsigned next_hi_reg;

      for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
	if (live_regs_mask & (1 << next_hi_reg))
	  break;

      pushable_regs = l_mask & 0xff;

      if (pushable_regs == 0)
	pushable_regs = 1 << thumb_find_work_register (live_regs_mask);

      while (high_regs_pushed > 0)
	{
	  unsigned long real_regs_mask = 0;

	  for (regno = LAST_LO_REGNUM; regno >= 0; regno--)
	    {
	      if (pushable_regs & (1 << regno))
		{
		  asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);

		  high_regs_pushed--;
		  real_regs_mask |= (1 << next_hi_reg);

		  if (high_regs_pushed)
		    {
		      for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
			   next_hi_reg--)
			if (live_regs_mask & (1 << next_hi_reg))
			  break;
		    }
		  else
		    {
		      pushable_regs &= ~((1 << regno) - 1);
		      break;
		    }
		}
	    }

	  /* If we had to find a work register and we have not yet
	     saved the LR then add it to the list of regs to push.  */
	  if (l_mask == (1 << LR_REGNUM))
	    thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
			   1, &cfa_offset,
			   real_regs_mask | (1 << LR_REGNUM));
	  else
	    thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
	}
    }
}
/* Handle the case of a double word load into a low register from
   a computed memory address.  The computed address may involve a
   register which is overwritten by the load.  */
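
/* Illustrative sketch only: for a DImode destination of r2/r3 and an
   address formed from two registers, e.g. [r4 + r5], the routine below
   emits roughly

	add	r3, r4, r5	@ form the address in the high half
	ldr	r2, [r3, #0]	@ load the low word through it
	ldr	r3, [r3, #4]	@ load the high word last, clobbering r3

   so the address register is overwritten only once it is no longer
   needed.  */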
const char *
thumb_load_double_from_address (rtx *operands)
{
  rtx addr;
  rtx base;
  rtx offset;
  rtx arg1;
  rtx arg2;

  gcc_assert (GET_CODE (operands[0]) == REG);
  gcc_assert (GET_CODE (operands[1]) == MEM);

  /* Get the memory address.  */
  addr = XEXP (operands[1], 0);

  /* Work out how the memory address is computed.  */
  switch (GET_CODE (addr))
    {
    case REG:
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      if (REGNO (operands[0]) == REGNO (addr))
	{
	  output_asm_insn ("ldr\t%H0, %2", operands);
	  output_asm_insn ("ldr\t%0, %1", operands);
	}
      else
	{
	  output_asm_insn ("ldr\t%0, %1", operands);
	  output_asm_insn ("ldr\t%H0, %2", operands);
	}
      break;

    case CONST:
      /* Compute <address> + 4 for the high order load.  */
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%0, %1", operands);
      output_asm_insn ("ldr\t%H0, %2", operands);
      break;

    case PLUS:
      arg1 = XEXP (addr, 0);
      arg2 = XEXP (addr, 1);

      if (CONSTANT_P (arg1))
	base = arg2, offset = arg1;
      else
	base = arg1, offset = arg2;

      gcc_assert (GET_CODE (base) == REG);

      /* Catch the case of <address> = <reg> + <reg> */
      if (GET_CODE (offset) == REG)
	{
	  int reg_offset = REGNO (offset);
	  int reg_base   = REGNO (base);
	  int reg_dest   = REGNO (operands[0]);

	  /* Add the base and offset registers together into the
	     higher destination register.  */
	  asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
		       reg_dest + 1, reg_base, reg_offset);

	  /* Load the lower destination register from the address in
	     the higher destination register.  */
	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
		       reg_dest, reg_dest + 1);

	  /* Load the higher destination register from its own address
	     plus 4.  */
	  asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
		       reg_dest + 1, reg_dest + 1);
	}
      else
	{
	  /* Compute <address> + 4 for the high order load.  */
	  operands[2] = gen_rtx_MEM (SImode,
				     plus_constant (XEXP (operands[1], 0), 4));

	  /* If the computed address is held in the low order register
	     then load the high order register first, otherwise always
	     load the low order register first.  */
	  if (REGNO (operands[0]) == REGNO (base))
	    {
	      output_asm_insn ("ldr\t%H0, %2", operands);
	      output_asm_insn ("ldr\t%0, %1", operands);
	    }
	  else
	    {
	      output_asm_insn ("ldr\t%0, %1", operands);
	      output_asm_insn ("ldr\t%H0, %2", operands);
	    }
	}
      break;

    case LABEL_REF:
      /* With no registers to worry about we can just load the value
	 directly.  */
      operands[2] = gen_rtx_MEM (SImode,
				 plus_constant (XEXP (operands[1], 0), 4));

      output_asm_insn ("ldr\t%H0, %2", operands);
      output_asm_insn ("ldr\t%0, %1", operands);
      break;

    default:
      gcc_unreachable ();
    }

  return "";
}
const char *
thumb_output_move_mem_multiple (int n, rtx *operands)
{
  rtx tmp;

  switch (n)
    {
    case 2:
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
      break;

    case 3:
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      if (REGNO (operands[5]) > REGNO (operands[6]))
	{
	  tmp = operands[5];
	  operands[5] = operands[6];
	  operands[6] = tmp;
	}
      if (REGNO (operands[4]) > REGNO (operands[5]))
	{
	  tmp = operands[4];
	  operands[4] = operands[5];
	  operands[5] = tmp;
	}
      output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
      break;

    default:
      gcc_unreachable ();
    }

  return "";
}
/* Output a call-via instruction for thumb state.  */
const char *
thumb_call_via_reg (rtx reg)
{
  int regno = REGNO (reg);
  rtx *labelp;

  gcc_assert (regno < LR_REGNUM);

  /* If we are in the normal text section we can use a single instance
     per compilation unit.  If we are doing function sections, then we need
     an entry per section, since we can't rely on reachability.  */
  if (in_text_section ())
    {
      thumb_call_reg_needed = 1;

      if (thumb_call_via_label[regno] == NULL)
	thumb_call_via_label[regno] = gen_label_rtx ();
      labelp = thumb_call_via_label + regno;
    }
  else
    {
      if (cfun->machine->call_via[regno] == NULL)
	cfun->machine->call_via[regno] = gen_label_rtx ();
      labelp = cfun->machine->call_via + regno;
    }

  output_asm_insn ("bl\t%a0", labelp);
  return "";
}
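
/* Illustrative sketch only (the label name is made up; the real one is an
   internal compiler-generated label): a Thumb indirect call through r3
   ends up as a short branch to a shared BX stub, roughly

	bl	.Lcall_via_r3
	...
     .Lcall_via_r3:
	bx	r3

   with one stub per register, emitted either once per file (see
   arm_file_end below) or once per function section.  */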
/* Routines for generating rtl.  */
void
thumb_expand_movmemqi (rtx *operands)
{
  rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
  rtx in  = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
  HOST_WIDE_INT len = INTVAL (operands[2]);
  HOST_WIDE_INT offset = 0;

  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
			    reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
					      plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
			    reg));
    }
}
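
/* Worked example (sketch): a constant-length 23-byte copy expands to one
   12-byte block move and one 8-byte block move (both via the
   auto-incrementing ldmia/stmia helper patterns), followed by a 2-byte and
   a 1-byte tail copy through temporary registers.  */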
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Return the length of a function name prefix
   that starts with the character 'c'.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
    ARM_NAME_ENCODING_LENGTHS
      default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}

/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}
static void
arm_file_end (void)
{
  int regno;

  if (! thumb_call_reg_needed)
    return;

  asm_fprintf (asm_out_file, "\t.code 16\n");
  ASM_OUTPUT_ALIGN (asm_out_file, 1);

  for (regno = 0; regno < LR_REGNUM; regno++)
    {
      rtx label = thumb_call_via_label[regno];

      if (label != 0)
	{
	  targetm.asm_out.internal_label (asm_out_file, "L",
					  CODE_LABEL_NUMBER (label));
	  asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
	}
    }
}
#ifdef AOF_ASSEMBLER
/* Special functions only needed when producing AOF syntax assembler.  */

struct pic_chain
{
  struct pic_chain * next;
  const char * symname;
};

static struct pic_chain * aof_pic_chain = NULL;

rtx
aof_pic_entry (rtx x)
{
  struct pic_chain ** chainp;
  int offset;

  if (aof_pic_label == NULL_RTX)
    aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");

  for (offset = 0, chainp = &aof_pic_chain; *chainp;
       offset += 4, chainp = &(*chainp)->next)
    if ((*chainp)->symname == XSTR (x, 0))
      return plus_constant (aof_pic_label, offset);

  *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
  (*chainp)->next = NULL;
  (*chainp)->symname = XSTR (x, 0);
  return plus_constant (aof_pic_label, offset);
}
void
aof_dump_pic_table (FILE *f)
{
  struct pic_chain * chain;

  if (aof_pic_chain == NULL)
    return;

  asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
	       PIC_OFFSET_TABLE_REGNUM,
	       PIC_OFFSET_TABLE_REGNUM);
  fputs ("|x$adcons|\n", f);

  for (chain = aof_pic_chain; chain; chain = chain->next)
    {
      fputs ("\tDCD\t", f);
      assemble_name (f, chain->symname);
      fputs ("\n", f);
    }
}

int arm_text_section_count = 1;

char *
aof_text_section (void )
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    strcat (buf, ", PIC, REENTRANT");
  return buf;
}

static int arm_data_section_count = 1;

char *
aof_data_section (void)
{
  static char buf[100];
  sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
  return buf;
}
/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  new->name = name;
  imports_list = new;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}

int arm_main_function = 0;
static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}

static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}
static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }
}
#endif /* !ARM_PE */
static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }

  default_internal_label (stream, prefix, labelno);
}
/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
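
/* Illustrative sketch only: for an ARM-state thunk with DELTA == 8 and no
   PLT relocation required, the output is roughly

	add	r0, r0, #8
	b	<target function>

   (r1 is adjusted instead of r0 when the return value goes via a hidden
   reference argument).  In the Thumb case the target address is instead
   loaded into r12 from a literal word emitted after the thunk and reached
   with bx.  */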
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;

  if (TARGET_THUMB)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
    }

  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }

  if (TARGET_THUMB)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}
const char *
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  gcc_assert (GET_CODE (x) == CONST_VECTOR);

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       gcc_unreachable ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return "";
}
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}
static rtx
arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		      int incoming ATTRIBUTE_UNUSED)
{
  /* FIXME: The ARM backend has special code to handle structure
     returns, and will reserve its own hidden first argument.  So
     if this macro is enabled a *second* hidden argument will be
     reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */

  /* Register in which address to store a structure value
     is passed to a function.  */
  return gen_rtx_REG (Pmode, ARG_REGISTER (1));
}
/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */
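
/* Worked example (sketch): for a variadic definition such as

     int f (int a, ...);

   only one argument register is named, so the hook below sets
   *PRETEND_SIZE to (NUM_ARG_REGS - 1) * UNITS_PER_WORD = 12 bytes and the
   prologue pushes r1-r3 immediately below the named argument.  */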
static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */
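
/* Illustrative example only: if PRODUCER sets r1 and CONSUMER is
   (set (mem (reg r1)) (reg r4)), the address depends on r1 and the hook
   below returns 0; for (set (mem (reg r5)) (reg r1)) only the stored value
   depends on r1, so it returns nonzero.  */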
int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */
int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */
int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}
/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */
int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}
/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}


/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED;
}


/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}


/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}


/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}


/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}


/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}


/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}

static void
arm_cxx_determine_class_data_visibility (tree decl)
{
  if (!TARGET_AAPCS_BASED)
    return;

  /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
     is exported.  However, on systems without dynamic vague linkage,
     \S 3.2.5.6 says that COMDAT class data has hidden linkage.  */
  if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (decl) = 1;
}

static bool
arm_cxx_class_data_always_comdat (void)
{
  /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
     vague linkage if the class has no key function.  */
  return !TARGET_AAPCS_BASED;
}


/* The EABI says __aeabi_atexit should be used to register static
   destructors.  */

static bool
arm_cxx_use_aeabi_atexit (void)
{
  return TARGET_AAPCS_BASED;
}
void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
	addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
	{
	  /* LR will be the first saved register.  */
	  offsets = arm_get_frame_offsets ();
	  delta = offsets->outgoing_args - (offsets->frame + 4);

	  if (delta >= 4096)
	    {
	      emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
				     GEN_INT (delta & ~4095)));
	      addr = scratch;
	      delta &= 4095;
	    }
	  else
	    addr = stack_pointer_rtx;

	  addr = plus_constant (addr, delta);
	}
      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
}
void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_insn (gen_rtx_USE (VOIDmode, source));

  mask = thumb_compute_save_reg_mask ();
  if (mask & (1 << LR_REGNUM))
    {
      offsets = arm_get_frame_offsets ();

      /* Find the saved regs.  */
      if (frame_pointer_needed)
	{
	  delta = offsets->soft_frame - offsets->saved_args;
	  reg = THUMB_HARD_FRAME_POINTER_REGNUM;
	}
      else
	{
	  delta = offsets->outgoing_args - offsets->saved_args;
	  reg = SP_REGNUM;
	}
      /* Allow for the stack frame.  */
      if (TARGET_BACKTRACE)
	delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if ((reg != SP_REGNUM && delta >= 128)
	  || delta >= 1024)
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
	  addr = scratch;
	}
      else
	addr = plus_constant (addr, delta);

      emit_move_insn (gen_rtx_MEM (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}
/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}
/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */
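
/* Worked example: with a mask of 255, an SImode shift by a variable count
   of 257 behaves like a shift by 257 & 255 == 1, so the compiler may omit
   an explicit masking of the count; DImode gets a mask of 0, meaning no
   such guarantee is made.  */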
static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
arm_dbx_register_number (unsigned int regno)
{
  if (regno < 16)
    return regno;

  /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
     compatibility.  The EABI defines them as registers 96-103.  */
  if (IS_FPA_REGNUM (regno))
    return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;

  if (IS_VFP_REGNUM (regno))
    return 64 + regno - FIRST_VFP_REGNUM;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return 104 + regno - FIRST_IWMMXT_GR_REGNUM;

  if (IS_IWMMXT_REGNUM (regno))
    return 112 + regno - FIRST_IWMMXT_REGNUM;

  gcc_unreachable ();
}
#ifdef TARGET_UNWIND_INFO
/* Emit unwind directives for a store-multiple instruction.  This should
   only ever be generated by the function prologue code, so we expect it
   to have a particular form.  */
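
/* Illustrative example only: for a prologue insn equivalent to
   "stmfd sp!, {r4, r5, lr}" the routine below emits

	.save {r4, r5, lr}

   and when the push also stores the PC purely to reserve a stack slot it
   emits an extra ".pad #4" instead of naming that register.  */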
static void
arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
{
  int i;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT nregs;
  int reg_size;
  unsigned reg;
  unsigned lastreg;
  rtx e;

  /* First insn will adjust the stack pointer.  */
  e = XVECEXP (p, 0, 0);
  if (GET_CODE (e) != SET
      || GET_CODE (XEXP (e, 0)) != REG
      || REGNO (XEXP (e, 0)) != SP_REGNUM
      || GET_CODE (XEXP (e, 1)) != PLUS)
    abort ();

  offset = -INTVAL (XEXP (XEXP (e, 1), 1));
  nregs = XVECLEN (p, 0) - 1;

  reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
  if (reg < 16)
    {
      /* The function prologue may also push pc, but not annotate it as it is
	 never restored.  We turn this into a stack pointer adjustment.  */
      if (nregs * 4 == offset - 4)
	{
	  fprintf (asm_out_file, "\t.pad #4\n");
	  offset -= 4;
	  nregs--;
	}
      reg_size = 4;
    }
  else if (IS_VFP_REGNUM (reg))
    {
      /* FPA register saves use an additional word.  */
      offset -= 4;
      reg_size = 8;
    }
  else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
    {
      /* FPA registers are done differently.  */
      asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
      return;
    }
  else
    /* Unknown register type.  */
    abort ();

  /* If the stack increment doesn't match the size of the saved registers,
     something has gone horribly wrong.  */
  if (offset != nregs * reg_size)
    abort ();

  fprintf (asm_out_file, "\t.save {");

  offset = 0;
  lastreg = 0;
  /* The remaining insns will describe the stores.  */
  for (i = 1; i <= nregs; i++)
    {
      /* Expect (set (mem <addr>) (reg)).
	 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)).  */
      e = XVECEXP (p, 0, i);
      if (GET_CODE (e) != SET
	  || GET_CODE (XEXP (e, 0)) != MEM
	  || GET_CODE (XEXP (e, 1)) != REG)
	abort ();

      reg = REGNO (XEXP (e, 1));
      if (reg < lastreg)
	abort ();

      if (i != 1)
	fprintf (asm_out_file, ", ");
      /* We can't use %r for vfp because we need to use the
	 double precision register names.  */
      if (IS_VFP_REGNUM (reg))
	asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
      else
	asm_fprintf (asm_out_file, "%r", reg);

#ifdef ENABLE_CHECKING
      /* Check that the addresses are consecutive.  */
      e = XEXP (XEXP (e, 0), 0);
      if (GET_CODE (e) == PLUS)
	{
	  offset += reg_size;
	  if (GET_CODE (XEXP (e, 0)) != REG
	      || REGNO (XEXP (e, 0)) != SP_REGNUM
	      || GET_CODE (XEXP (e, 1)) != CONST_INT
	      || offset != INTVAL (XEXP (e, 1)))
	    abort ();
	}
      else if (i != 1
	       || GET_CODE (e) != REG
	       || REGNO (e) != SP_REGNUM)
	abort ();
#endif
    }

  fprintf (asm_out_file, "}\n");
}
/* Emit unwind directives for a SET.  */

static void
arm_unwind_emit_set (FILE * asm_out_file, rtx p)
{
  rtx e0;
  rtx e1;
  unsigned reg;

  e0 = XEXP (p, 0);
  e1 = XEXP (p, 1);
  switch (GET_CODE (e0))
    {
    case MEM:
      /* Pushing a single register.  */
      if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
	  || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
	  || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
	abort ();

      asm_fprintf (asm_out_file, "\t.save ");
      if (IS_VFP_REGNUM (REGNO (e1)))
	asm_fprintf(asm_out_file, "{d%d}\n",
		    (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
      else
	asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
      break;

    case REG:
      if (REGNO (e0) == SP_REGNUM)
	{
	  /* A stack increment.  */
	  if (GET_CODE (e1) != PLUS
	      || GET_CODE (XEXP (e1, 0)) != REG
	      || REGNO (XEXP (e1, 0)) != SP_REGNUM
	      || GET_CODE (XEXP (e1, 1)) != CONST_INT)
	    abort ();

	  asm_fprintf (asm_out_file, "\t.pad #%wd\n",
		       -INTVAL (XEXP (e1, 1)));
	}
      else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
	{
	  HOST_WIDE_INT offset;

	  if (GET_CODE (e1) == PLUS)
	    {
	      if (GET_CODE (XEXP (e1, 0)) != REG
		  || GET_CODE (XEXP (e1, 1)) != CONST_INT)
		abort ();
	      reg = REGNO (XEXP (e1, 0));
	      offset = INTVAL (XEXP (e1, 1));
	      asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
			   HARD_FRAME_POINTER_REGNUM, reg,
			   INTVAL (XEXP (e1, 1)));
	    }
	  else if (GET_CODE (e1) == REG)
	    {
	      reg = REGNO (e1);
	      asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
			   HARD_FRAME_POINTER_REGNUM, reg);
	    }
	  else
	    abort ();
	}
      else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
	{
	  /* Move from sp to reg.  */
	  asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
	}
      else
	abort ();
      break;

    default:
      abort ();
    }
}
/* Emit unwind directives for the given insn.  */

static void
arm_unwind_emit (FILE * asm_out_file, rtx insn)
{
  rtx pat;

  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
    return;

  pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
  if (pat)
    pat = XEXP (pat, 0);
  else
    pat = PATTERN (insn);

  switch (GET_CODE (pat))
    {
    case SET:
      arm_unwind_emit_set (asm_out_file, pat);
      break;

    case PARALLEL:
      /* Store multiple.  */
      arm_unwind_emit_stm (asm_out_file, pat);
      break;

    default:
      abort ();
    }
}
/* Output a reference from a function exception table to the type_info
   object X.  The EABI specifies that the symbol should be relocated by
   an R_ARM_TARGET2 relocation.  */
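
/* Illustrative example only: for a C++ catch clause on `int' the entry
   emitted below would look like

	.word	_ZTIi(TARGET2)

   leaving the linker and dynamic loader to apply the platform's
   R_ARM_TARGET2 rule (GOT-relative, rel, or abs).  */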
static bool
arm_output_ttype (rtx x)
{
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, x);
  /* Use special relocations for symbol references.  */
  if (GET_CODE (x) != CONST_INT)
    fputs ("(TARGET2)", asm_out_file);
  fputc ('\n', asm_out_file);

  return TRUE;
}
#endif /* TARGET_UNWIND_INFO */
/* Output unwind directives for the start/end of a function.  */

void
arm_output_fn_unwind (FILE * f, bool prologue)
{
  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (prologue)
    fputs ("\t.fnstart\n", f);
  else
    fputs ("\t.fnend\n", f);
}