/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "output.h"
#include "insn-attr.h"
#include "toplev.h"
#include "recog.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "real.h"
#include "langhooks.h"
#include "basic-block.h"
#include "cfglayout.h"
#include "intl.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree-gimple.h"
#include "cfgloop.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
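
/* Illustrative note (not from the original sources): for the two SImode
   halves of a DImode value, word index LSW is 0 and MSW is 1 on a
   little-endian target, and the reverse on a big-endian one, so word
   LSW always selects the low 32 bits.  */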

/* These are some macros to abstract register modes.  */
#define CONST_OK_FOR_ADD(size) \
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))

/* Set to 1 by expand_prologue () when the function is an interrupt handler.  */
int current_function_interrupt;

/* ??? The pragma interrupt support will not work for SH3.  */
/* This is set by #pragma interrupt and #pragma trapa, and causes gcc to
   output code for the next function appropriate for an interrupt handler.  */
int pragma_interrupt;

/* This is set by the trap_exit attribute for functions.  It specifies
   a trap number to be used in a trapa instruction at function exit
   (instead of an rte instruction).  */
static int trap_exit;

/* This is used by the sp_switch attribute for functions.  It specifies
   a variable holding the address of the stack the interrupt function
   should switch to/from at entry/exit.  */
static rtx sp_switch;

/* This is set by #pragma trapa, and is similar to the above, except that
   the compiler doesn't emit code to preserve all registers.  */
static int pragma_trapa;

/* This is set by #pragma nosave_low_regs.  This is useful on the SH3,
   which has a separate set of low regs for User and Supervisor modes.
   This should only be used for the lowest level of interrupts.  Higher levels
   of interrupts must save the registers in case they themselves are
   interrupted.  */
int pragma_nosave_low_regs;

/* This is used for communication between TARGET_SETUP_INCOMING_VARARGS and
   sh_expand_prologue.  */
int current_function_anonymous_args;

/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Definitions used in ready queue reordering for first scheduling pass.  */

/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
static short *regmode_weight[2];

/* Total SFmode and SImode weights of scheduled insns.  */
static int curr_regmode_pressure[2];

/* If true, skip cycles for Q -> R movement.  */
static int skip_cycles = 0;

/* Cached value of can_issue_more.  This is cached in sh_variable_issue hook
   and returned from sh_reorder2.  */
static short cached_can_issue_more;

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */

rtx sh_compare_op0;
rtx sh_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */

enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS, GENERAL_REGS,
};

char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

/* Provide reg_class from a letter such as appears in the machine
   description.  *: target independently reserved letter.
   reg_class_from_letter['e' - 'a'] is set to NO_REGS for TARGET_FMOVD.  */

enum reg_class reg_class_from_letter[] =
{
  /* a */ ALL_REGS,  /* b */ TARGET_REGS, /* c */ FPSCR_REGS, /* d */ DF_REGS,
  /* e */ FP_REGS,   /* f */ FP_REGS,  /* g **/ NO_REGS,     /* h */ NO_REGS,
  /* i **/ NO_REGS,  /* j */ NO_REGS,  /* k */ SIBCALL_REGS, /* l */ PR_REGS,
  /* m **/ NO_REGS,  /* n **/ NO_REGS, /* o **/ NO_REGS,     /* p **/ NO_REGS,
  /* q */ NO_REGS,   /* r **/ NO_REGS, /* s **/ NO_REGS,     /* t */ T_REGS,
  /* u */ NO_REGS,   /* v */ NO_REGS,  /* w */ FP0_REGS,     /* x */ MAC_REGS,
  /* y */ FPUL_REGS, /* z */ R0_REGS
};

int assembler_dialect;

static bool shmedia_space_reserved_for_target_registers;

static bool sh_handle_option (size_t, const char *, int);
static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
static void dump_table (rtx, rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static rtx find_barrier (int, rtx, rtx);
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static void mark_use (rtx, rtx *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static const char *sh_check_pch_target_flags (int);
static int sh_adjust_cost (rtx, rtx, rtx, int);
static int sh_issue_rate (void);
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
static short find_set_regmode_weight (rtx, enum machine_mode);
static short find_insn_regmode_weight (rtx, enum machine_mode);
static void find_regmode_weight (int, enum machine_mode);
static void sh_md_init_global (FILE *, int, int);
static void sh_md_finish_global (FILE *, int);
static int rank_for_reorder (const void *, const void *);
static void swap_reorder (rtx *, int);
static void ready_reorder (rtx *, int);
static short high_pressure (enum machine_mode);
static int sh_reorder (FILE *, int, rtx *, int *, int);
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
static void sh_md_init (FILE *, int, int);
static int sh_variable_issue (FILE *, int, rtx, int);

static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_cannot_modify_jumps_p (void);
static int sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (tree);

static void sh_init_builtins (void);
static void sh_media_init_builtins (void);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void sh_file_start (void);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, rtx, void *);
static int shiftcosts (rtx);
static int andcosts (rtx);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
static bool sh_rtx_costs (rtx, int, int, int *);
static int sh_address_cost (rtx);
#ifdef TARGET_ADJUST_UNROLL_MAX
static int sh_adjust_unroll_max (struct loop *, int, int, int, int);
#endif
static int sh_pr_n_sets (void);
static rtx sh_allocate_initial_value (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
						struct save_schedule_s *, int);

static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (tree, tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
			      tree, bool);
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static int sh_dwarf_calling_convention (tree);
static int hard_regs_intersect_p (HARD_REG_SET *, HARD_REG_SET *);

/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sh_handle_option

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate

/* The next 5 hooks have been implemented to reenable sched1.  With the
   help of these macros we are limiting the movement of insns in sched1 to
   reduce the register pressure.  The overall idea is to keep count of SImode
   and SFmode regs required by already scheduled insns.  When these counts
   cross some threshold values, give priority to insns that free registers.
   The insn that frees registers is most likely to be the insn with lowest
   LUID (original insn order); but such an insn might be in the stalled
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
   up to a max of 8 cycles so that such insns may move from Q -> R.

   The description of the hooks is as below:

   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
   scheduler; it is called inside the sched_init function just after
   the find_insn_reg_weights function call.  It is used to calculate the
   SImode and SFmode weights of insns of basic blocks; much like what
   find_insn_reg_weights does.
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.

   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
   Q -> R.

   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
   high, reorder the ready queue so that the insn with lowest LUID will be
   issued first.

   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.

   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
   can be returned from TARGET_SCHED_REORDER2.

   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
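
/* Illustrative scenario (not from the original sources): suppose the
   SImode pressure count has crossed its threshold while insns I1
   (LUID 5, frees an SImode reg) and I2 (LUID 9) are pending.
   sh_reorder places I1 ahead of I2 in the ready queue R; if I1 is
   instead sitting in the stalled queue Q, sh_dfa_new_cycle skips
   cycles (at most 8, tracked via skip_cycles) until I1 can move from
   Q to R and be issued.  */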

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue

#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER sh_reorder

#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 sh_reorder2

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sh_md_init

#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost
#undef TARGET_ALLOCATE_INITIAL_VALUE
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES sh_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p

#undef TARGET_CHECK_PCH_TARGET_FLAGS
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags

#undef TARGET_DWARF_CALLING_CONVENTION
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention

/* Return regmode weight for insn.  */
#define INSN_REGMODE_WEIGHT(INSN, MODE) \
  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]

/* Return current register pressure for regmode.  */
#define CURR_REGMODE_PRESSURE(MODE) \
  curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
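
/* Usage sketch (illustrative): INSN_REGMODE_WEIGHT (insn, SImode) reads
   row 0 of regmode_weight at INSN_UID (insn), and any other mode
   (i.e. SFmode) reads row 1; CURR_REGMODE_PRESSURE indexes
   curr_regmode_pressure the same way.  */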

#ifdef SYMBIAN

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO	sh_symbian_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING	sh_symbian_strip_name_encoding
#undef  TARGET_CXX_IMPORT_EXPORT_CLASS
#define TARGET_CXX_IMPORT_EXPORT_CLASS  symbian_import_export_class

#endif /* SYMBIAN */

#ifdef TARGET_ADJUST_UNROLL_MAX
#undef TARGET_ADJUST_UNROLL_MAX
#define TARGET_ADJUST_UNROLL_MAX sh_adjust_unroll_max
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
		  int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_m1:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
      return true;

    case OPT_m2:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
      return true;

    case OPT_m2a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
      return true;

    case OPT_m2a_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
      return true;

    case OPT_m2a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
      return true;

    case OPT_m2a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
      return true;

    case OPT_m2e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
      return true;

    case OPT_m3:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
      return true;

    case OPT_m3e:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
      return true;

    case OPT_m4:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
      return true;

    case OPT_m4_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
      return true;

    case OPT_m4_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
      return true;

    case OPT_m4_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
      return true;

    case OPT_m4a:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
      return true;

    case OPT_m4a_nofpu:
    case OPT_m4al:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
      return true;

    case OPT_m4a_single:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
      return true;

    case OPT_m4a_single_only:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
      return true;

    case OPT_m5_32media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
      return true;

    case OPT_m5_32media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
      return true;

    case OPT_m5_64media:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
      return true;

    case OPT_m5_64media_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
      return true;

    case OPT_m5_compact:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
      return true;

    case OPT_m5_compact_nofpu:
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
      return true;

    default:
      return true;
    }
}

/* Print the operand address in x to the stream.  */

void
print_operand_address (FILE *stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
		     reg_names[true_regnum (base)]);
	    break;

	  case REG:
	  case SUBREG:
	    {
	      int base_num = true_regnum (base);
	      int index_num = true_regnum (index);

	      fprintf (stream, "@(r0,%s)",
		       reg_names[MAX (base_num, index_num)]);
	      break;
	    }

	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   '>'  print branch target if -fverbose-asm
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  print an `x' if `m' will print `base,index'.
   'N'  print 'r63' if the operand is (const_int 0).
   'd'  print a V2SF reg as dN instead of fpN.
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'U'  Likewise for {LD,ST}{HI,LO}.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */
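
/* For example (illustrative, assuming a double held in the general
   register pair r4/r5 on a little-endian target): '%R0' prints "r4",
   the least significant word, while '%S0' prints "r5", the most
   significant one; on big endian the two swap.  */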

void
print_operand (FILE *stream, rtx x, int code)
{
  int regno;
  enum machine_mode mode;

  switch (code)
    {
    case '.':
      if (final_sequence
	  && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
	  && get_attr_length (XVECEXP (final_sequence, 0, 1)))
	fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;
    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;
    case '@':
      if (trap_exit)
	fprintf (stream, "trapa #%d", trap_exit);
      else if (sh_cfun_interrupt_handler_p ())
	fprintf (stream, "rte");
      else
	fprintf (stream, "rts");
      break;
    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
	fprintf (stream, "\n\tnop");
      break;
    case '\'':
      {
	rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

	if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
	  fputs ("/u", stream);
	break;
      }
    case '>':
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
	{
	  fputs ("\t! target: ", stream);
	  output_addr_const (stream, JUMP_LABEL (current_output_insn));
	}
      break;
    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    /* N.B.: %R / %S / %T adjust memory addresses by four.
       For SHMEDIA, that means they can be used to access the first and
       second 32 bit part of a 64 bit (or larger) value that
       might be held in floating point registers or memory.
       While they can be used to access 64 bit parts of a larger value
       held in general purpose registers, that won't work with memory -
       neither for fp registers, since the frxx names are used.  */
    case 'R':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 1 : LSW;
	  fputs (reg_names[regno], (stream));
	}
      else if (GET_CODE (x) == MEM)
	{
	  x = adjust_address (x, SImode, 4 * LSW);
	  print_operand_address (stream, XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * LSW);
	  if (sub)
	    print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%R");
	}
      break;
    case 'S':
      if (REG_P (x) || GET_CODE (x) == SUBREG)
	{
	  regno = true_regnum (x);
	  regno += FP_REGISTER_P (regno) ? 0 : MSW;
	  fputs (reg_names[regno], (stream));
	}
      else if (GET_CODE (x) == MEM)
	{
	  x = adjust_address (x, SImode, 4 * MSW);
	  print_operand_address (stream, XEXP (x, 0));
	}
      else
	{
	  rtx sub = NULL_RTX;

	  mode = GET_MODE (x);
	  if (mode == VOIDmode)
	    mode = DImode;
	  if (GET_MODE_SIZE (mode) >= 8)
	    sub = simplify_subreg (SImode, x, mode, 4 * MSW);
	  if (sub)
	    print_operand (stream, sub, 0);
	  else
	    output_operand_lossage ("invalid operand to %%S");
	}
      break;
    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  if (GET_CODE (XEXP (x, 0)) != PRE_DEC
	      && GET_CODE (XEXP (x, 0)) != POST_INC)
	    x = adjust_address (x, SImode, 4);
	  print_operand_address (stream, XEXP (x, 0));
	  break;
	default:
	  break;
	}
      break;
    case 'o':
      switch (GET_CODE (x))
	{
	case PLUS:  fputs ("add", stream); break;
	case MINUS: fputs ("sub", stream); break;
	case MULT:  fputs ("mul", stream); break;
	case DIV:   fputs ("div", stream); break;
	case EQ:    fputs ("eq",  stream); break;
	case NE:    fputs ("ne",  stream); break;
	case GT:  case LT:  fputs ("gt",  stream); break;
	case GE:  case LE:  fputs ("ge",  stream); break;
	case GTU: case LTU: fputs ("gtu", stream); break;
	case GEU: case LEU: fputs ("geu", stream); break;
	default:
	  break;
	}
      break;
    case 'M':
      if (GET_CODE (x) == MEM
	  && GET_CODE (XEXP (x, 0)) == PLUS
	  && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
	fputc ('x', stream);
      break;

    case 'm':
      gcc_assert (GET_CODE (x) == MEM);
      x = XEXP (x, 0);
      /* Fall through.  */
    case 'U':
      switch (GET_CODE (x))
	{
	case REG:
	case SUBREG:
	  print_operand (stream, x, 0);
	  fputs (", 0", stream);
	  break;

	case PLUS:
	  print_operand (stream, XEXP (x, 0), 0);
	  fputs (", ", stream);
	  print_operand (stream, XEXP (x, 1), 0);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case 'd':
      gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);

      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
	{
	  fprintf ((stream), "r63");
	  break;
	}
      goto default_output;
    case 'u':
      if (GET_CODE (x) == CONST_INT)
	{
	  fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
	  break;
	}
      /* Fall through.  */

    default_output:
    default:
      regno = 0;
      mode = GET_MODE (x);

      switch (GET_CODE (x))
	{
	case TRUNCATE:
	  {
	    rtx inner = XEXP (x, 0);
	    int offset = 0;
	    enum machine_mode inner_mode;

	    /* We might see SUBREGs with vector mode registers inside.  */
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& subreg_lowpart_p (inner))
	      inner = SUBREG_REG (inner);
	    if (GET_CODE (inner) == CONST_INT)
	      {
		x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
		goto default_output;
	      }
	    inner_mode = GET_MODE (inner);
	    if (GET_CODE (inner) == SUBREG
		&& (GET_MODE_SIZE (GET_MODE (inner))
		    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
		&& GET_CODE (SUBREG_REG (inner)) == REG)
	      {
		offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
					      GET_MODE (SUBREG_REG (inner)),
					      SUBREG_BYTE (inner),
					      GET_MODE (inner));
		inner = SUBREG_REG (inner);
	      }
	    if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
	      gcc_unreachable ();
	    /* Floating point register pairs are always big endian;
	       general purpose registers are 64 bit wide.  */
	    regno = REGNO (inner);
	    regno = (HARD_REGNO_NREGS (regno, inner_mode)
		     - HARD_REGNO_NREGS (regno, mode))
		    + offset;
	    x = inner;
	    goto reg;
	  }
	case SIGN_EXTEND:
	  x = XEXP (x, 0);
	  goto reg;
	  /* FIXME: We need this on SHmedia32 because reload generates
	     some sign-extended HI or QI loads into DImode registers
	     but, because Pmode is SImode, the address ends up with a
	     subreg:SI of the DImode register.  Maybe reload should be
	     fixed so as to apply alter_subreg to such loads?  */
	case IF_THEN_ELSE:
	  gcc_assert (trapping_target_operand (x, VOIDmode));
	  x = XEXP (XEXP (x, 2), 0);
	  goto default_output;
	case SUBREG:
	  gcc_assert (SUBREG_BYTE (x) == 0
		      && GET_CODE (SUBREG_REG (x)) == REG);

	  x = SUBREG_REG (x);
	  /* Fall through.  */

	reg:
	case REG:
	  regno += REGNO (x);
	  if (FP_REGISTER_P (regno)
	      && mode == V16SFmode)
	    fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && mode == V4SFmode)
	    fprintf ((stream), "fv%s", reg_names[regno] + 2);
	  else if (GET_CODE (x) == REG
		   && mode == V2SFmode)
	    fprintf ((stream), "fp%s", reg_names[regno] + 2);
	  else if (FP_REGISTER_P (REGNO (x))
		   && GET_MODE_SIZE (mode) > 4)
	    fprintf ((stream), "d%s", reg_names[regno] + 1);
	  else
	    fputs (reg_names[regno], (stream));
	  break;

	case MEM:
	  output_address (XEXP (x, 0));
	  break;

	case CONST:
	  if (TARGET_SHMEDIA
	      && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && (GET_MODE (XEXP (x, 0)) == DImode
		  || GET_MODE (XEXP (x, 0)) == SImode)
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
	    {
	      rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);

	      fputc ('(', stream);
	      if (GET_CODE (val) == ASHIFTRT)
		{
		  fputc ('(', stream);
		  if (GET_CODE (XEXP (val, 0)) == CONST)
		    fputc ('(', stream);
		  output_addr_const (stream, XEXP (val, 0));
		  if (GET_CODE (XEXP (val, 0)) == CONST)
		    fputc (')', stream);
		  fputs (" >> ", stream);
		  output_addr_const (stream, XEXP (val, 1));
		  fputc (')', stream);
		}
	      else
		{
		  if (GET_CODE (val) == CONST)
		    fputc ('(', stream);
		  output_addr_const (stream, val);
		  if (GET_CODE (val) == CONST)
		    fputc (')', stream);
		}
	      fputs (" & 65535)", stream);
	      break;
	    }

	  /* Fall through.  */
	default:
	  if (TARGET_SH1)
	    fputc ('#', stream);
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}

/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */

static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

int
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (GET_CODE (operands[2]) == CONST_INT);
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  if (! constp)
    return 0;

  /* If we could use mov.l to move words and dest is word-aligned, we
     can use movua.l for loads and still generate a relatively short
     and efficient sequence.  */
  if (TARGET_SH4A_ARCH && align < 4
      && MEM_ALIGN (operands[0]) >= 32
      && can_move_by_pieces (bytes, 32))
    {
      rtx dest = copy_rtx (operands[0]);
      rtx src = copy_rtx (operands[1]);
      /* We could use different pseudos for each copied word, but
	 since movua can only load into r0, it's kind of
	 pointless.  */
      rtx temp = gen_reg_rtx (SImode);
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
      int copied = 0;

      while (copied + 4 <= bytes)
	{
	  rtx to = adjust_address (dest, SImode, copied);
	  rtx from = adjust_automodify_address (src, SImode, src_addr, copied);

	  emit_insn (gen_movua (temp, from));
	  emit_move_insn (src_addr, plus_constant (src_addr, 4));
	  emit_move_insn (to, temp);
	  copied += 4;
	}

      if (copied < bytes)
	move_by_pieces (adjust_address (dest, BLKmode, copied),
			adjust_automodify_address (src, BLKmode,
						   src_addr, copied),
			bytes - copied, align, 0);

      return 1;
    }

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (align < 4 || (bytes % 4 != 0))
    return 0;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
	return 0;
      else if (bytes == 12)
	{
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);

	  function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);
	  emit_insn (gen_block_move_real_i4 (func_addr_rtx));
	  return 1;
	}
      else if (! TARGET_SMALLCODE)
	{
	  const char *entry_name;
	  rtx func_addr_rtx = gen_reg_rtx (Pmode);
	  int dwords;
	  rtx r4 = gen_rtx_REG (SImode, 4);
	  rtx r5 = gen_rtx_REG (SImode, 5);
	  rtx r6 = gen_rtx_REG (SImode, 6);

	  entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
	  function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
	  force_into (XEXP (operands[0], 0), r4);
	  force_into (XEXP (operands[1], 0), r5);

	  dwords = bytes >> 3;
	  emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
	  emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
	  return 1;
	}
      else
	return 0;
    }
  if (bytes < 64)
    {
      char entry[30];
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movmemSI%d", bytes);
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return 1;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)
    {
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
	 for each 64 bytes moved.  Then the negative bit left over is used
	 as an index into a list of move instructions.  e.g., a 72 byte move
	 would be set up with size(r6) = 14, for one iteration through the
	 big while loop, and a switch of -2 for the last part.  */

      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
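
      /* Worked example (checking the comment above): for a 72 byte move,
	 bytes / 4 = 18 words, so final_switch = 16 - (18 % 16) = 14 and
	 while_loop = (18 / 16 - 1) * 16 = 0, giving r6 = 14.  One trip
	 through the loop leaves 14 - 16 = -2, which selects the two-word
	 tail of the move list.  */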
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return 1;
    }

  return 0;
}

/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

void
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
	    && tls_symbolic_operand (operands[1], Pmode) != 0))
    {
      rtx temp;

      if (SYMBOLIC_CONST_P (operands[1]))
	{
	  if (GET_CODE (operands[0]) == MEM)
	    operands[1] = force_reg (Pmode, operands[1]);
	  else if (TARGET_SHMEDIA
		   && GET_CODE (operands[1]) == LABEL_REF
		   && target_reg_operand (operands[0], mode))
	    /* It's ok.  */;
	  else
	    {
	      temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
	      operands[1] = legitimize_pic_address (operands[1], mode, temp);
	    }
	}
      else if (GET_CODE (operands[1]) == CONST
	       && GET_CODE (XEXP (operands[1], 0)) == PLUS
	       && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
	{
	  temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
	  temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
					 mode, temp);
	  operands[1] = expand_binop (mode, add_optab, temp,
				      XEXP (XEXP (operands[1], 0), 1),
				      no_new_pseudos ? temp
				      : gen_reg_rtx (Pmode),
				      0, OPTAB_LIB_WIDEN);
	}
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
	  && ! sh_register_operand (operands[1], mode))
	operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
	{
	  /* This is like change_address_1 (operands[0], mode, 0, 1) ,
	     except that we can't use that function because it is static.  */
	  rtx new = change_address (operands[0], mode, 0);
	  MEM_COPY_ATTRIBUTES (new, operands[0]);
	  operands[0] = new;
	}

      /* This case can happen while generating code to move the result
	 of a library call to the target.  Reject `st r0,@(rX,rY)' because
	 reload will fail to find a spill register for rX, since r0 is already
	 being used for the source.  */
      else if (TARGET_SH1
	       && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *) 0)
	       && GET_CODE (operands[0]) == MEM
	       && GET_CODE (XEXP (operands[0], 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
	operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
	{
	  rtx tga_op1, tga_ret, tmp, tmp2;

	  switch (tls_kind)
	    {
	    case TLS_MODEL_GLOBAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
	      op1 = tga_ret;
	      break;

	    case TLS_MODEL_LOCAL_DYNAMIC:
	      tga_ret = gen_rtx_REG (Pmode, R0_REG);
	      emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));

	      tmp = gen_reg_rtx (Pmode);
	      emit_move_insn (tmp, tga_ret);

	      if (register_operand (op0, Pmode))
		tmp2 = op0;
	      else
		tmp2 = gen_reg_rtx (Pmode);

	      emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
	      op1 = tmp2;
	      break;

	    case TLS_MODEL_INITIAL_EXEC:
	      if (! flag_pic)
		{
		  /* Don't schedule insns for getting GOT address when
		     the first scheduling is enabled, to avoid spill
		     failures for R0.  */
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		  emit_insn (gen_GOTaddr2picreg ());
		  emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode,
								 PIC_REG)));
		  if (flag_schedule_insns)
		    emit_insn (gen_blockage ());
		}
	      tga_op1 = no_new_pseudos ? op0 : gen_reg_rtx (Pmode);
	      tmp = gen_sym2GOTTPOFF (op1);
	      emit_insn (gen_tls_initial_exec (tga_op1, tmp));
	      op1 = tga_op1;
	      break;

	    case TLS_MODEL_LOCAL_EXEC:
	      tmp2 = gen_reg_rtx (Pmode);
	      emit_insn (gen_load_gbr (tmp2));
	      tmp = gen_reg_rtx (Pmode);
	      emit_insn (gen_symTPOFF2reg (tmp, op1));

	      if (register_operand (op0, Pmode))
		op1 = op0;
	      else
		op1 = gen_reg_rtx (Pmode);

	      emit_insn (gen_addsi3 (op1, tmp, tmp2));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  operands[1] = op1;
	}
    }
}

/* Prepare the operands for an scc instruction; make sure that the
   compare has been done.  */

rtx
prepare_scc_operands (enum rtx_code code)
{
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
  enum rtx_code oldcode = code;
  enum machine_mode mode;

  /* First need a compare insn.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      gcc_unreachable ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    {
      rtx tmp = sh_compare_op0;
      sh_compare_op0 = sh_compare_op1;
      sh_compare_op1 = tmp;
    }

  mode = GET_MODE (sh_compare_op0);
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

  sh_compare_op0 = force_reg (mode, sh_compare_op0);
  if ((code != EQ && code != NE
       && (sh_compare_op1 != const0_rtx
	   || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && sh_compare_op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
      (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
		gen_rtx_SET (VOIDmode, t_reg,
			     gen_rtx_fmt_ee (code, SImode,
					     sh_compare_op0, sh_compare_op1)),
		gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
  else
    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
			    gen_rtx_fmt_ee (code, SImode,
					    sh_compare_op0, sh_compare_op1)));

  return t_reg;
}

/* Called from the md file, set up the operands of a compare instruction.  */

void
from_compare (rtx *operands, int code)
{
  enum machine_mode mode = GET_MODE (sh_compare_op0);
  rtx insn;

  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);
  if (code != EQ
      || mode == DImode
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      /* Force args into regs, since we can't use constants here.  */
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
      if (sh_compare_op1 != const0_rtx
	  || code == GTU  || code == GEU
	  || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
	sh_compare_op1 = force_reg (mode, sh_compare_op1);
    }
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
    {
      from_compare (operands, GT);
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
    }
  else
    insn = gen_rtx_SET (VOIDmode,
			gen_rtx_REG (SImode, T_REG),
			gen_rtx_fmt_ee (code, SImode,
					sh_compare_op0, sh_compare_op1));
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      insn = gen_rtx_PARALLEL (VOIDmode,
		      gen_rtvec (2, insn,
				 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
    }
  else
    emit_insn (insn);
}

/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == MEM
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return "mov.l %T1,%0\n\tmov.l %1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
	return "sts mach,%S0\n\tsts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
	 when mov.d r1,r0 do r1->r0 then r2->r1.  */

      if (REGNO (src) + 1 == REGNO (dst))
	return "mov %T1,%T0\n\tmov %1,%0";
      else
	return "mov %1,%0\n\tmov %T1,%T0";
    }
  else if (GET_CODE (src) == CONST_INT)
    {
      if (INTVAL (src) < 0)
	output_asm_insn ("mov #-1,%S0", operands);
      else
	output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";
    }
  else if (GET_CODE (src) == MEM)
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      switch (GET_CODE (inside))
	{
	case REG:
	  ptrreg = REGNO (inside);
	  break;

	case SUBREG:
	  ptrreg = subreg_regno (inside);
	  break;

	case PLUS:
	  ptrreg = REGNO (XEXP (inside, 0));
	  /* ??? A r0+REG address shouldn't be possible here, because it isn't
	     an offsettable address.  Unfortunately, offsettable addresses use
	     QImode to check the offset, and a QImode offsettable address
	     requires r0 for the other operand, which is not currently
	     supported, so we can't use the 'o' constraint.
	     Thus we must check for and handle r0+REG addresses here.
	     We punt for now, since this is likely very rare.  */
	  gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
	  break;

	case LABEL_REF:
	  return "mov.l %1,%0\n\tmov.l %1+4,%T0";
	case POST_INC:
	  return "mov.l %1,%0\n\tmov.l %1,%T0";
	default:
	  gcc_unreachable ();
	}

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
	return "mov.l %T1,%T0\n\tmov.l %1,%0";
    }

  return "mov.l %1,%0\n\tmov.l %T1,%T0";
}

/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */

static void
print_slot (rtx insn)
{
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);

  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}

const char *
output_far_jump (rtx insn, rtx op)
{
  struct { rtx lab, reg, op; } this;
  rtx braf_base_lab = NULL_RTX;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx prev;

  this.lab = gen_label_rtx ();

  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766)
    {
      far = 0;
      jump = "mov.w %O0,%1; braf %1";
    }
  else
    {
      far = 1;
      if (flag_pic)
	{
	  if (TARGET_SH2)
	    jump = "mov.l %O0,%1; braf %1";
	  else
	    jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
	}
      else
	jump = "mov.l %O0,%1; jmp @%1";
    }
  /* If we have a scratch register available, use it.  */
  if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
    {
      this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
	jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
      output_asm_insn (jump, &this.lab);
      if (dbr_sequence_length ())
	print_slot (final_sequence);
      else
	output_asm_insn ("nop", 0);
    }
  else
    {
      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
	print_slot (final_sequence);

      this.reg = gen_rtx_REG (SImode, 13);
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
	 Fortunately, MACL is fixed and call-clobbered, and we never
	 need its value across jumps, so save r13 in it instead of in
	 the stack.  */
      if (TARGET_SH5)
	output_asm_insn ("lds r13, macl", 0);
      else
	output_asm_insn ("mov.l r13,@-r15", 0);
      output_asm_insn (jump, &this.lab);
      if (TARGET_SH5)
	output_asm_insn ("sts macl, r13", 0);
      else
	output_asm_insn ("mov.l @r15+,r13", 0);
    }
  if (far && flag_pic && TARGET_SH2)
    {
      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (braf_base_lab));
    }
  if (far)
    output_asm_insn (".align 2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
  this.op = op;
  if (far && flag_pic)
    {
      if (TARGET_SH2)
	this.lab = braf_base_lab;
      output_asm_insn (".long %O2-%O0", &this.lab);
    }
  else
    output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
  return "";
}

/* Local label counter, used for constants in the pool and inside
   pattern branches.  */

static int lf = 100;

/* Output code for ordinary branches.  */

const char *
output_branch (int logic, rtx insn, rtx *operands)
{
  switch (get_attr_length (insn))
    {
    case 6:
      /* This can happen if filling the delay slot has caused a forward
	 branch to exceed its range (we could reverse it, but only
	 when we know we won't overextend other branches; this should
	 best be handled by relaxation).
	 It can also happen when other condbranches hoist delay slot insn
	 from their destination, thus leading to code size increase.
	 But the branch will still be in the range -4092..+4098 bytes.  */

      if (! TARGET_RELAX)
	{
	  int label = lf++;
	  /* The call to print_slot will clobber the operands.  */
	  rtx op0 = operands[0];

	  /* If the instruction in the delay slot is annulled (true), then
	     there is no delay slot where we can put it now.  The only safe
	     place for it is after the label.  final will do that by default.  */

	  if (final_sequence
	      && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
	      && get_attr_length (XVECEXP (final_sequence, 0, 1)))
	    {
	      asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
			   ASSEMBLER_DIALECT ? "/" : ".", label);
	      print_slot (final_sequence);
	    }
	  else
	    asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);

	  output_asm_insn ("bra\t%l0", &op0);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* When relaxing, handle this like a short branch.  The linker
	 will fix it up if it still doesn't fit after relaxation.  */
    case 2:
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";

      /* These are for SH2e, in which we have to account for the
	 extra nop because of the hardware bug in annulled branches.  */
    case 8:
      if (! TARGET_RELAX)
	{
	  int label = lf++;

	  gcc_assert (!final_sequence
		      || !(INSN_ANNULLED_BRANCH_P
			   (XVECEXP (final_sequence, 0, 0))));
	  asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
		       logic ? "f" : "t",
		       ASSEMBLER_DIALECT ? "/" : ".", label);
	  fprintf (asm_out_file, "\tnop\n");
	  output_asm_insn ("bra\t%l0", operands);
	  fprintf (asm_out_file, "\tnop\n");
	  (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

	  return "";
	}
      /* When relaxing, fall through.  */
    case 4:
      {
	char buffer[10];

	sprintf (buffer, "b%s%ss\t%%l0",
		 logic ? "t" : "f",
		 ASSEMBLER_DIALECT ? "/" : ".");
	output_asm_insn (buffer, &operands[0]);
	return "nop";
      }

    default:
      /* There should be no longer branches now - that would
	 indicate that something has destroyed the branches set
	 up in machine_dependent_reorg.  */
      gcc_unreachable ();
    }
}

const char *
output_branchy_insn (enum rtx_code code, const char *template,
		     rtx insn, rtx *operands)
{
  rtx next_insn = NEXT_INSN (insn);

  if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
    {
      rtx src = SET_SRC (PATTERN (next_insn));
      if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
	{
	  /* Following branch not taken */
	  operands[9] = gen_label_rtx ();
	  emit_label_after (operands[9], next_insn);
	  INSN_ADDRESSES_NEW (operands[9],
			      INSN_ADDRESSES (INSN_UID (next_insn))
			      + get_attr_length (next_insn));
	  return template;
	}
      else
	{
	  int offset = (branch_dest (next_insn)
			- INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
	  if (offset >= -252 && offset <= 258)
	    {
	      if (GET_CODE (src) == IF_THEN_ELSE)
		/* branch_true */
		src = XEXP (src, 1);
	      operands[9] = src;
	      return template;
	    }
	}
    }
  operands[9] = gen_label_rtx ();
  emit_label_after (operands[9], insn);
  INSN_ADDRESSES_NEW (operands[9],
		      INSN_ADDRESSES (INSN_UID (insn))
		      + get_attr_length (insn));
  return template;
}

const char *
output_ieee_ccmpeq (rtx insn, rtx *operands)
{
  return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
			      insn, operands);
}

/* Output the start of the assembler file.  */

static void
sh_file_start (void)
{
  default_file_start ();

#ifdef SYMBIAN
  /* Declare the .directive section before it is used.  */
  fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
  fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
#endif

  if (TARGET_ELF)
    /* We need to show the text section with the proper
       attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
       emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
       will complain.  We can teach GAS specifically about the
       default attributes for our choice of text section, but
       then we would have to change GAS again if/when we change
       the text section name.  */
    fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);

  /* Switch to the data section so that the coffsem symbol
     isn't in the text section.  */
  data_section ();

  if (TARGET_LITTLE_ENDIAN)
    fputs ("\t.little\n", asm_out_file);

  if (!TARGET_ELF)
    {
      if (TARGET_SHCOMPACT)
	fputs ("\t.mode\tSHcompact\n", asm_out_file);
      else if (TARGET_SHMEDIA)
	fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
		 TARGET_SHMEDIA64 ? 64 : 32);
    }
}

/* Check if PAT includes UNSPEC_CALLER unspec pattern.  */

static bool
unspec_caller_rtx_p (rtx pat)
{
  switch (GET_CODE (pat))
    {
    case CONST:
      return unspec_caller_rtx_p (XEXP (pat, 0));
    case PLUS:
    case MINUS:
      if (unspec_caller_rtx_p (XEXP (pat, 0)))
	return true;
      return unspec_caller_rtx_p (XEXP (pat, 1));
    case UNSPEC:
      if (XINT (pat, 1) == UNSPEC_CALLER)
	return true;
      return false;
    default:
      return false;
    }
}

/* Indicate that INSN cannot be duplicated.  This is true for insn
   that generates a unique label.  */

static bool
sh_cannot_copy_insn_p (rtx insn)
{
  rtx pat;

  if (!reload_completed || !flag_pic)
    return false;

  if (GET_CODE (insn) != INSN)
    return false;
  if (asm_noperands (insn) >= 0)
    return false;

  pat = PATTERN (insn);
  if (GET_CODE (pat) != SET)
    return false;
  pat = SET_SRC (pat);

  if (unspec_caller_rtx_p (pat))
    return true;

  return false;
}

/* Actual number of instructions used to make a shift by N.  */
static const char ashiftrt_insns[] =
  { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};

/* Left shift and logical right shift are the same.  */
static const char shift_insns[]    =
  { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};

/* Individual shift amounts needed to get the above length sequences.
   One bit right shifts clobber the T bit, so when possible, put one bit
   shifts in the middle of the sequence, so the ends are eligible for
   branch delay slots.  */
static const short shift_amounts[32][5] = {
  {0}, {1}, {2}, {2, 1},
  {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
  {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
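
/* Worked example (illustrative): shift_amounts[15] = {8, -1, 8} realizes
   a left shift by 15 as ((x << 8) >> 1) << 8, i.e. three insns, matching
   shift_insns[15] = 3.  The negative entries denote shifts in the
   opposite direction, placed mid-sequence because they clobber T.  */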

/* Likewise, but for shift amounts < 16, up to three highmost bits
   might be clobbered.  This is typically used when combined with some
   kind of sign or zero extension.  */

static const char ext_shift_insns[]    =
  { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};

static const short ext_shift_amounts[32][4] = {
  {0}, {1}, {2}, {2, 1},
  {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
  {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};

/* Assuming we have a value that has been sign-extended by at least one bit,
   can we use the ext_shift_amounts with the last shift turned to an
   arithmetic shift to shift it by N without data loss, and quicker than by
   other means?  */
#define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
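
/* E.g. (illustrative): (n | 8) == 15 holds exactly for n == 7 and
   n == 15, the two counts whose ext_shift_amounts sequence ends with a
   one bit right shift ({8, -1} and {16, -1}) that can safely become an
   arithmetic shift.  */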

/* This is used in length attributes in sh.md to help compute the length
   of arbitrary constant shift instructions.  */

int
shift_insns_rtx (rtx insn)
{
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  int shift_count = INTVAL (XEXP (set_src, 1));
  enum rtx_code shift_code = GET_CODE (set_src);

  switch (shift_code)
    {
    case ASHIFTRT:
      return ashiftrt_insns[shift_count];
    case LSHIFTRT:
    case ASHIFT:
      return shift_insns[shift_count];
    default:
      gcc_unreachable ();
    }
}

/* Return the cost of a shift.  */

static inline int
shiftcosts (rtx x)
{
  int value;

  if (TARGET_SHMEDIA)
    return 1;

  if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      if (GET_MODE (x) == DImode
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) == 1)
	return 2;

      /* Everything else is invalid, because there is no pattern for it.  */
      return 10000;
    }
  /* If shift by a non constant, then this will be expensive.  */
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return SH_DYNAMIC_SHIFT_COST;

  value = INTVAL (XEXP (x, 1));

  /* Otherwise, return the true cost in instructions.  */
  if (GET_CODE (x) == ASHIFTRT)
    {
      int cost = ashiftrt_insns[value];
      /* If SH3, then we put the constant in a reg and use shad.  */
      if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
	cost = 1 + SH_DYNAMIC_SHIFT_COST;
      return cost;
    }
  else
    return shift_insns[value];
}

/* Return the cost of an AND operation.  */

static inline int
andcosts (rtx x)
{
  int i;

  /* Anding with a register is a single cycle and instruction.  */
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 1;

  i = INTVAL (XEXP (x, 1));

  if (TARGET_SHMEDIA)
    {
      if ((GET_CODE (XEXP (x, 1)) == CONST_INT
	   && CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
	  || EXTRA_CONSTRAINT_C16 (XEXP (x, 1)))
	return 1;
      else
	return 2;
    }

  /* These constants are single cycle extu.[bw] instructions.  */
  if (i == 0xff || i == 0xffff)
    return 1;
  /* Constants that can be used in an and immediate instruction in a single
     cycle, but this requires r0, so make it a little more expensive.  */
  if (CONST_OK_FOR_K08 (i))
    return 2;
  /* Constants that can be loaded with a mov immediate and an and.
     This case is probably unnecessary.  */
  if (CONST_OK_FOR_I08 (i))
    return 2;
  /* Any other constant requires a 2 cycle pc-relative load plus an and.
     This case is probably unnecessary.  */
  return 3;
}

/* Return the cost of an addition or a subtraction.  */

static inline int
addsubcosts (rtx x)
{
  /* Adding a register is a single cycle insn.  */
  if (GET_CODE (XEXP (x, 1)) == REG
      || GET_CODE (XEXP (x, 1)) == SUBREG)
    return 1;

  /* Likewise for small constants.  */
  if (GET_CODE (XEXP (x, 1)) == CONST_INT
      && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
    return 1;

  if (TARGET_SHMEDIA)
    switch (GET_CODE (XEXP (x, 1)))
      {
      case CONST:
      case LABEL_REF:
      case SYMBOL_REF:
	return TARGET_SHMEDIA64 ? 5 : 3;

      case CONST_INT:
	if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
	  return 2;
	else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
	  return 3;
	else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
	  return 4;

	/* Fall through.  */
      default:
	return 5;
      }

  /* Any other constant requires a 2 cycle pc-relative load plus an
     addition.  */
  return 3;
}

/* Return the cost of a multiply.  */
static inline int
multcosts (rtx x ATTRIBUTE_UNUSED)
{
  if (sh_multcost >= 0)
    return sh_multcost;
  if (TARGET_SHMEDIA)
    /* ??? We have a mul insn, but it has a latency of three, and doesn't
       accept constants.  Ideally, we would use a cost of one or two and
       add the cost of the operand, but disregard the latter when inside loops
       and loop invariant code motion is still to follow.
       Using a multiply first and splitting it later if it's a loss
       doesn't work because of different sign / zero extension semantics
       of multiplies vs. shifts.  */
    return TARGET_SMALLCODE ? 2 : 3;

  if (TARGET_SH2)
    {
      /* We have a mul insn, so we can never take more than the mul and the
	 read of the mac reg, but count more because of the latency and extra
	 reg usage.  */
      if (TARGET_SMALLCODE)
	return 2;
      return 3;
    }

  /* If we're aiming at small code, then just count the number of
     insns in a multiply call sequence.  */
  if (TARGET_SMALLCODE)
    return 5;

  /* Otherwise count all the insns in the routine we'd be calling too.  */
  return 20;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
sh_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (TARGET_SHMEDIA)
        {
          if (INTVAL (x) == 0)
            *total = 0;
          else if (outer_code == AND && and_operand ((x), DImode))
            *total = 0;
          else if ((outer_code == IOR || outer_code == XOR
                    || outer_code == PLUS)
                   && CONST_OK_FOR_I10 (INTVAL (x)))
            *total = 0;
          else if (CONST_OK_FOR_I16 (INTVAL (x)))
            *total = COSTS_N_INSNS (outer_code != SET);
          else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
            *total = COSTS_N_INSNS ((outer_code != SET) + 1);
          else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
            *total = COSTS_N_INSNS (3);
          else
            *total = COSTS_N_INSNS (4);
          return true;
        }
      if (CONST_OK_FOR_I08 (INTVAL (x)))
        *total = 0;
      else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
               && CONST_OK_FOR_K08 (INTVAL (x)))
        *total = 1;
      else
        *total = 8;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_SHMEDIA64)
        *total = COSTS_N_INSNS (4);
      else if (TARGET_SHMEDIA32)
        *total = COSTS_N_INSNS (2);
      else
        *total = 5;
      return true;

    case CONST_DOUBLE:
      if (TARGET_SHMEDIA)
        *total = COSTS_N_INSNS (4);
      else
        *total = 10;
      return true;
    case CONST_VECTOR:
      if (x == CONST0_RTX (GET_MODE (x)))
        *total = 0;
      else if (sh_1el_vec (x, VOIDmode))
        *total = outer_code != SET;
      if (sh_rep_vec (x, VOIDmode))
        *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
                  + (outer_code != SET));
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
      return true;

    case PLUS:
      *total = COSTS_N_INSNS (addsubcosts (x));
      return true;

    case AND:
      *total = COSTS_N_INSNS (andcosts (x));
      return true;

    case MULT:
      *total = COSTS_N_INSNS (multcosts (x));
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (shiftcosts (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (20);
      return true;

    case PARALLEL:
      if (sh_1el_vec (x, VOIDmode))
        *total = outer_code != SET;
      if (sh_rep_vec (x, VOIDmode))
        *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
                  + (outer_code != SET));
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
      return true;

    default:
      return false;
    }
}
/* Compute the cost of an address.  For the SH, all valid addresses are
   the same cost.  Use a slightly higher cost for reg + reg addressing,
   since it increases pressure on r0.  */

static int
sh_address_cost (rtx X)
{
  return (GET_CODE (X) == PLUS
          && ! CONSTANT_P (XEXP (X, 1))
          && ! TARGET_SHMEDIA ? 1 : 0);
}
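/* E.g. the indexed form @(r0,rn) is the PLUS with a non-constant index that
   costs 1 here; @(disp,rn) and plain @rn addressing cost 0.  */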
/* Code to expand a shift.  */

void
gen_ashift (int type, int n, rtx reg)
{
  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
        type = LSHIFTRT;
      else
        type = ASHIFT;
      n = -n;
    }

  switch (type)
    {
    case ASHIFTRT:
      emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
      break;
    case LSHIFTRT:
      if (n == 1)
        emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
      else
        emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
      break;
    case ASHIFT:
      emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
      break;
    }
}
/* Same for HImode */

void
gen_ashift_hi (int type, int n, rtx reg)
{
  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
        type = LSHIFTRT;
      else
        type = ASHIFT;
      n = -n;
    }

  switch (type)
    {
    case ASHIFTRT:
    case LSHIFTRT:
      /* We don't have HImode right shift operations because using the
         ordinary 32 bit shift instructions for that doesn't generate proper
         zero/sign extension.
         gen_ashift_hi is only called in contexts where we know that the
         sign extension works out correctly.  */
      {
        int offset = 0;
        if (GET_CODE (reg) == SUBREG)
          {
            offset = SUBREG_BYTE (reg);
            reg = SUBREG_REG (reg);
          }
        gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
        break;
      }
    case ASHIFT:
      emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
      break;
    }
}
/* Output RTL to split a constant shift into its component SH constant
   shift instructions.  */

void
gen_shifty_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;

  /* Truncate the shift count in case it is out of bounds.  */
  value = value & 0x1f;

  if (value == 31)
    {
      if (code == LSHIFTRT)
        {
          emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
          emit_insn (gen_movt (operands[0]));
          return;
        }
      else if (code == ASHIFT)
        {
          /* There is a two instruction sequence for 31 bit left shifts,
             but it requires r0.  */
          if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
            {
              emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
              emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
              return;
            }
        }
    }
  else if (value == 0)
    {
      /* This can happen even when optimizing, if there were subregs before
         reload.  Don't output a nop here, as this is never optimized away;
         use a no-op move instead.  */
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
      return;
    }

  max = shift_insns[value];
  for (i = 0; i < max; i++)
    gen_ashift (code, shift_amounts[value][i], operands[0]);
}
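/* For example, a constant left shift by 10 has no single SH instruction;
   shift_amounts[10] breaks it into the available 8- and 2-bit shifts
   (shll8, shll2), which the loop above feeds to gen_ashift one by one.  */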
/* Same as above, but optimized for values where the topmost bits don't
   matter.  */

void
gen_shifty_hi_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;
  void (*gen_fun) (int, int, rtx);

  /* This operation is used by and_shl for SImode values with a few
     high bits known to be cleared.  */
  value &= 31;
  if (value == 0)
    {
      emit_insn (gen_nop ());
      return;
    }

  gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
  if (code == ASHIFT)
    {
      max = ext_shift_insns[value];
      for (i = 0; i < max; i++)
        gen_fun (code, ext_shift_amounts[value][i], operands[0]);
    }
  else
    /* When shifting right, emit the shifts in reverse order, so that
       solitary negative values come first.  */
    for (i = ext_shift_insns[value] - 1; i >= 0; i--)
      gen_fun (code, ext_shift_amounts[value][i], operands[0]);
}
/* Output RTL for an arithmetic right shift.  */

/* ??? Rewrite to use super-optimizer sequences.  */

int
expand_ashiftrt (rtx *operands)
{
  rtx wrk;
  char func[18];
  int value;

  if (TARGET_SH3)
    {
      if (GET_CODE (operands[2]) != CONST_INT)
        {
          rtx count = copy_to_mode_reg (SImode, operands[2]);
          emit_insn (gen_negsi2 (count, count));
          emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
          return 1;
        }
      else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
               > 1 + SH_DYNAMIC_SHIFT_COST)
        {
          rtx count
            = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
          emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
          return 1;
        }
    }
  if (GET_CODE (operands[2]) != CONST_INT)
    return 0;

  value = INTVAL (operands[2]) & 31;

  if (value == 31)
    {
      /* If we are called from abs expansion, arrange things so that we
         can use a single MT instruction that doesn't clobber the source,
         if LICM can hoist out the load of the constant zero.  */
      if (currently_expanding_to_rtl)
        {
          emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
                                    operands[1]));
          emit_insn (gen_mov_neg_si_t (operands[0]));
          return 1;
        }
      emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
      return 1;
    }
  else if (value >= 16 && value <= 19)
    {
      wrk = gen_reg_rtx (SImode);
      emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
      value -= 16;
      while (value--)
        gen_ashift (ASHIFTRT, 1, wrk);
      emit_move_insn (operands[0], wrk);
      return 1;
    }
  /* Expand a short sequence inline, longer call a magic routine.  */
  else if (value <= 5)
    {
      wrk = gen_reg_rtx (SImode);
      emit_move_insn (wrk, operands[1]);
      while (value--)
        gen_ashift (ASHIFTRT, 1, wrk);
      emit_move_insn (operands[0], wrk);
      return 1;
    }

  wrk = gen_reg_rtx (Pmode);

  /* Load the value into an arg reg and call a helper.  */
  emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
  sprintf (func, "__ashiftrt_r4_%d", value);
  function_symbol (wrk, func, SFUNC_STATIC);
  emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
  emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
  return 1;
}
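/* So e.g. an arithmetic right shift by 24 is emitted as: move the operand
   into r4, call the static helper __ashiftrt_r4_24, and copy the result
   back out of r4.  */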
int
sh_dynamicalize_shift_p (rtx count)
{
  return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
}
/* Try to find a good way to implement the combiner pattern
  [(set (match_operand:SI 0 "register_operand" "r")
        (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
                           (match_operand:SI 2 "const_int_operand" "n"))
                (match_operand:SI 3 "const_int_operand" "n"))) .
  LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
  return 0 for simple right / left or left/right shift combination.
  return 1 for a combination of shifts with zero_extend.
  return 2 for a combination of shifts with an AND that needs r0.
  return 3 for a combination of shifts with an AND that needs an extra
    scratch register, when the three highmost bits of the AND mask are clear.
  return 4 for a combination of shifts with an AND that needs an extra
    scratch register, when any of the three highmost bits of the AND mask
    is set.
  If ATTRP is set, store an initial right shift width in ATTRP[0],
  and the instruction length in ATTRP[1].  These values are not valid
  when returning 0.
  When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
  shift_amounts for the last shift value that is to be used before the
  sign extend.  */
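/* The choice below is a pure cost comparison: shift_insns[] and
   ext_shift_insns[] give the instruction count of each candidate sequence,
   and the cheapest of the plain-shift, zero-extend, r0-AND and scratch-AND
   strategies determines the returned kind.  */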
int
shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
{
  unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
  int left = INTVAL (left_rtx), right;
  int best = 0;
  int cost, best_cost = 10000;
  int best_right = 0, best_len = 0;
  int i;
  int can_ext;

  if (left < 0 || left > 31)
    return 0;
  if (GET_CODE (mask_rtx) == CONST_INT)
    mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
  else
    mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
  /* Can this be expressed as a right shift / left shift pair?  */
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;
  right = exact_log2 (lsb);
  mask2 = ~(mask + lsb - 1);
  lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
  /* mask has no zeroes but trailing zeroes <==> ! mask2 */
  if (! mask2)
    best_cost = shift_insns[right] + shift_insns[right + left];
  /* mask has no trailing zeroes <==> ! right */
  else if (! right && mask2 == ~(lsb2 - 1))
    {
      int late_right = exact_log2 (lsb2);
      best_cost = shift_insns[left + late_right] + shift_insns[late_right];
    }
  /* Try to use zero extend.  */
  if (mask2 == ~(lsb2 - 1))
    {
      int width, first;

      for (width = 8; width <= 16; width += 8)
        {
          /* Can we zero-extend right away?  */
          if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
            {
              cost
                = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
              if (cost < best_cost)
                {
                  best = 1;
                  best_cost = cost;
                  best_right = right;
                  best_len = cost;
                  if (attrp)
                    attrp[2] = -1;
                }
              continue;
            }
          /* ??? Could try to put zero extend into initial right shift,
             or even shift a bit left before the right shift.  */
          /* Determine value of first part of left shift, to get to the
             zero extend cut-off point.  */
          first = width - exact_log2 (lsb2) + right;
          if (first >= 0 && right + left - first >= 0)
            {
              cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
                + ext_shift_insns[right + left - first];
              if (cost < best_cost)
                {
                  best = 1;
                  best_cost = cost;
                  best_right = right;
                  best_len = cost;
                  if (attrp)
                    attrp[2] = first;
                }
            }
        }
    }
  /* Try to use r0 AND pattern */
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
        break;
      if (! CONST_OK_FOR_K08 (mask >> i))
        continue;
      cost = (i != 0) + 2 + ext_shift_insns[left + i];
      if (cost < best_cost)
        {
          best = 2;
          best_cost = cost;
          best_right = i;
          best_len = cost - 1;
        }
    }
  /* Try to use a scratch register to hold the AND operand.  */
  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
  for (i = 0; i <= 2; i++)
    {
      if (i > right)
        break;
      cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
        + (can_ext ? ext_shift_insns : shift_insns)[left + i];
      if (cost < best_cost)
        {
          best = 4 - can_ext;
          best_cost = cost;
          best_right = i;
          best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
        }
    }

  if (attrp)
    {
      attrp[0] = best_right;
      attrp[1] = best_len;
    }
  return best;
}
/* This is used in length attributes of the unnamed instructions
   corresponding to shl_and_kind return values of 1 and 2.  */

int
shl_and_length (rtx insn)
{
  rtx set_src, left_rtx, mask_rtx;
  int attributes[3];

  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  left_rtx = XEXP (XEXP (set_src, 0), 1);
  mask_rtx = XEXP (set_src, 1);
  shl_and_kind (left_rtx, mask_rtx, attributes);
  return attributes[1];
}
/* This is used in length attribute of the and_shl_scratch instruction.  */

int
shl_and_scr_length (rtx insn)
{
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  int len = shift_insns[INTVAL (XEXP (set_src, 1))];
  rtx op = XEXP (set_src, 0);
  len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
  op = XEXP (XEXP (op, 0), 0);
  return len + shift_insns[INTVAL (XEXP (op, 1))];
}
/* Generate rtl for instructions for which shl_and_kind advised a particular
   method of generating them, i.e. returned zero.  */

int
gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
{
  int attributes[3];
  unsigned HOST_WIDE_INT mask;
  int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
  int right, total_shift;
  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;

  right = attributes[0];
  total_shift = INTVAL (left_rtx) + right;
  mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
  switch (kind)
    {
    default:
      return -1;
    case 1:
      {
        int first = attributes[2];
        rtx operands[3];

        if (first < 0)
          {
            emit_insn ((mask << right) <= 0xff
                       ? gen_zero_extendqisi2 (dest,
                                               gen_lowpart (QImode, source))
                       : gen_zero_extendhisi2 (dest,
                                               gen_lowpart (HImode, source)));
            source = dest;
          }
        if (source != dest)
          emit_insn (gen_movsi (dest, source));
        operands[0] = dest;
        if (right)
          {
            operands[2] = GEN_INT (right);
            gen_shifty_hi_op (LSHIFTRT, operands);
          }
        if (first > 0)
          {
            operands[2] = GEN_INT (first);
            gen_shifty_hi_op (ASHIFT, operands);
            total_shift -= first;
            mask <<= first;
          }
        if (first >= 0)
          emit_insn (mask <= 0xff
                     ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
                     : gen_zero_extendhisi2 (dest,
                                             gen_lowpart (HImode, dest)));
        if (total_shift > 0)
          {
            operands[2] = GEN_INT (total_shift);
            gen_shifty_hi_op (ASHIFT, operands);
          }
        break;
      }
    case 4:
      shift_gen_fun = gen_shifty_op;
    case 3:
      /* If the topmost bit that matters is set, set the topmost bits
         that don't matter.  This way, we might be able to get a shorter
         signed constant.  */
      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
        mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
    case 2:
      /* Don't expand fine-grained when combining, because that will
         make the pattern fail.  */
      if (currently_expanding_to_rtl
          || reload_in_progress || reload_completed)
        {
          rtx operands[3];

          /* Cases 3 and 4 should be handled by this split
             only while combining  */
          gcc_assert (kind <= 2);
          if (right)
            {
              emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
              source = dest;
            }
          emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
          if (total_shift)
            {
              operands[0] = dest;
              operands[2] = GEN_INT (total_shift);
              shift_gen_fun (ASHIFT, operands);
            }
          break;
        }
      else
        {
          int neg = 0;
          if (kind != 4 && total_shift < 16)
            {
              neg = -ext_shift_amounts[total_shift][1];
              if (neg > 0)
                neg -= ext_shift_amounts[total_shift][2];
              else
                neg = 0;
            }
          emit_insn (gen_and_shl_scratch (dest, source,
                                          GEN_INT (right),
                                          GEN_INT (mask),
                                          GEN_INT (total_shift + neg),
                                          GEN_INT (neg)));
          emit_insn (gen_movsi (dest, dest));
          break;
        }
    }
  return 0;
}
/* Try to find a good way to implement the combiner pattern
  [(set (match_operand:SI 0 "register_operand" "=r")
        (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
                                    (match_operand:SI 2 "const_int_operand" "n")
                         (match_operand:SI 3 "const_int_operand" "n")
                         (const_int 0)))
   (clobber (reg:SI T_REG))]
  LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
  return 0 for simple left / right shift combination.
  return 1 for left shift / 8 bit sign extend / left shift.
  return 2 for left shift / 16 bit sign extend / left shift.
  return 3 for left shift / 8 bit sign extend / shift / sign extend.
  return 4 for left shift / 16 bit sign extend / shift / sign extend.
  return 5 for left shift / 16 bit sign extend / right shift
  return 6 for < 8 bit sign extend / left shift.
  return 7 for < 8 bit sign extend / left shift / single right shift.
  If COSTP is nonzero, assign the calculated cost to *COSTP.  */
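/* E.g. kind 1 realizes the extraction as left shifts up to the 8-bit
   boundary, an exts.b (gen_extendqisi2 below) and the remaining left
   shifts, while kind 5 uses an exts.w followed by arithmetic right
   shifts.  */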
int
shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
{
  int left, size, insize, ext;
  int cost = 0, best_cost;
  int kind;

  left = INTVAL (left_rtx);
  size = INTVAL (size_rtx);
  insize = size - left;
  gcc_assert (insize > 0);
  /* Default to left / right shift.  */
  kind = 0;
  best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
  if (size <= 16)
    {
      /* 16 bit shift / sign extend / 16 bit shift */
      cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
      /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
         below, by alternative 3 or something even better.  */
      if (cost < best_cost)
        {
          kind = 5;
          best_cost = cost;
        }
    }
  /* Try a plain sign extend between two shifts.  */
  for (ext = 16; ext >= insize; ext -= 8)
    {
      if (ext <= size)
        {
          cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
          if (cost < best_cost)
            {
              kind = ext / (unsigned) 8;
              best_cost = cost;
            }
        }
      /* Check if we can do a sloppy shift with a final signed shift
         restoring the sign.  */
      if (EXT_SHIFT_SIGNED (size - ext))
        cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
      /* If not, maybe it's still cheaper to do the second shift sloppy,
         and do a final sign extend?  */
      else if (size <= 16)
        cost = ext_shift_insns[ext - insize] + 1
          + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
      else
        continue;
      if (cost < best_cost)
        {
          kind = ext / (unsigned) 8 + 2;
          best_cost = cost;
        }
    }
  /* Check if we can sign extend in r0 */
  if (insize < 8)
    {
      cost = 3 + shift_insns[left];
      if (cost < best_cost)
        {
          kind = 6;
          best_cost = cost;
        }
      /* Try the same with a final signed shift.  */
      if (left < 31)
        {
          cost = 3 + ext_shift_insns[left + 1] + 1;
          if (cost < best_cost)
            {
              kind = 7;
              best_cost = cost;
            }
        }
    }
  if (TARGET_SH3)
    {
      /* Try to use a dynamic shift.  */
      cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
      if (cost < best_cost)
        {
          kind = 0;
          best_cost = cost;
        }
    }
  if (costp)
    *costp = cost;
  return kind;
}
/* Function to be used in the length attribute of the instructions
   implementing this pattern.  */

int
shl_sext_length (rtx insn)
{
  rtx set_src, left_rtx, size_rtx;
  int cost;

  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  left_rtx = XEXP (XEXP (set_src, 0), 1);
  size_rtx = XEXP (set_src, 1);
  shl_sext_kind (left_rtx, size_rtx, &cost);
  return cost;
}
/* Generate rtl for this pattern */

int
gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
{
  int kind;
  int left, size, insize, cost;
  rtx operands[3];

  kind = shl_sext_kind (left_rtx, size_rtx, &cost);
  left = INTVAL (left_rtx);
  size = INTVAL (size_rtx);
  insize = size - left;
  switch (kind)
    {
    case 1:
    case 2:
    case 3:
    case 4:
      {
        int ext = kind & 1 ? 8 : 16;
        int shift2 = size - ext;

        /* Don't expand fine-grained when combining, because that will
           make the pattern fail.  */
        if (! currently_expanding_to_rtl
            && ! reload_in_progress && ! reload_completed)
          {
            emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
            emit_insn (gen_movsi (dest, source));
            break;
          }
        if (dest != source)
          emit_insn (gen_movsi (dest, source));
        operands[0] = dest;
        if (ext - insize)
          {
            operands[2] = GEN_INT (ext - insize);
            gen_shifty_hi_op (ASHIFT, operands);
          }
        emit_insn (kind & 1
                   ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
                   : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
        if (kind <= 2)
          {
            if (shift2)
              {
                operands[2] = GEN_INT (shift2);
                gen_shifty_op (ASHIFT, operands);
              }
          }
        else
          {
            if (shift2 > 0)
              {
                if (EXT_SHIFT_SIGNED (shift2))
                  {
                    operands[2] = GEN_INT (shift2 + 1);
                    gen_shifty_op (ASHIFT, operands);
                    operands[2] = const1_rtx;
                    gen_shifty_op (ASHIFTRT, operands);
                    break;
                  }
                operands[2] = GEN_INT (shift2);
                gen_shifty_hi_op (ASHIFT, operands);
              }
            else if (shift2)
              {
                operands[2] = GEN_INT (-shift2);
                gen_shifty_hi_op (LSHIFTRT, operands);
              }
            emit_insn (size <= 8
                       ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
                       : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
          }
        break;
      }
    case 5:
      {
        int i = 16 - size;
        if (! currently_expanding_to_rtl
            && ! reload_in_progress && ! reload_completed)
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
        else
          {
            operands[0] = dest;
            operands[2] = GEN_INT (16 - insize);
            gen_shifty_hi_op (ASHIFT, operands);
            emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
          }
        /* Don't use gen_ashrsi3 because it generates new pseudos.  */
        while (--i >= 0)
          gen_ashift (ASHIFTRT, 1, dest);
        break;
      }
    case 6:
    case 7:
      /* Don't expand fine-grained when combining, because that will
         make the pattern fail.  */
      if (! currently_expanding_to_rtl
          && ! reload_in_progress && ! reload_completed)
        {
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
          emit_insn (gen_movsi (dest, source));
          break;
        }
      emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
      emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
      emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
      operands[0] = dest;
      operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
      gen_shifty_op (ASHIFT, operands);
      if (kind == 7)
        emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
      break;
    default:
      return -1;
    }
  return 0;
}
/* Prefix a symbol_ref name with "datalabel".  */

rtx
gen_datalabel_ref (rtx sym)
{
  const char *str;

  if (GET_CODE (sym) == LABEL_REF)
    return gen_rtx_CONST (GET_MODE (sym),
                          gen_rtx_UNSPEC (GET_MODE (sym),
                                          gen_rtvec (1, sym),
                                          UNSPEC_DATALABEL));

  gcc_assert (GET_CODE (sym) == SYMBOL_REF);

  str = XSTR (sym, 0);
  /* Share all SYMBOL_REF strings with the same value - that is important
     for cse.  */
  str = IDENTIFIER_POINTER (get_identifier (str));
  XSTR (sym, 0) = str;

  return sym;
}
/* The SH cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   mov.l L1,rn
   ..
   mov.l L2,rn
   bra   L3
   nop
   align
   L1:   .long value
   L2:   .long value
   ..

   mov.l L3,rn
   bra   L4
   nop
   align
   L3:   .long value
   L4:   .long value
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 3 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   mov.l L1,rn
   ..
   mov.l L3,rn
   bra   L4
   nop
   align
   L3:   .long value
   L4:   .long value
   ..

   Then the second move becomes the target for the shortening process.  */
typedef struct
{
  rtx value;                    /* Value in table.  */
  rtx label;                    /* Label of value.  */
  rtx wend;                     /* End of window.  */
  enum machine_mode mode;       /* Mode of value.  */

  /* True if this constant is accessed as part of a post-increment
     sequence.  Note that HImode constants are never accessed in this way.  */
  bool part_of_sequence_p;
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   constants in the range 0..510 are at least 2 bytes long, and in the
   range from there to 1018 at least 4 bytes.  */

#define MAX_POOL_SIZE 372
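/* For reference, the stated minimum sizes would bound the count at
   510/2 + (1018 - 510)/4 = 255 + 127 = 382 entries; the value used is
   slightly more conservative.  */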
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
static rtx pool_window_label;
static int pool_window_last;
/* ??? If we need a constant in HImode which is the truncated value of a
   constant we need in SImode, we could combine the two entries thus saving
   two bytes.  Is this common enough to be worth the effort of implementing
   this?  */

/* ??? This stuff should be done at the same time that we shorten branches.
   As it is now, we must assume that all branches are the maximum size, and
   this causes us to almost always output constant pools sooner than
   necessary.  */
/* Add a constant to the pool and return its label.  */

static rtx
add_constant (rtx x, enum machine_mode mode, rtx last_value)
{
  int i;
  rtx lab, new, ref, newref;

  /* First see if we've already got it.  */
  for (i = 0; i < pool_size; i++)
    {
      if (x->code == pool_vector[i].value->code
          && mode == pool_vector[i].mode)
        {
          if (x->code == CODE_LABEL)
            {
              if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
                continue;
            }
          if (rtx_equal_p (x, pool_vector[i].value))
            {
              lab = new = 0;
              if (! last_value
                  || ! i
                  || ! rtx_equal_p (last_value, pool_vector[i-1].value))
                {
                  new = gen_label_rtx ();
                  LABEL_REFS (new) = pool_vector[i].label;
                  pool_vector[i].label = lab = new;
                }
              if (lab && pool_window_label)
                {
                  newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
                  ref = pool_vector[pool_window_last].wend;
                  LABEL_NEXTREF (newref) = ref;
                  pool_vector[pool_window_last].wend = newref;
                }
              if (new)
                pool_window_label = new;
              pool_window_last = i;
              return lab;
            }
        }
    }

  /* Need a new one.  */
  pool_vector[pool_size].value = x;
  if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
    {
      lab = 0;
      pool_vector[pool_size - 1].part_of_sequence_p = true;
    }
  else
    lab = gen_label_rtx ();
  pool_vector[pool_size].mode = mode;
  pool_vector[pool_size].label = lab;
  pool_vector[pool_size].wend = NULL_RTX;
  pool_vector[pool_size].part_of_sequence_p = (lab == 0);
  if (lab && pool_window_label)
    {
      newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
      ref = pool_vector[pool_window_last].wend;
      LABEL_NEXTREF (newref) = ref;
      pool_vector[pool_window_last].wend = newref;
    }
  if (lab)
    pool_window_label = lab;
  pool_window_last = pool_size;
  pool_size++;
  return lab;
}
/* Output the literal table.  START, if nonzero, is the first instruction
   this table is needed for, and also indicates that there is at least one
   casesi_worker_2 instruction; We have to emit the operand3 labels from
   these insns at a 4-byte aligned position.  BARRIER is the barrier
   after which we are to place the table.  */

static void
dump_table (rtx start, rtx barrier)
{
  rtx scan = barrier;
  int i;
  int need_align = 1;
  rtx lab, ref;
  int have_df = 0;

  /* Do two passes, first time dump out the HI sized constants.  */

  for (i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      if (p->mode == HImode)
        {
          if (need_align)
            {
              scan = emit_insn_after (gen_align_2 (), scan);
              need_align = 0;
            }
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
            scan = emit_label_after (lab, scan);
          scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
                                  scan);
          for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
            {
              lab = XEXP (ref, 0);
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
            }
        }
      else if (p->mode == DFmode)
        have_df = 1;
    }

  need_align = 1;

  if (start)
    {
      scan = emit_insn_after (gen_align_4 (), scan);
      need_align = 0;
      for (; start != barrier; start = NEXT_INSN (start))
        if (GET_CODE (start) == INSN
            && recog_memoized (start) == CODE_FOR_casesi_worker_2)
          {
            rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
            rtx lab = XEXP (XVECEXP (src, 0, 3), 0);

            scan = emit_label_after (lab, scan);
          }
    }
  if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
    {
      rtx align_insn = NULL_RTX;

      scan = emit_label_after (gen_label_rtx (), scan);
      scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
      need_align = 0;

      for (i = 0; i < pool_size; i++)
        {
          pool_node *p = &pool_vector[i];

          switch (p->mode)
            {
            case HImode:
              break;
            case SImode:
            case SFmode:
              if (align_insn && !p->part_of_sequence_p)
                {
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
                    emit_label_before (lab, align_insn);
                  emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
                                    align_insn);
                  for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
                    {
                      lab = XEXP (ref, 0);
                      emit_insn_before (gen_consttable_window_end (lab),
                                        align_insn);
                    }
                  delete_insn (align_insn);
                  align_insn = NULL_RTX;
                  continue;
                }
              else
                {
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
                    scan = emit_label_after (lab, scan);
                  scan = emit_insn_after (gen_consttable_4 (p->value,
                                                            const0_rtx), scan);
                  need_align = ! need_align;
                }
              break;
            case DFmode:
              if (need_align)
                {
                  scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
                  align_insn = scan;
                  need_align = 0;
                }
            case DImode:
              for (lab = p->label; lab; lab = LABEL_REFS (lab))
                scan = emit_label_after (lab, scan);
              scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
                                      scan);
              break;
            default:
              gcc_unreachable ();
            }

          if (p->mode != HImode)
            {
              for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
                {
                  lab = XEXP (ref, 0);
                  scan = emit_insn_after (gen_consttable_window_end (lab),
                                          scan);
                }
            }
        }

      pool_size = 0;
    }

  for (i = 0; i < pool_size; i++)
    {
      pool_node *p = &pool_vector[i];

      switch (p->mode)
        {
        case HImode:
          break;
        case SImode:
        case SFmode:
          if (need_align)
            {
              need_align = 0;
              scan = emit_label_after (gen_label_rtx (), scan);
              scan = emit_insn_after (gen_align_4 (), scan);
            }
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
            scan = emit_label_after (lab, scan);
          scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
                                  scan);
          break;
        case DFmode:
        case DImode:
          if (need_align)
            {
              need_align = 0;
              scan = emit_label_after (gen_label_rtx (), scan);
              scan = emit_insn_after (gen_align_4 (), scan);
            }
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
            scan = emit_label_after (lab, scan);
          scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
                                  scan);
          break;
        default:
          gcc_unreachable ();
        }

      if (p->mode != HImode)
        {
          for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
            {
              lab = XEXP (ref, 0);
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
            }
        }
    }

  scan = emit_insn_after (gen_consttable_end (), scan);
  scan = emit_barrier_after (scan);
  pool_size = 0;
  pool_window_label = NULL_RTX;
  pool_window_last = 0;
}
/* Return nonzero if constant would be an ok source for a
   mov.w instead of a mov.l.  */

static int
hi_const (rtx src)
{
  return (GET_CODE (src) == CONST_INT
          && INTVAL (src) >= -32768
          && INTVAL (src) <= 32767);
}
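/* I.e. the value fits in 16 bits signed, which a pc-relative mov.w loads
   sign-extended from a pool slot half the size of a mov.l one.  */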
/* Nonzero if the insn is a move instruction which needs to be fixed.  */

/* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
   CONST_DOUBLE input value is CONST_OK_FOR_I08.  For a SFmode move, we don't
   need to fix it if the input value is CONST_OK_FOR_I08.  */

static int
broken_move (rtx insn)
{
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);
      if (GET_CODE (pat) == PARALLEL)
        pat = XVECEXP (pat, 0, 0);
      if (GET_CODE (pat) == SET
          /* We can load any 8 bit value if we don't care what the high
             order bits end up as.  */
          && GET_MODE (SET_DEST (pat)) != QImode
          && (CONSTANT_P (SET_SRC (pat))
              /* Match mova_const.  */
              || (GET_CODE (SET_SRC (pat)) == UNSPEC
                  && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
                  && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
          && ! (TARGET_SH2E
                && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
                && (fp_zero_operand (SET_SRC (pat))
                    || fp_one_operand (SET_SRC (pat)))
                /* ??? If this is a -m4 or -m4-single compilation, in general
                   we don't know the current setting of fpscr, so disable fldi.
                   There is an exception if this was a register-register move
                   before reload - and hence it was ascertained that we have
                   single precision setting - and in a post-reload optimization
                   we changed this to do a constant load.  In that case
                   we don't have an r0 clobber, hence we must use fldi.  */
                && (! TARGET_SH4 || TARGET_FMOVD
                    || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
                        == SCRATCH))
                && GET_CODE (SET_DEST (pat)) == REG
                && FP_REGISTER_P (REGNO (SET_DEST (pat))))
          && ! (TARGET_SH2A
                && GET_MODE (SET_DEST (pat)) == SImode
                && GET_CODE (SET_SRC (pat)) == CONST_INT
                && CONST_OK_FOR_I20 (INTVAL (SET_SRC (pat))))
          && (GET_CODE (SET_SRC (pat)) != CONST_INT
              || ! CONST_OK_FOR_I08 (INTVAL (SET_SRC (pat)))))
        return 1;
    }

  return 0;
}
/* Return nonzero if the insn is a mova insn.  */

static int
mova_p (rtx insn)
{
  return (GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
          /* Don't match mova_const.  */
          && GET_CODE (XVECEXP (SET_SRC (PATTERN (insn)), 0, 0)) == LABEL_REF);
}
/* Fix up a mova from a switch that went out of range.  */
static void
fixup_mova (rtx mova)
{
  if (! flag_pic)
    {
      SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
      INSN_CODE (mova) = -1;
    }
  else
    {
      rtx worker = mova;
      rtx lab = gen_label_rtx ();
      rtx wpat, wpat0, wpat1, wsrc, diff;

      do
        {
          worker = NEXT_INSN (worker);
          gcc_assert (worker
                      && GET_CODE (worker) != CODE_LABEL
                      && GET_CODE (worker) != JUMP_INSN);
        } while (recog_memoized (worker) != CODE_FOR_casesi_worker_1);
      wpat = PATTERN (worker);
      wpat0 = XVECEXP (wpat, 0, 0);
      wpat1 = XVECEXP (wpat, 0, 1);
      wsrc = SET_SRC (wpat0);
      PATTERN (worker) = (gen_casesi_worker_2
                          (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
                           XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
                           XEXP (wpat1, 0)));
      INSN_CODE (worker) = -1;
      diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
                            gen_rtx_LABEL_REF (Pmode, lab));
      diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
      SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
      INSN_CODE (mova) = -1;
    }
}
/* Find the last barrier from insn FROM which is close enough to hold the
   constant pool.  If we can't find one, then create one near the end of
   the range.  */

static rtx
find_barrier (int num_mova, rtx mova, rtx from)
{
  int count_si = 0;
  int count_hi = 0;
  int found_hi = 0;
  int found_si = 0;
  int found_di = 0;
  int hi_align = 2;
  int si_align = 2;
  int leading_mova = num_mova;
  rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
  int si_limit;
  int hi_limit;

  /* For HImode: range is 510, add 4 because pc counts from address of
     second instruction after this one, subtract 2 for the jump instruction
     that we may need to emit before the table, subtract 2 for the instruction
     that fills the jump delay slot (in very rare cases, reorg will take an
     instruction from after the constant pool or will leave the delay slot
     empty).  This gives 510.
     For SImode: range is 1020, add 4 because pc counts from address of
     second instruction after this one, subtract 2 in case pc is 2 byte
     aligned, subtract 2 for the jump instruction that we may need to emit
     before the table, subtract 2 for the instruction that fills the jump
     delay slot.  This gives 1018.  */

  /* The branch will always be shortened now that the reference address for
     forward branches is the successor address, thus we need no longer make
     adjustments to the [sh]i_limit for -O0.  */

  si_limit = 1018;
  hi_limit = 510;

  while (from && count_si < si_limit && count_hi < hi_limit)
    {
      int inc = get_attr_length (from);
      int new_align = 1;

      if (GET_CODE (from) == CODE_LABEL)
        {
          if (optimize)
            new_align = 1 << label_to_alignment (from);
          else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
            new_align = 1 << barrier_align (from);
          else
            new_align = 1;
          inc = 0;
        }

      if (GET_CODE (from) == BARRIER)
        {
          found_barrier = from;

          /* If we are at the end of the function, or in front of an alignment
             instruction, we need not insert an extra alignment.  We prefer
             this kind of barrier.  */
          if (barrier_align (from) > 2)
            good_barrier = from;
        }

      if (broken_move (from))
        {
          rtx pat, src, dst;
          enum machine_mode mode;

          pat = PATTERN (from);
          if (GET_CODE (pat) == PARALLEL)
            pat = XVECEXP (pat, 0, 0);
          src = SET_SRC (pat);
          dst = SET_DEST (pat);
          mode = GET_MODE (dst);

          /* We must explicitly check the mode, because sometimes the
             front end will generate code to load unsigned constants into
             HImode targets without properly sign extending them.  */
          if (mode == HImode
              || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
            {
              found_hi += 2;
              /* We put the short constants before the long constants, so
                 we must count the length of short constants in the range
                 for the long constants.  */
              /* ??? This isn't optimal, but is easy to do.  */
              si_limit -= 2;
            }
          else
            {
              /* We dump DF/DI constants before SF/SI ones, because
                 the limit is the same, but the alignment requirements
                 are higher.  We may waste up to 4 additional bytes
                 for alignment, and the DF/DI constant may have
                 another SF/SI constant placed before it.  */
              if (TARGET_SHCOMPACT
                  && ! found_di
                  && (mode == DFmode || mode == DImode))
                {
                  found_di = 1;
                  si_limit -= 8;
                }
              while (si_align > 2 && found_si + si_align - 2 > count_si)
                si_align >>= 1;
              if (found_si > count_si)
                count_si = found_si;
              found_si += GET_MODE_SIZE (mode);
              if (num_mova)
                si_limit -= GET_MODE_SIZE (mode);
            }
        }

      if (mova_p (from))
        {
          if (! num_mova++)
            {
              leading_mova = 0;
              mova = from;
              barrier_before_mova
                = good_barrier ? good_barrier : found_barrier;
            }
          if (found_si > count_si)
            count_si = found_si;
        }
      else if (GET_CODE (from) == JUMP_INSN
               && (GET_CODE (PATTERN (from)) == ADDR_VEC
                   || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
        {
          if (num_mova)
            num_mova--;
          if (barrier_align (next_real_insn (from)) == align_jumps_log)
            {
              /* We have just passed the barrier in front of the
                 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
                 the ADDR_DIFF_VEC is accessed as data, just like our pool
                 constants, this is a good opportunity to accommodate what
                 we have gathered so far.
                 If we waited any longer, we could end up at a barrier in
                 front of code, which gives worse cache usage for separated
                 instruction / data caches.  */
              good_barrier = found_barrier;
              break;
            }
          else
            {
              rtx body = PATTERN (from);
              inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
            }
        }
      /* For the SH1, we generate alignments even after jumps-around-jumps.  */
      else if (GET_CODE (from) == JUMP_INSN
               && ! TARGET_SH2
               && ! TARGET_SMALLCODE)
        new_align = 4;

      if (found_si)
        {
          count_si += inc;
          if (new_align > si_align)
            {
              si_limit -= (count_si - 1) & (new_align - si_align);
              si_align = new_align;
            }
          count_si = (count_si + new_align - 1) & -new_align;
        }
      if (found_hi)
        {
          count_hi += inc;
          if (new_align > hi_align)
            {
              hi_limit -= (count_hi - 1) & (new_align - hi_align);
              hi_align = new_align;
            }
          count_hi = (count_hi + new_align - 1) & -new_align;
        }
      from = NEXT_INSN (from);
    }

  if (num_mova)
    {
      if (leading_mova)
        {
          /* Try as we might, the leading mova is out of range.  Change
             it into a load (which will become a pcload) and retry.  */
          fixup_mova (mova);
          return find_barrier (0, 0, mova);
        }
      else
        {
          /* Insert the constant pool table before the mova instruction,
             to prevent the mova label reference from going out of range.  */
          from = mova;
          good_barrier = found_barrier = barrier_before_mova;
        }
    }

  if (found_barrier)
    {
      if (good_barrier && next_real_insn (found_barrier))
        found_barrier = good_barrier;
    }
  else
    {
      /* We didn't find a barrier in time to dump our stuff,
         so we'll make one.  */
      rtx label = gen_label_rtx ();

      /* If we exceeded the range, then we must back up over the last
         instruction we looked at.  Otherwise, we just need to undo the
         NEXT_INSN at the end of the loop.  */
      if (count_hi > hi_limit || count_si > si_limit)
        from = PREV_INSN (PREV_INSN (from));
      else
        from = PREV_INSN (from);

      /* Walk back to be just before any jump or label.
         Putting it before a label reduces the number of times the branch
         around the constant pool table will be hit.  Putting it before
         a jump makes it more likely that the bra delay slot will be
         filled.  */
      while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
             || GET_CODE (from) == CODE_LABEL)
        from = PREV_INSN (from);

      from = emit_jump_insn_after (gen_jump (label), from);
      JUMP_LABEL (from) = label;
      LABEL_NUSES (label) = 1;
      found_barrier = emit_barrier_after (from);
      emit_label_after (label, found_barrier);
    }

  return found_barrier;
}
/* If the instruction INSN is implemented by a special function, and we can
   positively find the register that is used to call the sfunc, and this
   register is not used anywhere else in this instruction - except as the
   destination of a set, return this register; else, return 0.  */

rtx
sfunc_uses_reg (rtx insn)
{
  int i;
  rtx pattern, part, reg_part, reg;

  if (GET_CODE (insn) != INSN)
    return 0;
  pattern = PATTERN (insn);
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
    return 0;

  for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
        reg_part = part;
    }
  if (! reg_part)
    return 0;
  reg = XEXP (reg_part, 0);
  for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
    {
      part = XVECEXP (pattern, 0, i);
      if (part == reg_part || GET_CODE (part) == CLOBBER)
        continue;
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
                                  && GET_CODE (SET_DEST (part)) == REG)
                                 ? SET_SRC (part) : part)))
        return 0;
    }

  return reg;
}
/* See if the only way in which INSN uses REG is by calling it, or by
   setting it while calling it.  Set *SET to a SET rtx if the register
   is set by INSN.  */

static int
noncall_uses_reg (rtx reg, rtx insn, rtx *set)
{
  rtx pattern, reg2;

  *set = NULL_RTX;

  reg2 = sfunc_uses_reg (insn);
  if (reg2 && REGNO (reg2) == REGNO (reg))
    {
      pattern = single_set (insn);
      if (pattern
          && GET_CODE (SET_DEST (pattern)) == REG
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
        *set = pattern;
      return 0;
    }
  if (GET_CODE (insn) != CALL_INSN)
    {
      /* We don't use rtx_equal_p because we don't care if the mode is
         different.  */
      pattern = single_set (insn);
      if (pattern
          && GET_CODE (SET_DEST (pattern)) == REG
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
        {
          rtx par, part;
          int i;

          *set = pattern;
          par = PATTERN (insn);
          if (GET_CODE (par) == PARALLEL)
            for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
              {
                part = XVECEXP (par, 0, i);
                if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
                  return 1;
              }
          return reg_mentioned_p (reg, SET_SRC (pattern));
        }

      return 1;
    }

  pattern = PATTERN (insn);

  if (GET_CODE (pattern) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
        if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
          return 1;
      pattern = XVECEXP (pattern, 0, 0);
    }

  if (GET_CODE (pattern) == SET)
    {
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
        {
          /* We don't use rtx_equal_p, because we don't care if the
             mode is different.  */
          if (GET_CODE (SET_DEST (pattern)) != REG
              || REGNO (reg) != REGNO (SET_DEST (pattern)))
            return 1;

          *set = pattern;
        }

      pattern = SET_SRC (pattern);
    }

  if (GET_CODE (pattern) != CALL
      || GET_CODE (XEXP (pattern, 0)) != MEM
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
    return 1;

  return 0;
}
/* Given X, a pattern of an insn or a part of it, return a mask of used
   general registers.  Bits 0..15 mean that the respective registers
   are used as inputs in the instruction.  Bits 16..31 mean that the
   registers 0..15, respectively, are used as outputs, or are clobbered.
   IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */
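/* E.g. for (set (reg:SI 1) (plus:SI (reg:SI 2) (reg:SI 3))) the result is
   (1 << 17) | (1 << 3) | (1 << 2): r1 as an output, r2 and r3 as inputs.  */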
static int
regs_used (rtx x, int is_dest)
{
  enum rtx_code code;
  const char *fmt;
  int i, j;
  int used = 0;

  if (! x)
    return used;
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      if (REGNO (x) < 16)
        return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
                << (REGNO (x) + is_dest));
      return 0;
    case SUBREG:
      {
        rtx y = SUBREG_REG (x);

        if (GET_CODE (y) != REG)
          break;
        if (REGNO (y) < 16)
          return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
                  << (REGNO (y) +
                      subreg_regno_offset (REGNO (y),
                                           GET_MODE (y),
                                           SUBREG_BYTE (x),
                                           GET_MODE (x)) + is_dest));
        return 0;
      }
    case SET:
      return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
    case RETURN:
      /* If there was a return value, it must have been indicated with USE.  */
      return 0x00ffff00;
    case CLOBBER:
      is_dest = 1;
      break;
    case MEM:
      is_dest = 0;
      break;
    case CALL:
      used |= 0x00ff00f0;
      break;
    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            used |= regs_used (XVECEXP (x, i, j), is_dest);
        }
      else if (fmt[i] == 'e')
        used |= regs_used (XEXP (x, i), is_dest);
    }

  return used;
}
/* Create an instruction that prevents redirection of a conditional branch
   to the destination of the JUMP with address ADDR.
   If the branch needs to be implemented as an indirect jump, try to find
   a scratch register for it.
   If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
   If any preceding insn that doesn't fit into a delay slot is good enough,
   pass 1.  Pass 2 if a definite blocking insn is needed.
   -1 is used internally to avoid deep recursion.
   If a blocking instruction is made or recognized, return it.  */

static rtx
gen_block_redirect (rtx jump, int addr, int need_block)
{
  int dead = 0;
  rtx prev = prev_nonnote_insn (jump);
  rtx dest;

  /* First, check if we already have an instruction that satisfies our need.  */
  if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
    {
      if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
        return prev;
      if (GET_CODE (PATTERN (prev)) == USE
          || GET_CODE (PATTERN (prev)) == CLOBBER
          || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
        prev = jump;
      else if ((need_block &= ~1) < 0)
        return prev;
      else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
        need_block = 0;
    }
  if (GET_CODE (PATTERN (jump)) == RETURN)
    {
      if (! need_block)
        return prev;
      /* Reorg even does nasty things with return insns that cause branches
         to go out of range - see find_end_label and callers.  */
      return emit_insn_before (gen_block_branch_redirect (const0_rtx), jump);
    }
  /* We can't use JUMP_LABEL here because it might be undefined
     when not optimizing.  */
  dest = XEXP (SET_SRC (PATTERN (jump)), 0);
  /* If the branch is out of range, try to find a scratch register for it.  */
  if (optimize
      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
          > 4092 + 4098))
    {
      rtx scan;
      /* Don't look for the stack pointer as a scratch register,
         it would cause trouble if an interrupt occurred.  */
      unsigned try = 0x7fff, used;
      int jump_left = flag_expensive_optimizations + 1;

      /* It is likely that the most recent eligible instruction is wanted for
         the delay slot.  Therefore, find out which registers it uses, and
         try to avoid using them.  */

      for (scan = jump; (scan = PREV_INSN (scan)); )
        {
          enum rtx_code code;

          if (INSN_DELETED_P (scan))
            continue;
          code = GET_CODE (scan);
          if (code == CODE_LABEL || code == JUMP_INSN)
            break;
          if (code == INSN
              && GET_CODE (PATTERN (scan)) != USE
              && GET_CODE (PATTERN (scan)) != CLOBBER
              && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
            {
              try &= ~regs_used (PATTERN (scan), 0);
              break;
            }
        }
      for (used = dead = 0, scan = JUMP_LABEL (jump);
           (scan = NEXT_INSN (scan)); )
        {
          enum rtx_code code;

          if (INSN_DELETED_P (scan))
            continue;
          code = GET_CODE (scan);
          if (INSN_P (scan))
            {
              used |= regs_used (PATTERN (scan), 0);
              if (code == CALL_INSN)
                used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
              dead |= (used >> 16) & ~used;
              if (dead & try)
                {
                  dead &= try;
                  break;
                }
              if (code == JUMP_INSN)
                {
                  if (jump_left-- && simplejump_p (scan))
                    scan = JUMP_LABEL (scan);
                  else
                    break;
                }
            }
        }
      /* Mask out the stack pointer again, in case it was
         the only 'free' register we have found.  */
      dead &= 0x7fff;
    }
  /* If the immediate destination is still in range, check for possible
     threading with a jump beyond the delay slot insn.
     Don't check if we are called recursively; the jump has been or will be
     checked in a different invocation then.  */

  else if (optimize && need_block >= 0)
    {
      rtx next = next_active_insn (next_active_insn (dest));
      if (next && GET_CODE (next) == JUMP_INSN
          && GET_CODE (PATTERN (next)) == SET
          && recog_memoized (next) == CODE_FOR_jump_compact)
        {
          dest = JUMP_LABEL (next);
          if (dest
              && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
                  > 4092 + 4098))
            gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
        }
    }

  if (dead)
    {
      rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));

      /* It would be nice if we could convert the jump into an indirect
         jump / far branch right now, and thus exposing all constituent
         instructions to further optimization.  However, reorg uses
         simplejump_p to determine if there is an unconditional jump where
         it should try to schedule instructions from the target of the
         branch; simplejump_p fails for indirect jumps even if they have
         a JUMP_LABEL.  */
      rtx insn = emit_insn_before (gen_indirect_jump_scratch
                                   (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
                                   , jump);
      /* ??? We would like this to have the scope of the jump, but that
         scope will change when a delay slot insn of an inner scope is added.
         Hence, after delay slot scheduling, we'll have to expect
         NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
         the jump.  */

      INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
      INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
      return insn;
    }
  else if (need_block)
    /* We can't use JUMP_LABEL here because it might be undefined
       when not optimizing.  */
    return emit_insn_before (gen_block_branch_redirect
                             (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
                             , jump);
  return prev;
}
#define CONDJUMP_MIN -252
#define CONDJUMP_MAX 262
struct far_branch
{
  /* A label (to be placed) in front of the jump
     that jumps to our ultimate destination.  */
  rtx near_label;
  /* Where we are going to insert it if we cannot move the jump any farther,
     or the jump itself if we have picked up an existing jump.  */
  rtx insert_place;
  /* The ultimate destination.  */
  rtx far_label;
  struct far_branch *prev;
  /* If the branch has already been created, its address;
     else the address of its first prospective user.  */
  int address;
};

static void gen_far_branch (struct far_branch *);
enum mdep_reorg_phase_e mdep_reorg_phase;
static void
gen_far_branch (struct far_branch *bp)
{
  rtx insn = bp->insert_place;
  rtx jump;
  rtx label = gen_label_rtx ();
  int ok;

  emit_label_after (label, insn);
  if (bp->far_label)
    {
      jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
      LABEL_NUSES (bp->far_label)++;
    }
  else
    jump = emit_jump_insn_after (gen_return (), insn);
  /* Emit a barrier so that reorg knows that any following instructions
     are not reachable via a fall-through path.
     But don't do this when not optimizing, since we wouldn't suppress the
     alignment for the barrier then, and could end up with out-of-range
     pc-relative loads.  */
  if (optimize)
    emit_barrier_after (jump);
  emit_label_after (bp->near_label, insn);
  JUMP_LABEL (jump) = bp->far_label;
  ok = invert_jump (insn, label, 1);
  gcc_assert (ok);

  /* If we are branching around a jump (rather than a return), prevent
     reorg from using an insn from the jump target as the delay slot insn -
     when reorg did this, it pessimized code (we rather hide the delay slot)
     and it could cause branches to go out of range.  */
  if (bp->far_label)
    (emit_insn_after
     (gen_stuff_delay_slot
      (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
       GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
      insn));
  /* Prevent reorg from undoing our splits.  */
  gen_block_redirect (jump, bp->address += 2, 2);
}
/* Fix up ADDR_DIFF_VECs.  */
void
fixup_addr_diff_vecs (rtx first)
{
  rtx insn;

  for (insn = first; insn; insn = NEXT_INSN (insn))
    {
      rtx vec_lab, pat, prev, prevpat, x, braf_label;

      if (GET_CODE (insn) != JUMP_INSN
          || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
        continue;
      pat = PATTERN (insn);
      vec_lab = XEXP (XEXP (pat, 0), 0);

      /* Search the matching casesi_jump_2.  */
      for (prev = vec_lab; ; prev = PREV_INSN (prev))
        {
          if (GET_CODE (prev) != JUMP_INSN)
            continue;
          prevpat = PATTERN (prev);
          if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
            continue;
          x = XVECEXP (prevpat, 0, 1);
          if (GET_CODE (x) != USE)
            continue;
          x = XEXP (x, 0);
          if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
            break;
        }
      /* FIXME: This is a bug in the optimizer, but it seems harmless
         to just avoid panicking.  */
      if (! prev)
        continue;

      /* Emit the reference label of the braf where it belongs, right after
         the casesi_jump_2 (i.e. braf).  */
      braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
      emit_label_after (braf_label, prev);

      /* Fix up the ADDR_DIFF_VEC to be relative
         to the reference address of the braf.  */
      XEXP (XEXP (pat, 0), 0) = braf_label;
    }
}
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
   a barrier.  Return the base 2 logarithm of the desired alignment.  */
int
barrier_align (rtx barrier_or_label)
{
  rtx next = next_real_insn (barrier_or_label), pat, prev;
  int slot, credit, jump_to_next = 0;

  if (! next)
    return 0;

  pat = PATTERN (next);

  if (GET_CODE (pat) == ADDR_DIFF_VEC)
    return 2;

  if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
    /* This is a barrier in front of a constant table.  */
    return 0;

  prev = prev_real_insn (barrier_or_label);
  if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
    {
      pat = PATTERN (prev);
      /* If this is a very small table, we want to keep the alignment after
         the table to the minimum for proper code alignment.  */
      return ((TARGET_SMALLCODE
               || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
                   <= (unsigned) 1 << (CACHE_LOG - 2)))
              ? 1 << TARGET_SHMEDIA : align_jumps_log);
    }

  if (TARGET_SMALLCODE)
    return 0;

  if (! TARGET_SH2 || ! optimize)
    return align_jumps_log;

  /* When fixing up pcloads, a constant table might be inserted just before
     the basic block that ends with the barrier.  Thus, we can't trust the
     instruction lengths before that.  */
  if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
    {
      /* Check if there is an immediately preceding branch to the insn beyond
         the barrier.  We must weight the cost of discarding useful information
         from the current cache line when executing this branch and there is
         an alignment, against that of fetching unneeded insn in front of the
         branch target when there is no alignment.  */

      /* There are two delay_slot cases to consider.  One is the simple case
         where the preceding branch is to the insn beyond the barrier (simple
         delay slot filling), and the other is where the preceding branch has
         a delay slot that is a duplicate of the insn after the barrier
         (fill_eager_delay_slots) and the branch is to the insn after the insn
         after the barrier.  */

      /* PREV is presumed to be the JUMP_INSN for the barrier under
         investigation.  Skip to the insn before it.  */
      prev = prev_real_insn (prev);

      for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
           credit >= 0 && prev && GET_CODE (prev) == INSN;
           prev = prev_real_insn (prev))
        {
          jump_to_next = 0;
          if (GET_CODE (PATTERN (prev)) == USE
              || GET_CODE (PATTERN (prev)) == CLOBBER)
            continue;
          if (GET_CODE (PATTERN (prev)) == SEQUENCE)
            {
              prev = XVECEXP (PATTERN (prev), 0, 1);
              if (INSN_UID (prev) == INSN_UID (next))
                {
                  /* Delay slot was filled with insn at jump target.  */
                  jump_to_next = 1;
                  continue;
                }
            }

          if (slot
              && get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
            slot = 0;
          credit -= get_attr_length (prev);
        }
      if (prev
          && GET_CODE (prev) == JUMP_INSN
          && JUMP_LABEL (prev))
        {
          rtx x;
          if (jump_to_next
              || next_real_insn (JUMP_LABEL (prev)) == next
              /* If relax_delay_slots() decides NEXT was redundant
                 with some previous instruction, it will have
                 redirected PREV's jump to the following insn.  */
              || JUMP_LABEL (prev) == next_nonnote_insn (next)
              /* There is no upper bound on redundant instructions
                 that might have been skipped, but we must not put an
                 alignment where none had been before.  */
              || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
                  (INSN_P (x)
                   && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
                       || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
                       || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
            {
              rtx pat = PATTERN (prev);
              if (GET_CODE (pat) == PARALLEL)
                pat = XVECEXP (pat, 0, 0);
              if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
                return 0;
            }
        }
    }

  return align_jumps_log;
}
/* If we are inside a phony loop, almost any kind of label can turn up as the
   first one in the loop.  Aligning a braf label causes incorrect switch
   destination addresses; we can detect braf labels because they are
   followed by a BARRIER.
   Applying loop alignment to small constant or switch tables is a waste
   of space, so we suppress this too.  */
int
sh_loop_align (rtx label)
{
  rtx next = label;

  do
    next = next_nonnote_insn (next);
  while (next && GET_CODE (next) == CODE_LABEL);

  if (! next
      || ! INSN_P (next)
      || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
      || recog_memoized (next) == CODE_FOR_consttable_2)
    return 0;

  return align_loops_log;
}
4265 /* Do a final pass over the function, just before delayed branch
4271 rtx first
, insn
, mova
= NULL_RTX
;
4273 rtx r0_rtx
= gen_rtx_REG (Pmode
, 0);
4274 rtx r0_inc_rtx
= gen_rtx_POST_INC (Pmode
, r0_rtx
);
4276 first
= get_insns ();
4278 /* We must split call insns before introducing `mova's. If we're
4279 optimizing, they'll have already been split. Otherwise, make
4280 sure we don't split them too late. */
4282 split_all_insns_noflow ();
4287 /* If relaxing, generate pseudo-ops to associate function calls with
4288 the symbols they call. It does no harm to not generate these
4289 pseudo-ops. However, when we can generate them, it enables to
4290 linker to potentially relax the jsr to a bsr, and eliminate the
4291 register load and, possibly, the constant pool entry. */
4293 mdep_reorg_phase
= SH_INSERT_USES_LABELS
;
4296 /* Remove all REG_LABEL notes. We want to use them for our own
4297 purposes. This works because none of the remaining passes
4298 need to look at them.
4300 ??? But it may break in the future. We should use a machine
4301 dependent REG_NOTE, or some other approach entirely. */
4302 for (insn
= first
; insn
; insn
= NEXT_INSN (insn
))
4308 while ((note
= find_reg_note (insn
, REG_LABEL
, NULL_RTX
)) != 0)
4309 remove_note (insn
, note
);
      for (insn = first; insn; insn = NEXT_INSN (insn))
	{
	  rtx pattern, reg, link, set, scan, dies, label;
	  int rescan = 0, foundinsn = 0;

	  if (GET_CODE (insn) == CALL_INSN)
	    {
	      pattern = PATTERN (insn);

	      if (GET_CODE (pattern) == PARALLEL)
		pattern = XVECEXP (pattern, 0, 0);
	      if (GET_CODE (pattern) == SET)
		pattern = SET_SRC (pattern);

	      if (GET_CODE (pattern) != CALL
		  || GET_CODE (XEXP (pattern, 0)) != MEM)
		continue;

	      reg = XEXP (XEXP (pattern, 0), 0);
	    }
	  else
	    {
	      reg = sfunc_uses_reg (insn);
	      if (! reg)
		continue;
	    }

	  if (GET_CODE (reg) != REG)
	    continue;

	  /* This is a function call via REG.  If the only uses of REG
	     between the time that it is set and the time that it dies
	     are in function calls, then we can associate all the
	     function calls with the setting of REG.  */

	  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	    {
	      if (REG_NOTE_KIND (link) != 0)
		continue;
	      set = single_set (XEXP (link, 0));
	      if (set && rtx_equal_p (reg, SET_DEST (set)))
		{
		  link = XEXP (link, 0);
		  break;
		}
	    }

	  if (! link)
	    {
	      /* ??? Sometimes global register allocation will have
		 deleted the insn pointed to by LOG_LINKS.  Try
		 scanning backward to find where the register is set.  */
	      for (scan = PREV_INSN (insn);
		   scan && GET_CODE (scan) != CODE_LABEL;
		   scan = PREV_INSN (scan))
		{
		  if (! INSN_P (scan))
		    continue;

		  if (! reg_mentioned_p (reg, scan))
		    continue;

		  if (noncall_uses_reg (reg, scan, &set))
		    break;

		  if (set)
		    {
		      link = scan;
		      break;
		    }
		}
	    }

	  if (! link)
	    continue;
	  /* The register is set at LINK.  */

	  /* We can only optimize the function call if the register is
	     being set to a symbol.  In theory, we could sometimes
	     optimize calls to a constant location, but the assembler
	     and linker do not support that at present.  */
	  if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
	      && GET_CODE (SET_SRC (set)) != LABEL_REF)
	    continue;

	  /* Scan forward from LINK to the place where REG dies, and
	     make sure that the only insns which use REG are
	     themselves function calls.  */

	  /* ??? This doesn't work for call targets that were allocated
	     by reload, since there may not be a REG_DEAD note for the
	     register.  */

	  dies = NULL_RTX;
	  for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
	    {
	      rtx scanset;

	      /* Don't try to trace forward past a CODE_LABEL if we haven't
		 seen INSN yet.  Ordinarily, we will only find the setting insn
		 in LOG_LINKS if it is in the same basic block.  However,
		 cross-jumping can insert code labels in between the load and
		 the call, and can result in situations where a single call
		 insn may have two targets depending on where we came from.  */

	      if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
		break;

	      if (! INSN_P (scan))
		continue;

	      /* Don't try to trace forward past a JUMP.  To optimize
		 safely, we would have to check that all the
		 instructions at the jump destination did not use REG.  */

	      if (GET_CODE (scan) == JUMP_INSN)
		break;

	      if (! reg_mentioned_p (reg, scan))
		continue;

	      if (noncall_uses_reg (reg, scan, &scanset))
		break;

	      if (scan == insn)
		foundinsn = 1;

	      if (scan != insn
		  && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
		{
		  /* There is a function call to this register other
		     than the one we are checking.  If we optimize
		     this call, we need to rescan again below.  */
		  rescan = 1;
		}

	      /* ??? We shouldn't have to worry about SCANSET here.
		 We should just be able to check for a REG_DEAD note
		 on a function call.  However, the REG_DEAD notes are
		 apparently not dependable around libcalls; c-torture
		 execute/920501-2 is a test case.  If SCANSET is set,
		 then this insn sets the register, so it must have
		 died earlier.  Unfortunately, this will only handle
		 the cases in which the register is, in fact, set in a
		 later insn.  */

	      /* ??? We shouldn't have to use FOUNDINSN here.
		 However, the LOG_LINKS fields are apparently not
		 entirely reliable around libcalls;
		 newlib/libm/math/e_pow.c is a test case.  Sometimes
		 an insn will appear in LOG_LINKS even though it is
		 not the most recent insn which sets the register.  */

	      if (foundinsn
		  && (scanset
		      || find_reg_note (scan, REG_DEAD, reg)))
		{
		  dies = scan;
		  break;
		}
	    }

	  if (! dies)
	    {
	      /* Either there was a branch, or some insn used REG
		 other than as a function call address.  */
	      continue;
	    }

	  /* Create a code label, and put it in a REG_LABEL note on
	     the insn which sets the register, and on each call insn
	     which uses the register.  In final_prescan_insn we look
	     for the REG_LABEL notes, and output the appropriate label
	     or pseudo-op.  */

	  label = gen_label_rtx ();
	  REG_NOTES (link) = gen_rtx_INSN_LIST (REG_LABEL, label,
						REG_NOTES (link));
	  REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, label,
						REG_NOTES (insn));
	  if (rescan)
	    {
	      scan = link;
	      do
		{
		  rtx reg2;

		  scan = NEXT_INSN (scan);
		  if (scan != insn
		      && ((GET_CODE (scan) == CALL_INSN
			   && reg_mentioned_p (reg, scan))
			  || ((reg2 = sfunc_uses_reg (scan))
			      && REGNO (reg2) == REGNO (reg))))
		    REG_NOTES (scan)
		      = gen_rtx_INSN_LIST (REG_LABEL, label, REG_NOTES (scan));
		}
	      while (scan != dies);
	    }
	}
    }
  fixup_addr_diff_vecs (first);

  if (optimize)
    {
      mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
      shorten_branches (first);
    }

  /* Scan the function looking for move instructions which have to be
     changed to pc-relative loads and insert the literal tables.  */

  mdep_reorg_phase = SH_FIXUP_PCLOAD;
  for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
    {
      if (mova_p (insn))
	{
	  /* ??? basic block reordering can move a switch table dispatch
	     below the switch table.  Check if that has happened.
	     We only have the addresses available when optimizing; but then,
	     this check shouldn't be needed when not optimizing.  */
	  rtx label_ref = XVECEXP (SET_SRC (PATTERN (insn)), 0, 0);
	  if (optimize
	      && (INSN_ADDRESSES (INSN_UID (insn))
		  > INSN_ADDRESSES (INSN_UID (XEXP (label_ref, 0)))))
	    {
	      /* Change the mova into a load.
		 broken_move will then return true for it.  */
	      fixup_mova (insn);
	    }
	  else if (! num_mova++)
	    mova = insn;
	}
      else if (GET_CODE (insn) == JUMP_INSN
	       && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
	       && num_mova)
	{
	  rtx scan;
	  int total;

	  num_mova--;

	  /* Some code might have been inserted between the mova and
	     its ADDR_DIFF_VEC.  Check if the mova is still in range.  */
	  for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
	    total += get_attr_length (scan);

	  /* range of mova is 1020, add 4 because pc counts from address of
	     second instruction after this one, subtract 2 in case pc is 2
	     byte aligned.  Possible alignment needed for the ADDR_DIFF_VEC
	     cancels out with alignment effects of the mova itself.  */
	  if (total > 1022)
	    {
	      /* Change the mova into a load, and restart scanning
		 there.  broken_move will then return true for mova.  */
	      fixup_mova (mova);
	      insn = mova;
	      num_mova = 0;
	    }
	}
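      /* Worked example (illustrative, not from the original source): the
	 bound above folds the constants 1020 + 4 - 2 = 1022.  If the insns
	 between the mova and its ADDR_DIFF_VEC measure total = 1024 bytes,
	 the table is out of the mova's reach and the mova is rewritten as
	 an ordinary pc-relative load; at total = 1020 it is left alone.  */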
      if (broken_move (insn)
	  || (GET_CODE (insn) == INSN
	      && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
	{
	  rtx scan;
	  /* Scan ahead looking for a barrier to stick the constant table
	     behind.  */
	  rtx barrier = find_barrier (num_mova, mova, insn);
	  rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
	  int need_aligned_label = 0;

	  if (num_mova && ! mova_p (mova))
	    {
	      /* find_barrier had to change the first mova into a
		 pcload; thus, we have to start with this new pcload.  */
	      insn = mova;
	      num_mova = 0;
	    }
	  /* Now find all the moves between the points and modify them.  */
	  for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
	    {
	      if (GET_CODE (scan) == CODE_LABEL)
		last_float = 0;
	      if (GET_CODE (scan) == INSN
		  && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
		need_aligned_label = 1;
	      if (broken_move (scan))
		{
		  rtx *patp = &PATTERN (scan), pat = *patp;
		  rtx src, dst;
		  rtx lab;
		  rtx newsrc;
		  enum machine_mode mode;

		  if (GET_CODE (pat) == PARALLEL)
		    patp = &XVECEXP (pat, 0, 0), pat = *patp;
		  src = SET_SRC (pat);
		  dst = SET_DEST (pat);
		  mode = GET_MODE (dst);

		  if (mode == SImode && hi_const (src)
		      && REGNO (dst) != FPUL_REG)
		    {
		      int offset = 0;

		      mode = HImode;
		      while (GET_CODE (dst) == SUBREG)
			{
			  offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
							 GET_MODE (SUBREG_REG (dst)),
							 SUBREG_BYTE (dst),
							 GET_MODE (dst));
			  dst = SUBREG_REG (dst);
			}
		      dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
		    }
		  if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
		    {
		      /* This must be an insn that clobbers r0.  */
		      rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
						XVECLEN (PATTERN (scan), 0)
						- 1);
		      rtx clobber = *clobberp;

		      gcc_assert (GET_CODE (clobber) == CLOBBER
				  && rtx_equal_p (XEXP (clobber, 0), r0_rtx));

		      if (last_float
			  && reg_set_between_p (r0_rtx, last_float_move, scan))
			last_float = 0;
		      if (last_float
			  && GET_MODE_SIZE (mode) != 4
			  && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
			last_float = 0;
		      lab = add_constant (src, mode, last_float);
		      if (lab)
			emit_insn_before (gen_mova (lab), scan);
		      else
			{
			  /* There will be a REG_UNUSED note for r0 on
			     LAST_FLOAT_MOVE; we have to change it to REG_INC,
			     lest reorg:mark_target_live_regs will not
			     consider r0 to be used, and we end up with delay
			     slot insn in front of SCAN that clobbers r0.  */
			  rtx note
			    = find_regno_note (last_float_move, REG_UNUSED, 0);

			  /* If we are not optimizing, then there may not be
			     a note.  */
			  if (note)
			    PUT_MODE (note, REG_INC);

			  *last_float_addr = r0_inc_rtx;
			}
		      last_float_move = scan;
		      last_float = src;
		      newsrc = gen_rtx_MEM (mode,
					    (((TARGET_SH4 && ! TARGET_FMOVD)
					      || REGNO (dst) == FPUL_REG)
					     ? r0_inc_rtx
					     : r0_rtx));
		      last_float_addr = &XEXP (newsrc, 0);

		      /* Remove the clobber of r0.  */
		      *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
						   gen_rtx_SCRATCH (Pmode));
		    }
		  /* This is a mova needing a label.  Create it.  */
		  else if (GET_CODE (src) == UNSPEC
			   && XINT (src, 1) == UNSPEC_MOVA
			   && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
		    {
		      lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
		      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
		      newsrc = gen_rtx_UNSPEC (SImode,
					       gen_rtvec (1, newsrc),
					       UNSPEC_MOVA);
		    }
		  else
		    {
		      lab = add_constant (src, mode, 0);
		      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
		      newsrc = gen_const_mem (mode, newsrc);
		    }
		  *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
		  INSN_CODE (scan) = -1;
		}
	    }
	  dump_table (need_aligned_label ? insn : 0, barrier);
	  insn = barrier;
	}
    }
  mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
  INSN_ADDRESSES_FREE ();
  split_branches (first);

  /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
     also has an effect on the register that holds the address of the sfunc.
     Insert an extra dummy insn in front of each sfunc that pretends to
     use this register.  */
  if (flag_delayed_branch)
    {
      for (insn = first; insn; insn = NEXT_INSN (insn))
	{
	  rtx reg = sfunc_uses_reg (insn);

	  if (! reg)
	    continue;
	  emit_insn_before (gen_use_sfunc_addr (reg), insn);
	}
    }

  /* fpscr is not actually a user variable, but we pretend it is for the
     sake of the previous optimization passes, since we want it handled like
     one.  However, we don't have any debugging information for it, so turn
     it into a non-user variable now.  */
  REG_USERVAR_P (get_fpscr_rtx ()) = 0;

  mdep_reorg_phase = SH_AFTER_MDEP_REORG;
}
static int
get_dest_uid (rtx label, int max_uid)
{
  rtx dest = next_real_insn (label);
  int dest_uid;

  if (! dest)
    /* This can happen for an undefined label.  */
    return 0;
  dest_uid = INSN_UID (dest);
  /* If this is a newly created branch redirection blocking instruction,
     we cannot index the branch_uid or insn_addresses arrays with its
     uid.  But then, we won't need to, because the actual destination is
     the following branch.  */
  while (dest_uid >= max_uid)
    {
      dest = NEXT_INSN (dest);
      dest_uid = INSN_UID (dest);
    }
  if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
    return 0;
  return dest_uid;
}
/* Split condbranches that are out of range.  Also add clobbers for
   scratch registers that are needed in far jumps.
   We do this before delay slot scheduling, so that it can take our
   newly created instructions into account.  It also allows us to
   find branches with common targets more easily.  */

static void
split_branches (rtx first)
{
  rtx insn;
  struct far_branch **uid_branch, *far_branch_list = 0;
  int max_uid = get_max_uid ();
  int ok;

  /* Find out which branches are out of range.  */
  shorten_branches (first);

  uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
  memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);

  for (insn = first; insn; insn = NEXT_INSN (insn))
    if (! INSN_P (insn))
      continue;
    else if (INSN_DELETED_P (insn))
      {
	/* Shorten_branches would split this instruction again,
	   so transform it into a note.  */
	PUT_CODE (insn, NOTE);
	NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
	NOTE_SOURCE_FILE (insn) = 0;
      }
    else if (GET_CODE (insn) == JUMP_INSN
	     /* Don't mess with ADDR_DIFF_VEC */
	     && (GET_CODE (PATTERN (insn)) == SET
		 || GET_CODE (PATTERN (insn)) == RETURN))
      {
	enum attr_type type = get_attr_type (insn);
	if (type == TYPE_CBRANCH)
	  {
	    rtx next, beyond;

	    if (get_attr_length (insn) > 4)
	      {
		rtx src = SET_SRC (PATTERN (insn));
		rtx olabel = XEXP (XEXP (src, 1), 0);
		int addr = INSN_ADDRESSES (INSN_UID (insn));
		rtx label = 0;
		int dest_uid = get_dest_uid (olabel, max_uid);
		struct far_branch *bp = uid_branch[dest_uid];

		/* redirect_jump needs a valid JUMP_LABEL, and it might delete
		   the label if the LABEL_NUSES count drops to zero.  There is
		   always a jump_optimize pass that sets these values, but it
		   proceeds to delete unreferenced code, and then if not
		   optimizing, to un-delete the deleted instructions, thus
		   leaving labels with too low uses counts.  */
		if (! optimize)
		  {
		    JUMP_LABEL (insn) = olabel;
		    LABEL_NUSES (olabel)++;
		  }
		if (! bp)
		  {
		    bp = (struct far_branch *) alloca (sizeof *bp);
		    uid_branch[dest_uid] = bp;
		    bp->prev = far_branch_list;
		    far_branch_list = bp;
		    bp->far_label
		      = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
		    LABEL_NUSES (bp->far_label)++;
		  }
		else
		  {
		    label = bp->near_label;
		    if (! label && bp->address - addr >= CONDJUMP_MIN)
		      {
			rtx block = bp->insert_place;

			if (GET_CODE (PATTERN (block)) == RETURN)
			  block = PREV_INSN (block);
			else
			  block = gen_block_redirect (block,
						      bp->address, 2);
			label = emit_label_after (gen_label_rtx (),
						  PREV_INSN (block));
			bp->near_label = label;
		      }
		    else if (label && ! NEXT_INSN (label))
		      {
			if (addr + 2 - bp->address <= CONDJUMP_MAX)
			  bp->insert_place = insn;
			else
			  gen_far_branch (bp);
		      }
		  }
		if (! label
		    || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
		  {
		    bp->near_label = label = gen_label_rtx ();
		    bp->insert_place = insn;
		    bp->address = addr;
		  }
		ok = redirect_jump (insn, label, 1);
		gcc_assert (ok);
	      }
	    else
	      {
		/* get_attr_length (insn) == 2 */
		/* Check if we have a pattern where reorg wants to redirect
		   the branch to a label from an unconditional branch that
		   is too far away.  */
		/* We can't use JUMP_LABEL here because it might be undefined
		   when not optimizing.  */
		/* A syntax error might cause beyond to be NULL_RTX.  */
		beyond
		  = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
					    0));

		if (beyond
		    && (GET_CODE (beyond) == JUMP_INSN
			|| ((beyond = next_active_insn (beyond))
			    && GET_CODE (beyond) == JUMP_INSN))
		    && GET_CODE (PATTERN (beyond)) == SET
		    && recog_memoized (beyond) == CODE_FOR_jump_compact
		    && ((INSN_ADDRESSES
			 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
			 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
			> 252 + 258 + 2))
		  gen_block_redirect (beyond,
				      INSN_ADDRESSES (INSN_UID (beyond)), 1);
	      }

	    next = next_active_insn (insn);

	    if ((GET_CODE (next) == JUMP_INSN
		 || ((next = next_active_insn (next))
		     && GET_CODE (next) == JUMP_INSN))
		&& GET_CODE (PATTERN (next)) == SET
		&& recog_memoized (next) == CODE_FOR_jump_compact
		&& ((INSN_ADDRESSES
		     (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
		     - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
		    > 252 + 258 + 2))
	      gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
	  }
	else if (type == TYPE_JUMP || type == TYPE_RETURN)
	  {
	    int addr = INSN_ADDRESSES (INSN_UID (insn));
	    rtx far_label = 0;
	    int dest_uid = 0;
	    struct far_branch *bp;

	    if (type == TYPE_JUMP)
	      {
		far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
		dest_uid = get_dest_uid (far_label, max_uid);
		if (! dest_uid)
		  {
		    /* Parse errors can lead to labels outside
		       the insn stream.  */
		    if (! NEXT_INSN (far_label))
		      continue;

		    if (! optimize)
		      {
			JUMP_LABEL (insn) = far_label;
			LABEL_NUSES (far_label)++;
		      }
		    redirect_jump (insn, NULL_RTX, 1);
		    far_label = 0;
		  }
	      }
	    bp = uid_branch[dest_uid];
	    if (! bp)
	      {
		bp = (struct far_branch *) alloca (sizeof *bp);
		uid_branch[dest_uid] = bp;
		bp->prev = far_branch_list;
		far_branch_list = bp;
		bp->near_label = 0;
		bp->far_label = far_label;
		if (far_label)
		  LABEL_NUSES (far_label)++;
	      }
	    else if (bp->near_label && ! NEXT_INSN (bp->near_label))
	      {
		if (addr - bp->address <= CONDJUMP_MAX)
		  emit_label_after (bp->near_label, PREV_INSN (insn));
		else
		  {
		    gen_far_branch (bp);
		    bp->near_label = 0;
		  }
	      }
	    else
	      bp->near_label = 0;
	    bp->address = addr;
	    bp->insert_place = insn;
	    if (! far_label)
	      emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
	    else
	      gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
	  }
      }
  /* Generate all pending far branches,
     and free our references to the far labels.  */
  while (far_branch_list)
    {
      if (far_branch_list->near_label
	  && ! NEXT_INSN (far_branch_list->near_label))
	gen_far_branch (far_branch_list);
      if (optimize
	  && far_branch_list->far_label
	  && ! --LABEL_NUSES (far_branch_list->far_label))
	delete_insn (far_branch_list->far_label);
      far_branch_list = far_branch_list->prev;
    }

  /* Instruction length information is no longer valid due to the new
     instructions that have been generated.  */
  init_insn_lengths ();
}
/* Dump out instruction addresses, which is useful for debugging the
   constant pool table stuff.

   If relaxing, output the label and pseudo-ops used to link together
   calls and the instruction which set the registers.  */

/* ??? The addresses printed by this routine for insns are nonsense for
   insns which are inside of a sequence where none of the inner insns have
   variable length.  This is because the second pass of shorten_branches
   does not bother to update them.  */

void
final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
		    int noperands ATTRIBUTE_UNUSED)
{
  if (TARGET_DUMPISIZE)
    fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));

  if (TARGET_RELAX)
    {
      rtx note;

      note = find_reg_note (insn, REG_LABEL, NULL_RTX);
      if (note)
	{
	  rtx pattern;

	  pattern = PATTERN (insn);
	  if (GET_CODE (pattern) == PARALLEL)
	    pattern = XVECEXP (pattern, 0, 0);
	  switch (GET_CODE (pattern))
	    {
	    case SET:
	      if (GET_CODE (SET_SRC (pattern)) != CALL
		  && get_attr_type (insn) != TYPE_SFUNC)
		{
		  targetm.asm_out.internal_label
		    (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
		  break;
		}
	      /* else FALLTHROUGH */
	    case CALL:
	      asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
			   CODE_LABEL_NUMBER (XEXP (note, 0)));
	      break;

	    default:
	      break;
	    }
	}
    }
}
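/* Example of the emitted assembly (assumed, for illustration only;
   register and label names are made up):
	mov.l	.L3,r1		! load address of foo from the pool
	.uses	L5		! emitted by the code above
	jsr	@r1
   The .uses marker tells a relaxing linker which load feeds the jsr, so
   it may rewrite the pair as a direct `bsr foo' and delete both the
   register load and the constant pool entry.  */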
/* Dump out any constants accumulated in the final pass.  These will
   only be labels.  */

const char *
output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");
      for (i = 0; i < pool_size; i++)
	{
	  pool_node *p = &pool_vector[i];

	  (*targetm.asm_out.internal_label) (asm_out_file, "L",
					     CODE_LABEL_NUMBER (p->label));
	  output_asm_insn (".long	%O0", &p->value);
	}
      pool_size = 0;
    }

  return "";
}
/* A full frame looks like:

   arg-5
   arg-4
   [ if current_function_anonymous_args
   arg-3
   arg-2
   arg-1
   arg-0 ]
   saved-fp
   saved-r10
   saved-r11
   saved-r12
   saved-pr
   local-n
   ..
   local-1
   local-0        <- fp points here.  */

/* Number of bytes pushed for anonymous args, used to pass information
   between expand_prologue and expand_epilogue.  */

/* Adjust the stack by SIZE bytes.  REG holds the rtl of the register to be
   adjusted.  If epilogue_p is zero, this is for a prologue; otherwise, it's
   for an epilogue and a negative value means that it's for a sibcall
   epilogue.  If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
   all the registers that are about to be restored, and hence dead.  */
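/* Illustrative calls (assumed, not from the original source):

     output_stack_adjust (-16, stack_pointer_rtx, 0, NULL);
	prologue: allocate 16 bytes of frame.
     output_stack_adjust (16, stack_pointer_rtx, 1, &live_regs_mask);
	epilogue: release the same 16 bytes; the mask lets a scavenged
	temporary be taken from registers that are about to be restored
	and are therefore dead.  */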
static void
output_stack_adjust (int size, rtx reg, int epilogue_p,
		     HARD_REG_SET *live_regs_mask)
{
  rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
  if (size)
    {
      HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

      /* This test is bogus, as output_stack_adjust is used to re-align the
	 stack.  */
#if 0
      gcc_assert (!(size % align));
#endif

      if (CONST_OK_FOR_ADD (size))
	emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
      /* Try to do it with two partial adjustments; however, we must make
	 sure that the stack is properly aligned at all times, in case
	 an interrupt occurs between the two partial adjustments.  */
      else if (CONST_OK_FOR_ADD (size / 2 & -align)
	       && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
	{
	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
	  emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
	}
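      /* Worked example (illustrative): with SIZE = 200 and ALIGN = 8 on a
	 target whose add-immediate takes a signed 8-bit value, 200 itself
	 is out of range, but 200 / 2 & -8 = 96 and 200 - 96 = 104 both
	 fit, so the stack moves by 96 and then 104 bytes, staying 8-byte
	 aligned after each step.  */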
      else
	{
	  rtx const_reg;
	  rtx insn;
	  int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
	  int i;

	  /* If TEMP is invalid, we could temporarily save a general
	     register to MACL.  However, there is currently no need
	     to handle this case, so just die when we see it.  */
	  if (epilogue_p < 0
	      || current_function_interrupt
	      || ! call_really_used_regs[temp] || fixed_regs[temp])
	    temp = -1;
	  if (temp < 0 && ! current_function_interrupt
	      && (TARGET_SHMEDIA || epilogue_p >= 0))
	    {
	      HARD_REG_SET temps;
	      COPY_HARD_REG_SET (temps, call_used_reg_set);
	      AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
	      if (epilogue_p > 0)
		{
		  int nreg = 0;
		  if (current_function_return_rtx)
		    {
		      enum machine_mode mode;
		      mode = GET_MODE (current_function_return_rtx);
		      if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
			nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
		    }
		  for (i = 0; i < nreg; i++)
		    CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
		  if (current_function_calls_eh_return)
		    {
		      CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
		      for (i = 0; i <= 3; i++)
			CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
		    }
		}
	      if (TARGET_SHMEDIA && epilogue_p < 0)
		for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
		  CLEAR_HARD_REG_BIT (temps, i);
	      if (epilogue_p <= 0)
		{
		  for (i = FIRST_PARM_REG;
		       i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
		    CLEAR_HARD_REG_BIT (temps, i);
		  if (cfun->static_chain_decl != NULL)
		    CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
		}
	      temp = scavenge_reg (&temps);
	    }
	  if (temp < 0 && live_regs_mask)
	    temp = scavenge_reg (live_regs_mask);
	  if (temp < 0)
	    {
	      rtx adj_reg, tmp_reg, mem;

	      /* If we reached here, the most likely case is the (sibcall)
		 epilogue for non SHmedia.  Put a special push/pop sequence
		 for such case as the last resort.  This looks lengthy but
		 would not be a problem because it seems to be very
		 rare.  */

	      gcc_assert (!TARGET_SHMEDIA && epilogue_p);

	      /* ??? There is still the slight possibility that r4 or
		 r5 have been reserved as fixed registers or assigned
		 as global registers, and they change during an
		 interrupt.  There are possible ways to handle this:

		 - If we are adjusting the frame pointer (r14), we can do
		   with a single temp register and an ordinary push / pop
		   on the stack.
		 - Grab any call-used or call-saved registers (i.e. not
		   fixed or globals) for the temps we need.  We might
		   also grab r14 if we are adjusting the stack pointer.
		   If we can't find enough available registers, issue
		   a diagnostic and die - the user must have reserved
		   way too many registers.
		 But since all this is rather unlikely to happen and
		 would require extra testing, we just die if r4 / r5
		 are not available.  */
	      gcc_assert (!fixed_regs[4] && !fixed_regs[5]
			  && !global_regs[4] && !global_regs[5]);

	      adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
	      tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
	      emit_move_insn (gen_rtx_MEM (Pmode, reg), adj_reg);
	      emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
	      emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
	      mem = gen_rtx_MEM (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
	      emit_move_insn (mem, tmp_reg);
	      emit_move_insn (tmp_reg, gen_rtx_MEM (Pmode, reg));
	      mem = gen_rtx_MEM (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
	      emit_move_insn (mem, tmp_reg);
	      emit_move_insn (reg, adj_reg);
	      mem = gen_rtx_MEM (Pmode, gen_rtx_POST_INC (Pmode, reg));
	      emit_move_insn (adj_reg, mem);
	      mem = gen_rtx_MEM (Pmode, gen_rtx_POST_INC (Pmode, reg));
	      emit_move_insn (tmp_reg, mem);
	      return;
	    }
	  const_reg = gen_rtx_REG (GET_MODE (reg), temp);

	  /* If SIZE is negative, subtract the positive value.
	     This sometimes allows a constant pool entry to be shared
	     between prologue and epilogue code.  */
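	  /* Example (illustrative): a function whose prologue does
	     `sp -= 256' and whose epilogue does `sp += 256' loads the
	     single literal 256 both times and shares one constant pool
	     entry, instead of pooling both -256 and 256.  */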
	  if (size < 0)
	    {
	      emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
	      insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
	    }
	  else
	    {
	      emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
	      insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
	    }
	  if (! epilogue_p)
	    REG_NOTES (insn)
	      = (gen_rtx_EXPR_LIST
		 (REG_FRAME_RELATED_EXPR,
		  gen_rtx_SET (VOIDmode, reg,
			       gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
		  REG_NOTES (insn)));
	}
    }
}

static rtx
frame_insn (rtx x)
{
  x = emit_insn (x);
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
/* Output RTL to push register RN onto the stack.  */

static rtx
push (int rn)
{
  rtx x;
  if (rn == FPUL_REG)
    x = gen_push_fpul ();
  else if (rn == FPSCR_REG)
    x = gen_push_fpscr ();
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
	   && FP_OR_XD_REGISTER_P (rn))
    {
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
	return NULL_RTX;
      x = gen_push_4 (gen_rtx_REG (DFmode, rn));
    }
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
    x = gen_push_e (gen_rtx_REG (SFmode, rn));
  else
    x = gen_push (gen_rtx_REG (SImode, rn));

  x = frame_insn (x);
  REG_NOTES (x)
    = gen_rtx_EXPR_LIST (REG_INC,
			 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
  return x;
}
/* Output RTL to pop register RN from the stack.  */

static void
pop (int rn)
{
  rtx x;
  if (rn == FPUL_REG)
    x = gen_pop_fpul ();
  else if (rn == FPSCR_REG)
    x = gen_pop_fpscr ();
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
	   && FP_OR_XD_REGISTER_P (rn))
    {
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
	return;
      x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
    }
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
    x = gen_pop_e (gen_rtx_REG (SFmode, rn));
  else
    x = gen_pop (gen_rtx_REG (SImode, rn));

  x = emit_insn (x);
  REG_NOTES (x)
    = gen_rtx_EXPR_LIST (REG_INC,
			 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
}
/* Generate code to push the regs specified in the mask.  */

static void
push_regs (HARD_REG_SET *mask, int interrupt_handler)
{
  int i;
  int skip_fpscr = 0;

  /* Push PR last; this gives better latencies after the prologue, and
     candidates for the return delay slot when there are no general
     registers pushed.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* If this is an interrupt handler, and the SZ bit varies,
	 and we have to push any floating point register, we need
	 to switch to the correct precision first.  */
      if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
	  && hard_regs_intersect_p (mask, &reg_class_contents[DF_REGS]))
	{
	  HARD_REG_SET unsaved;

	  push (FPSCR_REG);
	  COMPL_HARD_REG_SET (unsaved, *mask);
	  fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
	  skip_fpscr = 1;
	}
      if (i != PR_REG
	  && (i != FPSCR_REG || ! skip_fpscr)
	  && TEST_HARD_REG_BIT (*mask, i))
	push (i);
    }
  if (TEST_HARD_REG_BIT (*mask, PR_REG))
    push (PR_REG);
}
/* Calculate how much extra space is needed to save all callee-saved
   target registers.
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */

static int
shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
{
  int reg;
  int stack_space = 0;
  int interrupt_handler = sh_cfun_interrupt_handler_p ();

  for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
    if ((! call_really_used_regs[reg] || interrupt_handler)
	&& ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
      /* Leave space to save this target register on the stack,
	 in case target register allocation wants to use it.  */
      stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
  return stack_space;
}
/* Decide whether we should reserve space for callee-save target registers,
   in case target register allocation wants to use them.  REGS_SAVED is
   the space, in bytes, that is already required for register saves.
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */

static int
shmedia_reserve_space_for_target_registers_p (int regs_saved,
					      HARD_REG_SET *live_regs_mask)
{
  if (optimize_size)
    return 0;
  return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
}

/* Decide how much space to reserve for callee-save target registers
   in case target register allocation wants to use them.
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */

static int
shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
{
  if (shmedia_space_reserved_for_target_registers)
    return shmedia_target_regs_stack_space (live_regs_mask);
  else
    return 0;
}
/* Work out the registers which need to be saved, both as a mask and a
   count of saved words.  Return the count.

   If doing a pragma interrupt function, then push all regs used by the
   function, and if we call another function (we can tell by looking at PR),
   make sure that all the regs it clobbers are safe too.  */

static int
calc_live_regs (HARD_REG_SET *live_regs_mask)
{
  unsigned int reg;
  int count;
  int interrupt_handler;
  int pr_live, has_call;

  interrupt_handler = sh_cfun_interrupt_handler_p ();

  CLEAR_HARD_REG_SET (*live_regs_mask);
  if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
      && regs_ever_live[FPSCR_REG])
    target_flags &= ~MASK_FPU_SINGLE;
  /* If we can save a lot of saves by switching to double mode, do that.  */
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
    for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
      if (regs_ever_live[reg] && regs_ever_live[reg+1]
	  && (! call_really_used_regs[reg]
	      || (interrupt_handler && ! pragma_trapa))
	  && ++count > 2)
	{
	  target_flags &= ~MASK_FPU_SINGLE;
	  break;
	}
  /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
     knows how to use it.  That means the pseudo originally allocated for
     the initial value can become the PR_MEDIA_REG hard register, as seen for
     execute/20010122-1.c:test9.  */
  if (TARGET_SHMEDIA)
    /* ??? this function is called from initial_elimination_offset, hence we
       can't use the result of sh_media_register_for_return here.  */
    pr_live = sh_pr_n_sets ();
  else
    {
      rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
      pr_live = (pr_initial
		 ? (GET_CODE (pr_initial) != REG
		    || REGNO (pr_initial) != (PR_REG))
		 : regs_ever_live[PR_REG]);
      /* For Shcompact, if not optimizing, we end up with a memory reference
	 using the return address pointer for __builtin_return_address even
	 though there is no actual need to put the PR register on the stack.  */
      pr_live |= regs_ever_live[RETURN_ADDRESS_POINTER_REGNUM];
    }
  /* Force PR to be live if the prologue has to call the SHmedia
     argument decoder or register saver.  */
  if (TARGET_SHCOMPACT
      && ((current_function_args_info.call_cookie
	   & ~ CALL_COOKIE_RET_TRAMP (1))
	  || current_function_has_nonlocal_label))
    pr_live = 1;
  has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
  for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
    {
      if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
	  ? pr_live
	  : (interrupt_handler && ! pragma_trapa)
	  ? (/* Need to save all the regs ever live.  */
	     (regs_ever_live[reg]
	      || (call_really_used_regs[reg]
		  && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
		      || reg == PIC_OFFSET_TABLE_REGNUM)
		  && has_call)
	      || (has_call && REGISTER_NATURAL_MODE (reg) == SImode
		  && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
	     && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
	     && reg != RETURN_ADDRESS_POINTER_REGNUM
	     && reg != T_REG && reg != GBR_REG
	     /* Push fpscr only on targets which have FPU */
	     && (reg != FPSCR_REG || TARGET_FPU_ANY))
	  : (/* Only push those regs which are used and need to be saved.  */
	     (TARGET_SHCOMPACT
	      && flag_pic
	      && current_function_args_info.call_cookie
	      && reg == PIC_OFFSET_TABLE_REGNUM)
	     || (regs_ever_live[reg] && ! call_really_used_regs[reg])
	     || (current_function_calls_eh_return
		 && (reg == EH_RETURN_DATA_REGNO (0)
		     || reg == EH_RETURN_DATA_REGNO (1)
		     || reg == EH_RETURN_DATA_REGNO (2)
		     || reg == EH_RETURN_DATA_REGNO (3)))
	     || ((reg == MACL_REG || reg == MACH_REG)
		 && regs_ever_live[reg]
		 && sh_cfun_attr_renesas_p ())))
	{
	  SET_HARD_REG_BIT (*live_regs_mask, reg);
	  count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));

	  if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
	      && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
	    {
	      if (FP_REGISTER_P (reg))
		{
		  if (! TARGET_FPU_SINGLE && ! regs_ever_live[reg ^ 1])
		    {
		      SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
		      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
		    }
		}
	      else if (XD_REGISTER_P (reg))
		{
		  /* Must switch to double mode to access these registers.  */
		  target_flags &= ~MASK_FPU_SINGLE;
		}
	    }
	}
    }
  /* If we have a target register optimization pass after prologue / epilogue
     threading, we need to assume all target registers will be live even if
     they aren't now.  */
  if (flag_branch_target_load_optimize2
      && TARGET_SAVE_ALL_TARGET_REGS
      && shmedia_space_reserved_for_target_registers)
    for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
      if ((! call_really_used_regs[reg] || interrupt_handler)
	  && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
	{
	  SET_HARD_REG_BIT (*live_regs_mask, reg);
	  count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
	}
  /* If this is an interrupt handler, we don't have any call-clobbered
     registers we can conveniently use for target register save/restore.
     Make sure we save at least one general purpose register when we need
     to save target registers.  */
  if (interrupt_handler
      && hard_regs_intersect_p (live_regs_mask,
				&reg_class_contents[TARGET_REGS])
      && ! hard_regs_intersect_p (live_regs_mask,
				  &reg_class_contents[GENERAL_REGS]))
    {
      SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
    }

  return count;
}
/* Code to generate prologue and epilogue sequences */

/* PUSHED is the number of bytes that are being pushed on the
   stack for register saves.  Return the frame size, padded
   appropriately so that the stack stays properly aligned.  */
static HOST_WIDE_INT
rounded_frame_size (int pushed)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

  return ((size + pushed + align - 1) & -align) - pushed;
}
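/* Worked example (illustrative): with get_frame_size () == 18, PUSHED == 12
   and an 8-byte STACK_BOUNDARY, ((18 + 12 + 7) & -8) - 12 = 32 - 12 = 20,
   i.e. the 18 bytes of locals are padded to 20 so that the register saves
   plus locals end on an 8-byte boundary.  */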
/* Choose a call-clobbered target-branch register that remains
   unchanged along the whole function.  We set it up as the return
   value in the prologue.  */
int
sh_media_register_for_return (void)
{
  int regno;
  int tr0_used;

  if (! current_function_is_leaf)
    return -1;
  if (lookup_attribute ("interrupt_handler",
			DECL_ATTRIBUTES (current_function_decl)))
    return -1;
  if (sh_cfun_interrupt_handler_p ())
    return -1;

  tr0_used = flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM];

  for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
    if (call_really_used_regs[regno] && ! regs_ever_live[regno])
      return regno;

  return -1;
}
/* The maximum registers we need to save are:
   - 62 general purpose registers (r15 is stack pointer, r63 is zero)
   - 32 floating point registers (for each pair, we save none,
     one single precision value, or a double precision value).
   - 8 target registers
   - add 1 entry for a delimiter.  */
#define MAX_SAVED_REGS (62+32+8)

typedef struct save_entry_s
{
  int reg;
  int offset;
  enum machine_mode mode;
} save_entry;

#define MAX_TEMPS 4

/* There will be a delimiter entry with VOIDmode both at the start and the
   end of a filled in schedule.  The end delimiter has the offset of the
   save with the smallest (i.e. most negative) offset.  */
typedef struct save_schedule_s
{
  save_entry entries[MAX_SAVED_REGS + 2];
  int temps[MAX_TEMPS+1];
} save_schedule;
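/* Illustration (assumed layout, not from the original source): a schedule
   filled for two 8-byte register saves with OFFSET_BASE == 0 would hold
     entries[0]:  mode VOIDmode, offset   0	start delimiter
     entries[1]:  reg 29, mode DImode, offset  -8
     entries[2]:  reg 28, mode DImode, offset -16
     entries[3]:  mode VOIDmode, offset -16	end delimiter (smallest offset)
   while temps[] lists usable scratch registers, terminated by -1.  */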
/* Fill in SCHEDULE according to LIVE_REGS_MASK.  If RESTORE is nonzero,
   use reverse order.  Returns the last entry written to (not counting
   the delimiter).  OFFSET_BASE is a number to be added to all offset
   entries.  */

static save_entry *
sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
		    int offset_base)
{
  int align, i;
  save_entry *entry = schedule->entries;
  int tmpx = 0;
  int offset;

  if (! current_function_interrupt)
    for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
      if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
	  && ! FUNCTION_ARG_REGNO_P (i)
	  && i != FIRST_RET_REG
	  && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
	  && ! (current_function_calls_eh_return
		&& (i == EH_RETURN_STACKADJ_REGNO
		    || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
			&& (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
	schedule->temps[tmpx++] = i;
  entry->mode = VOIDmode;
  entry->offset = offset_base;
  entry++;
  /* We loop twice: first, we save 8-byte aligned registers in the
     higher addresses, that are known to be aligned.  Then, we
     proceed to saving 32-bit registers that don't need 8-byte
     alignment.
     If this is an interrupt function, all registers that need saving
     need to be saved in full.  moreover, we need to postpone saving
     target registers till we have saved some general purpose registers
     we can then use as scratch registers.  */
  offset = offset_base;
  for (align = 1; align >= 0; align--)
    {
      for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
	if (TEST_HARD_REG_BIT (*live_regs_mask, i))
	  {
	    enum machine_mode mode = REGISTER_NATURAL_MODE (i);

	    if (current_function_interrupt)
	      {
		if (TARGET_REGISTER_P (i))
		  continue;
		if (GENERAL_REGISTER_P (i))
		  mode = DImode;
	      }
	    if (mode == SFmode && (i % 2) == 1
		&& ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
		&& (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
	      {
		mode = DFmode;
		i--;
	      }

	    /* If we're doing the aligned pass and this is not aligned,
	       or we're doing the unaligned pass and this is aligned,
	       skip it.  */
	    if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
		!= align)
	      continue;

	    if (current_function_interrupt
		&& GENERAL_REGISTER_P (i)
		&& tmpx < MAX_TEMPS)
	      schedule->temps[tmpx++] = i;

	    offset -= GET_MODE_SIZE (mode);
	    entry->reg = i;
	    entry->mode = mode;
	    entry->offset = offset;
	    entry++;
	  }
      if (align && current_function_interrupt)
	for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
	  if (TEST_HARD_REG_BIT (*live_regs_mask, i))
	    {
	      offset -= GET_MODE_SIZE (DImode);
	      entry->reg = i;
	      entry->mode = DImode;
	      entry->offset = offset;
	      entry++;
	    }
    }
  entry->mode = VOIDmode;
  entry->offset = offset;
  schedule->temps[tmpx] = -1;
  return entry - 1;
}
void
sh_expand_prologue (void)
{
  HARD_REG_SET live_regs_mask;
  int d, i;
  int d_rounding = 0;
  int save_flags = target_flags;
  int pretend_args;

  current_function_interrupt = sh_cfun_interrupt_handler_p ();

  /* We have pretend args if we had an object sent partially in registers
     and partially on the stack, e.g. a large structure.  */
  pretend_args = current_function_pretend_args_size;
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
      && (NPARM_REGS(SImode)
	  > current_function_args_info.arg_count[(int) SH_ARG_INT]))
    pretend_args = 0;
  output_stack_adjust (-pretend_args
		       - current_function_args_info.stack_regs * 8,
		       stack_pointer_rtx, 0, NULL);

  if (TARGET_SHCOMPACT && flag_pic && current_function_args_info.call_cookie)
    /* We're going to use the PIC register to load the address of the
       incoming-argument decoder and/or of the return trampoline from
       the GOT, so make sure the PIC register is preserved and
       initialized.  */
    regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SHCOMPACT
      && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
    {
      int reg;

      /* First, make all registers with incoming arguments that will
	 be pushed onto the stack live, so that register renaming
	 doesn't overwrite them.  */
      for (reg = 0; reg < NPARM_REGS (SImode); reg++)
	if (CALL_COOKIE_STACKSEQ_GET (current_function_args_info.call_cookie)
	    >= NPARM_REGS (SImode) - reg)
	  for (; reg < NPARM_REGS (SImode); reg++)
	    emit_insn (gen_shcompact_preserve_incoming_args
		       (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
	else if (CALL_COOKIE_INT_REG_GET
		 (current_function_args_info.call_cookie, reg) == 1)
	  emit_insn (gen_shcompact_preserve_incoming_args
		     (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));

      emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
		      stack_pointer_rtx);
      emit_move_insn (gen_rtx_REG (SImode, R0_REG),
		      GEN_INT (current_function_args_info.call_cookie));
      emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
		      gen_rtx_REG (SImode, R0_REG));
    }
  else if (TARGET_SHMEDIA)
    {
      int tr = sh_media_register_for_return ();

      if (tr >= 0)
	{
	  rtx insn = emit_move_insn (gen_rtx_REG (DImode, tr),
				     gen_rtx_REG (DImode, PR_MEDIA_REG));

	  /* ??? We should suppress saving pr when we don't need it, but this
	     is tricky because of builtin_return_address.  */

	  /* If this function only exits with sibcalls, this copy
	     will be flagged as dead.  */
	  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
						const0_rtx,
						REG_NOTES (insn));
	}
    }

  /* Emit the code for SETUP_VARARGS.  */
  if (current_function_stdarg)
    {
      if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
	{
	  /* Push arg regs as if they'd been provided by caller in stack.  */
	  for (i = 0; i < NPARM_REGS(SImode); i++)
	    {
	      int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
	      rtx insn;

	      if (i >= (NPARM_REGS(SImode)
			- current_function_args_info.arg_count[(int) SH_ARG_INT]
			))
		break;
	      insn = push (rn);
	      RTX_FRAME_RELATED_P (insn) = 0;
	    }
	}
    }

  /* If we're supposed to switch stacks at function entry, do so now.  */
  if (sp_switch)
    emit_insn (gen_sp_switch_1 ());

  d = calc_live_regs (&live_regs_mask);
  /* ??? Maybe we could save some switching if we can move a mode switch
     that already happens to be at the function start into the prologue.  */
  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());

  if (TARGET_SH5)
    {
      int offset_base, offset;
      rtx r0 = NULL_RTX;
      int offset_in_r0 = -1;
      int sp_in_r0 = 0;
      int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
      int total_size, save_size;
      save_schedule schedule;
      save_entry *entry;
      int *tmp_pnt;

      if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
	  && ! current_function_interrupt)
	r0 = gen_rtx_REG (Pmode, R0_REG);

      /* D is the actual number of bytes that we need for saving registers,
	 however, in initial_elimination_offset we have committed to using
	 an additional TREGS_SPACE amount of bytes - in order to keep both
	 addresses to arguments supplied by the caller and local variables
	 valid, we must keep this gap.  Place it between the incoming
	 arguments and the actually saved registers in a bid to optimize
	 locality of reference.  */
      total_size = d + tregs_space;
      total_size += rounded_frame_size (total_size);
      save_size = total_size - rounded_frame_size (d);
      if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
	d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
		      - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));

      /* If adjusting the stack in a single step costs nothing extra, do so.
	 I.e. either if a single addi is enough, or we need a movi anyway,
	 and we don't exceed the maximum offset range (the test for the
	 latter is conservative for simplicity).  */
      if (TARGET_SHMEDIA
	  && (CONST_OK_FOR_I10 (-total_size)
	      || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
		  && total_size <= 2044)))
	d_rounding = total_size - save_size;
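      /* Example (illustrative): the SHmedia addi immediate is a signed
	 10-bit value, so with total_size == 480 a single add of -480
	 suffices.  With total_size == 1500 the constant needs a movi
	 either way, so the register-save and frame adjustments are still
	 merged into one step as long as total_size <= 2044.  */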
      offset_base = d + d_rounding;

      output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
			   0, NULL);

      sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
      tmp_pnt = schedule.temps;
      for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
	{
	  enum machine_mode mode = entry->mode;
	  unsigned int reg = entry->reg;
	  rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
	  rtx orig_reg_rtx;

	  offset = entry->offset;

	  reg_rtx = gen_rtx_REG (mode, reg);

	  mem_rtx = gen_rtx_MEM (mode,
				 gen_rtx_PLUS (Pmode,
					       stack_pointer_rtx,
					       GEN_INT (offset)));

	  GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);

	  gcc_assert (r0);
	  mem_rtx = NULL_RTX;

	try_pre_dec:
	  do
	    if (HAVE_PRE_DECREMENT
		&& (offset_in_r0 - offset == GET_MODE_SIZE (mode)
		    || mem_rtx == NULL_RTX
		    || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
	      {
		pre_dec = gen_rtx_MEM (mode,
				       gen_rtx_PRE_DEC (Pmode, r0));

		GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
					  pre_dec_ok);

		pre_dec = NULL_RTX;

		break;

	      pre_dec_ok:
		mem_rtx = NULL_RTX;
		offset += GET_MODE_SIZE (mode);
	      }
	  while (0);

	  if (mem_rtx != NULL_RTX)
	    goto addr_ok;

	  if (offset_in_r0 == -1)
	    {
	      emit_move_insn (r0, GEN_INT (offset));
	      offset_in_r0 = offset;
	    }
	  else if (offset != offset_in_r0)
	    {
	      emit_move_insn (r0,
			      gen_rtx_PLUS
			      (Pmode, r0,
			       GEN_INT (offset - offset_in_r0)));
	      offset_in_r0 += offset - offset_in_r0;
	    }

	  if (pre_dec != NULL_RTX)
	    {
	      if (! sp_in_r0)
		{
		  emit_move_insn (r0,
				  gen_rtx_PLUS
				  (Pmode, r0, stack_pointer_rtx));
		  sp_in_r0 = 1;
		}

	      offset -= GET_MODE_SIZE (mode);
	      offset_in_r0 -= GET_MODE_SIZE (mode);

	      mem_rtx = pre_dec;
	    }
	  else if (sp_in_r0)
	    mem_rtx = gen_rtx_MEM (mode, r0);
	  else
	    mem_rtx = gen_rtx_MEM (mode,
				   gen_rtx_PLUS (Pmode,
						 stack_pointer_rtx,
						 r0));

	  /* We must not use an r0-based address for target-branch
	     registers or for special registers without pre-dec
	     memory addresses, since we store their values in r0
	     first.  */
	  gcc_assert (!TARGET_REGISTER_P (reg)
		      && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
			  || mem_rtx == pre_dec));

	addr_ok:
	  orig_reg_rtx = reg_rtx;
	  if (TARGET_REGISTER_P (reg)
	      || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
		  && mem_rtx != pre_dec))
	    {
	      rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);

	      emit_move_insn (tmp_reg, reg_rtx);

	      if (REGNO (tmp_reg) == R0_REG)
		{
		  offset_in_r0 = -1;
		  sp_in_r0 = 0;
		  gcc_assert (!refers_to_regno_p
			      (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
		}

	      if (*++tmp_pnt <= 0)
		tmp_pnt = schedule.temps;

	      reg_rtx = tmp_reg;
	    }
	  {
	    rtx insn;

	    /* Mark as interesting for dwarf cfi generator */
	    insn = emit_move_insn (mem_rtx, reg_rtx);
	    RTX_FRAME_RELATED_P (insn) = 1;
	    /* If we use an intermediate register for the save, we can't
	       describe this exactly in cfi as a copy of the to-be-saved
	       register into the temporary register and then the temporary
	       register on the stack, because the temporary register can
	       have a different natural size than the to-be-saved register.
	       Thus, we gloss over the intermediate copy and pretend we do
	       a direct save from the to-be-saved register.  */
	    if (REGNO (reg_rtx) != reg)
	      {
		rtx set, note_rtx;

		set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
		note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
					      REG_NOTES (insn));
		REG_NOTES (insn) = note_rtx;
	      }

	    if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
	      {
		rtx reg_rtx = gen_rtx_REG (mode, reg);
		rtx set, note_rtx;
		rtx mem_rtx = gen_rtx_MEM (mode,
					   gen_rtx_PLUS (Pmode,
							 stack_pointer_rtx,
							 GEN_INT (offset)));

		set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
		note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
					      REG_NOTES (insn));
		REG_NOTES (insn) = note_rtx;
	      }
	  }
	}

      gcc_assert (entry->offset == d_rounding);
    }
  else
    push_regs (&live_regs_mask, current_function_interrupt);

  if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
    {
      rtx insn = get_last_insn ();
      rtx last = emit_insn (gen_GOTaddr2picreg ());

      /* Mark these insns as possibly dead.  Sometimes, flow2 may
	 delete all uses of the PIC register.  In this case, let it
	 delete the initialization too.  */
      do
	{
	  insn = NEXT_INSN (insn);
	  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
						const0_rtx,
						REG_NOTES (insn));
	}
      while (insn != last);
    }

  if (SHMEDIA_REGS_STACK_ADJUST ())
    {
      /* This must NOT go through the PLT, otherwise mach and macl
	 may be clobbered.  */
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
		       (TARGET_FPU_ANY
			? "__GCC_push_shmedia_regs"
			: "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
      emit_insn (gen_shmedia_save_restore_regs_compact
		 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
    }

  if (target_flags != save_flags && ! current_function_interrupt)
    {
      rtx insn = emit_insn (gen_toggle_sz ());

      /* If we're lucky, a mode switch in the function body will
	 overwrite fpscr, turning this insn dead.  Tell flow this
	 insn is ok to delete.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
					    const0_rtx,
					    REG_NOTES (insn));
    }

  target_flags = save_flags;

  output_stack_adjust (-rounded_frame_size (d) + d_rounding,
		       stack_pointer_rtx, 0, NULL);

  if (frame_pointer_needed)
    frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));

  if (TARGET_SHCOMPACT
      && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
    {
      /* This must NOT go through the PLT, otherwise mach and macl
	 may be clobbered.  */
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
		       "__GCC_shcompact_incoming_args", SFUNC_GOT);
      emit_insn (gen_shcompact_incoming_args ());
    }
}
void
sh_expand_epilogue (bool sibcall_p)
{
  HARD_REG_SET live_regs_mask;
  int d, i;
  int d_rounding = 0;

  int save_flags = target_flags;
  int frame_size, save_size;
  int fpscr_deferred = 0;
  int e = sibcall_p ? -1 : 1;

  d = calc_live_regs (&live_regs_mask);

  save_size = d;
  frame_size = rounded_frame_size (d);

  if (TARGET_SH5)
    {
      int total_size;
      int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);

      if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
	d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
		      - d % (STACK_BOUNDARY / BITS_PER_UNIT));

      total_size = d + tregs_space;
      total_size += rounded_frame_size (total_size);
      save_size = total_size - frame_size;

      /* If adjusting the stack in a single step costs nothing extra, do so.
	 I.e. either if a single addi is enough, or we need a movi anyway,
	 and we don't exceed the maximum offset range (the test for the
	 latter is conservative for simplicity).  */
      if (TARGET_SHMEDIA
	  && ! frame_pointer_needed
	  && (CONST_OK_FOR_I10 (total_size)
	      || (! CONST_OK_FOR_I10 (save_size + d_rounding)
		  && total_size <= 2044)))
	d_rounding = frame_size;

      frame_size -= d_rounding;
    }

  if (frame_pointer_needed)
    {
      /* We must avoid scheduling the epilogue with previous basic blocks
	 when exception handling is enabled.  See PR/18032.  */
      if (flag_exceptions)
	emit_insn (gen_blockage ());
      output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
			   &live_regs_mask);

      /* We must avoid moving the stack pointer adjustment past code
	 which reads from the local frame, else an interrupt could
	 occur after the SP adjustment and clobber data in the local
	 frame.  */
      emit_insn (gen_blockage ());
      emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
    }
  else if (frame_size)
    {
      /* We must avoid moving the stack pointer adjustment past code
	 which reads from the local frame, else an interrupt could
	 occur after the SP adjustment and clobber data in the local
	 frame.  */
      emit_insn (gen_blockage ());
      output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
    }

  if (SHMEDIA_REGS_STACK_ADJUST ())
    {
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
		       (TARGET_FPU_ANY
			? "__GCC_pop_shmedia_regs"
			: "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
      /* This must NOT go through the PLT, otherwise mach and macl
	 may be clobbered.  */
      emit_insn (gen_shmedia_save_restore_regs_compact
		 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
    }
  /* Pop all the registers.  */

  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());
  if (TARGET_SH5)
    {
      int offset_base, offset;
      int offset_in_r0 = -1;
      int sp_in_r0 = 0;
      rtx r0 = gen_rtx_REG (Pmode, R0_REG);
      save_schedule schedule;
      save_entry *entry;
      int *tmp_pnt;

      entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
      offset_base = -entry[1].offset + d_rounding;
      tmp_pnt = schedule.temps;
      for (; entry->mode != VOIDmode; entry--)
	{
	  enum machine_mode mode = entry->mode;
	  int reg = entry->reg;
	  rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;

	  offset = offset_base + entry->offset;
	  reg_rtx = gen_rtx_REG (mode, reg);

	  mem_rtx = gen_rtx_MEM (mode,
				 gen_rtx_PLUS (Pmode,
					       stack_pointer_rtx,
					       GEN_INT (offset)));

	  GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);

	  mem_rtx = NULL_RTX;

	try_post_inc:
	  do
	    if (HAVE_POST_INCREMENT
		&& (offset == offset_in_r0
		    || (offset + GET_MODE_SIZE (mode) != d + d_rounding
			&& mem_rtx == NULL_RTX)
		    || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
	      {
		post_inc = gen_rtx_MEM (mode,
					gen_rtx_POST_INC (Pmode, r0));

		GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
					  post_inc_ok);

		post_inc = NULL_RTX;

		break;

	      post_inc_ok:
		mem_rtx = NULL_RTX;
	      }
	  while (0);

	  if (mem_rtx != NULL_RTX)
	    goto addr_ok;

	  if (offset_in_r0 == -1)
	    {
	      emit_move_insn (r0, GEN_INT (offset));
	      offset_in_r0 = offset;
	    }
	  else if (offset != offset_in_r0)
	    {
	      emit_move_insn (r0,
			      gen_rtx_PLUS
			      (Pmode, r0,
			       GEN_INT (offset - offset_in_r0)));
	      offset_in_r0 += offset - offset_in_r0;
	    }

	  if (post_inc != NULL_RTX)
	    {
	      if (! sp_in_r0)
		{
		  emit_move_insn (r0,
				  gen_rtx_PLUS
				  (Pmode, r0, stack_pointer_rtx));
		  sp_in_r0 = 1;
		}

	      mem_rtx = post_inc;

	      offset_in_r0 += GET_MODE_SIZE (mode);
	    }
	  else if (sp_in_r0)
	    mem_rtx = gen_rtx_MEM (mode, r0);
	  else
	    mem_rtx = gen_rtx_MEM (mode,
				   gen_rtx_PLUS (Pmode,
						 stack_pointer_rtx,
						 r0));

	  gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
		      || mem_rtx == post_inc);

	addr_ok:
	  if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
	      && mem_rtx != post_inc)
	    {
	      insn = emit_move_insn (r0, mem_rtx);
	      mem_rtx = r0;
	    }
	  else if (TARGET_REGISTER_P (reg))
	    {
	      rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);

	      /* Give the scheduler a bit of freedom by using up to
		 MAX_TEMPS registers in a round-robin fashion.  */
	      insn = emit_move_insn (tmp_reg, mem_rtx);
	      mem_rtx = tmp_reg;
	      if (*++tmp_pnt < 0)
		tmp_pnt = schedule.temps;
	    }

	  insn = emit_move_insn (reg_rtx, mem_rtx);
	  if (reg == PR_MEDIA_REG && sh_media_register_for_return () >= 0)
	    /* This is dead, unless we return with a sibcall.  */
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
						  const0_rtx,
						  REG_NOTES (insn));
	}

      gcc_assert (entry->offset + offset_base == d + d_rounding);
    }
  else /* ! TARGET_SH5 */
    {
      save_size = 0;
      if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
	pop (PR_REG);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	{
	  int j = (FIRST_PSEUDO_REGISTER - 1) - i;

	  if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
	      && hard_regs_intersect_p (&live_regs_mask,
					&reg_class_contents[DF_REGS]))
	    fpscr_deferred = 1;
	  else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j))
	    pop (j);
	  if (j == FIRST_FP_REG && fpscr_deferred)
	    pop (FPSCR_REG);
	}
    }
  if (target_flags != save_flags && ! current_function_interrupt)
    emit_insn (gen_toggle_sz ());
  target_flags = save_flags;

  output_stack_adjust (current_function_pretend_args_size
		       + save_size + d_rounding
		       + current_function_args_info.stack_regs * 8,
		       stack_pointer_rtx, e, NULL);

  if (current_function_calls_eh_return)
    emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
			 EH_RETURN_STACKADJ_RTX));

  /* Switch back to the normal stack if necessary.  */
  if (sp_switch)
    emit_insn (gen_sp_switch_2 ());

  /* Tell flow the insn that pops PR isn't dead.  */
  /* PR_REG will never be live in SHmedia mode, and we don't need to
     USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
     by the return pattern.  */
  if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
    emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, PR_REG)));
}
static int sh_need_epilogue_known = 0;

int
sh_need_epilogue (void)
{
  if (! sh_need_epilogue_known)
    {
      rtx epilogue;

      start_sequence ();
      sh_expand_epilogue (0);
      epilogue = get_insns ();
      end_sequence ();
      sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
    }
  return sh_need_epilogue_known > 0;
}
/* Emit code to change the current function's return address to RA.
   TEMP is available as a scratch register, if needed.  */

void
sh_set_return_address (rtx ra, rtx tmp)
{
  HARD_REG_SET live_regs_mask;
  int d;
  int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
  int pr_offset;

  d = calc_live_regs (&live_regs_mask);

  /* If pr_reg isn't live, we can set it (or the register given in
     sh_media_register_for_return) directly.  */
  if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
    {
      rtx rr;

      if (TARGET_SHMEDIA)
	{
	  int rr_regno = sh_media_register_for_return ();

	  if (rr_regno < 0)
	    rr_regno = pr_reg;
	  rr = gen_rtx_REG (DImode, rr_regno);
	}
      else
	rr = gen_rtx_REG (SImode, pr_reg);

      emit_insn (GEN_MOV (rr, ra));
      /* Tell flow the register for return isn't dead.  */
      emit_insn (gen_rtx_USE (VOIDmode, rr));
      return;
    }

  if (TARGET_SH5)
    {
      int offset;
      save_schedule schedule;
      save_entry *entry;

      entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
      offset = entry[1].offset;
      for (; entry->mode != VOIDmode; entry--)
	if (entry->reg == pr_reg)
	  goto found;

      /* We can't find pr register.  */
      gcc_unreachable ();

    found:
      offset = entry->offset - offset;
      pr_offset = (rounded_frame_size (d) + offset
		   + SHMEDIA_REGS_STACK_ADJUST ());
    }
  else
    pr_offset = rounded_frame_size (d);

  emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
  emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));

  tmp = gen_rtx_MEM (Pmode, tmp);
  emit_insn (GEN_MOV (tmp, ra));
}
/* Clear variables at function end.  */

static void
sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  trap_exit = pragma_interrupt = pragma_trapa = pragma_nosave_low_regs = 0;
  sh_need_epilogue_known = 0;
  sp_switch = NULL_RTX;
}
static rtx
sh_builtin_saveregs (void)
{
  /* First unnamed integer register.  */
  int first_intreg = current_function_args_info.arg_count[(int) SH_ARG_INT];
  /* Number of integer registers we need to save.  */
  int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
  /* First unnamed SFmode float reg */
  int first_floatreg = current_function_args_info.arg_count[(int) SH_ARG_FLOAT];
  /* Number of SFmode float regs to save.  */
  int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
  rtx regbuf, fpregs;
  int bufsize, regno;
  HOST_WIDE_INT alias_set;

  if (TARGET_SH5)
    {
      if (n_intregs)
	{
	  int pushregs = n_intregs;

	  while (pushregs < NPARM_REGS (SImode) - 1
		 && (CALL_COOKIE_INT_REG_GET
		     (current_function_args_info.call_cookie,
		      NPARM_REGS (SImode) - pushregs)
		     == 1))
	    {
	      current_function_args_info.call_cookie
		&= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
					  - pushregs, 1);
	      pushregs++;
	    }

	  if (pushregs == NPARM_REGS (SImode))
	    current_function_args_info.call_cookie
	      |= (CALL_COOKIE_INT_REG (0, 1)
		  | CALL_COOKIE_STACKSEQ (pushregs - 1));
	  else
	    current_function_args_info.call_cookie
	      |= CALL_COOKIE_STACKSEQ (pushregs);

	  current_function_pretend_args_size += 8 * n_intregs;
	}
      if (TARGET_SHCOMPACT)
	return const0_rtx;
    }

  if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
    {
      error ("__builtin_saveregs not supported by this subtarget");
      return const0_rtx;
    }

  /* Allocate block of memory for the regs.  */
  /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
     Or can assign_stack_local accept a 0 SIZE argument?  */
  bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);

  if (TARGET_SHMEDIA)
    regbuf = gen_rtx_MEM (BLKmode,
			  gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
  else if (n_floatregs & 1)
    {
      rtx addr;

      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
      addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
      emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
      regbuf = change_address (regbuf, BLKmode, addr);
    }
  else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
    {
      rtx addr, mask;

      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
      addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
      mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
      emit_insn (gen_andsi3 (addr, addr, mask));
      regbuf = change_address (regbuf, BLKmode, addr);
    }
  else
    regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
  alias_set = get_varargs_alias_set ();
  set_mem_alias_set (regbuf, alias_set);

  /* Save int args.
     This is optimized to only save the regs that are necessary.  Explicitly
     named args need not be saved.  */
  if (n_intregs > 0)
    move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
			 adjust_address (regbuf, BLKmode,
					 n_floatregs * UNITS_PER_WORD),
			 n_intregs);

  if (TARGET_SHMEDIA)
    /* Return the address of the regbuf.  */
    return XEXP (regbuf, 0);

  /* Save float args.
     This is optimized to only save the regs that are necessary.  Explicitly
     named args need not be saved.
     We explicitly build a pointer to the buffer because it halves the insn
     count when not optimizing (otherwise the pointer is built for each reg
     saved).
     We emit the moves in reverse order so that we can use predecrement.  */
6535 fpregs
= copy_to_mode_reg (Pmode
,
6536 plus_constant (XEXP (regbuf
, 0),
6537 n_floatregs
* UNITS_PER_WORD
));
6538 if (TARGET_SH4
|| TARGET_SH2A_DOUBLE
)
6541 for (regno
= NPARM_REGS (DFmode
) - 2; regno
>= first_floatreg
; regno
-= 2)
6543 emit_insn (gen_addsi3 (fpregs
, fpregs
,
6544 GEN_INT (-2 * UNITS_PER_WORD
)));
6545 mem
= gen_rtx_MEM (DFmode
, fpregs
);
6546 set_mem_alias_set (mem
, alias_set
);
6547 emit_move_insn (mem
,
6548 gen_rtx_REG (DFmode
, BASE_ARG_REG (DFmode
) + regno
));
6550 regno
= first_floatreg
;
6553 emit_insn (gen_addsi3 (fpregs
, fpregs
, GEN_INT (-UNITS_PER_WORD
)));
6554 mem
= gen_rtx_MEM (SFmode
, fpregs
);
6555 set_mem_alias_set (mem
, alias_set
);
6556 emit_move_insn (mem
,
6557 gen_rtx_REG (SFmode
, BASE_ARG_REG (SFmode
) + regno
6558 - (TARGET_LITTLE_ENDIAN
!= 0)));
6562 for (regno
= NPARM_REGS (SFmode
) - 1; regno
>= first_floatreg
; regno
--)
6566 emit_insn (gen_addsi3 (fpregs
, fpregs
, GEN_INT (-UNITS_PER_WORD
)));
6567 mem
= gen_rtx_MEM (SFmode
, fpregs
);
6568 set_mem_alias_set (mem
, alias_set
);
6569 emit_move_insn (mem
,
6570 gen_rtx_REG (SFmode
, BASE_ARG_REG (SFmode
) + regno
));
6573 /* Return the address of the regbuf. */
6574 return XEXP (regbuf
, 0);
/* Define the `__builtin_va_list' type for the ABI.  */

static tree
sh_build_builtin_va_list (void)
{
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
  tree record;

  if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
    return ptr_type_node;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);

  f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
                         ptr_type_node);
  f_next_o_limit = build_decl (FIELD_DECL,
                               get_identifier ("__va_next_o_limit"),
                               ptr_type_node);
  f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
                          ptr_type_node);
  f_next_fp_limit = build_decl (FIELD_DECL,
                                get_identifier ("__va_next_fp_limit"),
                                ptr_type_node);
  f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
                             ptr_type_node);

  DECL_FIELD_CONTEXT (f_next_o) = record;
  DECL_FIELD_CONTEXT (f_next_o_limit) = record;
  DECL_FIELD_CONTEXT (f_next_fp) = record;
  DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
  DECL_FIELD_CONTEXT (f_next_stack) = record;

  TYPE_FIELDS (record) = f_next_o;
  TREE_CHAIN (f_next_o) = f_next_o_limit;
  TREE_CHAIN (f_next_o_limit) = f_next_fp;
  TREE_CHAIN (f_next_fp) = f_next_fp_limit;
  TREE_CHAIN (f_next_fp_limit) = f_next_stack;

  layout_type (record);

  return record;
}
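/* For illustration only (not part of the original source): the record
   built above corresponds roughly to

     typedef struct
     {
       void *__va_next_o;          next available integer-arg slot
       void *__va_next_o_limit;    end of the integer-arg save area
       void *__va_next_fp;         next available FP-arg slot
       void *__va_next_fp_limit;   end of the FP-arg save area
       void *__va_next_stack;      next stacked argument
     } __builtin_va_list;

   assuming all five fields are laid out as plain pointers, which is what
   the build_decl calls above request.  */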
/* Implement `va_start' for varargs and stdarg.  */

void
sh_va_start (tree valist, rtx nextarg)
{
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
  tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
  tree t, u;
  int nfp, nint;

  if (TARGET_SH5)
    {
      expand_builtin_saveregs ();
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  if ((! TARGET_SH2E && ! TARGET_SH4)
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_next_o = TYPE_FIELDS (va_list_type_node);
  f_next_o_limit = TREE_CHAIN (f_next_o);
  f_next_fp = TREE_CHAIN (f_next_o_limit);
  f_next_fp_limit = TREE_CHAIN (f_next_fp);
  f_next_stack = TREE_CHAIN (f_next_fp_limit);

  next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
                  NULL_TREE);
  next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
                        valist, f_next_o_limit, NULL_TREE);
  next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
                   NULL_TREE);
  next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
                         valist, f_next_fp_limit, NULL_TREE);
  next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
                      valist, f_next_stack, NULL_TREE);

  /* Call __builtin_saveregs.  */
  u = make_tree (ptr_type_node, expand_builtin_saveregs ());
  t = build (MODIFY_EXPR, ptr_type_node, next_fp, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  nfp = current_function_args_info.arg_count[SH_ARG_FLOAT];
  if (nfp < 8)
    nfp = 8 - nfp;
  else
    nfp = 0;
  u = fold (build (PLUS_EXPR, ptr_type_node, u,
                   build_int_cst (NULL_TREE, UNITS_PER_WORD * nfp)));
  t = build (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  t = build (MODIFY_EXPR, ptr_type_node, next_o, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  nint = current_function_args_info.arg_count[SH_ARG_INT];
  if (nint < 4)
    nint = 4 - nint;
  else
    nint = 0;
  u = fold (build (PLUS_EXPR, ptr_type_node, u,
                   build_int_cst (NULL_TREE, UNITS_PER_WORD * nint)));
  t = build (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  u = make_tree (ptr_type_node, nextarg);
  t = build (MODIFY_EXPR, ptr_type_node, next_stack, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* TYPE is a RECORD_TYPE.  If there is only a single nonzero-sized
   member, return it.  */

static tree
find_sole_member (tree type)
{
  tree field, member = NULL_TREE;

  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;
      if (!DECL_SIZE (field))
        return NULL_TREE;
      if (integer_zerop (DECL_SIZE (field)))
        continue;
      if (member)
        return NULL_TREE;
      member = field;
    }
  return member;
}
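/* Example (illustrative, not from the original source): for

     struct s { double d; };

   find_sole_member returns the FIELD_DECL for D, so the va_arg code
   below treats the struct like a bare double; a struct with two
   nonzero-sized members yields NULL_TREE and is left alone.  */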
/* Implement `va_arg'.  */

static tree
sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
                         tree *post_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size, rsize;
  tree tmp, pptr_type_node;
  tree addr, lab_over = NULL, result = NULL;
  int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);

  if (pass_by_ref)
    type = build_pointer_type (type);

  size = int_size_in_bytes (type);
  /* Round the size up to a whole number of words.  */
  rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
  pptr_type_node = build_pointer_type (ptr_type_node);

  if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
      && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
    {
      tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
      tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
      int pass_as_float;
      tree lab_false;
      tree member;

      f_next_o = TYPE_FIELDS (va_list_type_node);
      f_next_o_limit = TREE_CHAIN (f_next_o);
      f_next_fp = TREE_CHAIN (f_next_o_limit);
      f_next_fp_limit = TREE_CHAIN (f_next_fp);
      f_next_stack = TREE_CHAIN (f_next_fp_limit);

      next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
                      NULL_TREE);
      next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
                            valist, f_next_o_limit, NULL_TREE);
      next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp),
                       valist, f_next_fp, NULL_TREE);
      next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
                             valist, f_next_fp_limit, NULL_TREE);
      next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
                          valist, f_next_stack, NULL_TREE);

      /* Structures with a single member with a distinct mode are passed
         like their member.  This is relevant if the latter has a REAL_TYPE
         or COMPLEX_TYPE type.  */
      while (TREE_CODE (type) == RECORD_TYPE
             && (member = find_sole_member (type))
             && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
                 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
                 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
        {
          tree field_type = TREE_TYPE (member);

          if (TYPE_MODE (type) == TYPE_MODE (field_type))
            type = field_type;
          else
            {
              gcc_assert ((TYPE_ALIGN (type)
                           < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
                          || (TYPE_ALIGN (type)
                              > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
              break;
            }
        }

      if (TARGET_SH4)
        {
          pass_as_float = ((TREE_CODE (type) == REAL_TYPE && size <= 8)
                           || (TREE_CODE (type) == COMPLEX_TYPE
                               && TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
                               && size <= 16));
        }
      else
        {
          pass_as_float = (TREE_CODE (type) == REAL_TYPE && size == 4);
        }

      addr = create_tmp_var (pptr_type_node, NULL);
      lab_false = create_artificial_label ();
      lab_over = create_artificial_label ();

      valist = build1 (INDIRECT_REF, ptr_type_node, addr);

      if (pass_as_float)
        {
          tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
          tree cmp;
          bool is_double = size == 8 && TREE_CODE (type) == REAL_TYPE;

          tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
          tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
          gimplify_and_add (tmp, pre_p);

          tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, valist);
          gimplify_and_add (tmp, pre_p);
          tmp = next_fp_limit;
          if (size > 4 && !is_double)
            tmp = build2 (PLUS_EXPR, TREE_TYPE (tmp), tmp,
                          fold_convert (TREE_TYPE (tmp), size_int (4 - size)));
          tmp = build (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
          cmp = build (COND_EXPR, void_type_node, tmp,
                       build (GOTO_EXPR, void_type_node, lab_false),
                       NULL_TREE);
          if (!is_double)
            gimplify_and_add (cmp, pre_p);

          if (TYPE_ALIGN (type) > BITS_PER_WORD || (is_double || size == 16))
            {
              tmp = fold_convert (ptr_type_node, size_int (UNITS_PER_WORD));
              tmp = build (BIT_AND_EXPR, ptr_type_node, next_fp_tmp, tmp);
              tmp = build (PLUS_EXPR, ptr_type_node, next_fp_tmp, tmp);
              tmp = build (MODIFY_EXPR, ptr_type_node, next_fp_tmp, tmp);
              gimplify_and_add (tmp, pre_p);
            }
          if (is_double)
            gimplify_and_add (cmp, pre_p);

#ifdef FUNCTION_ARG_SCmode_WART
          if (TYPE_MODE (type) == SCmode
              && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
            {
              tree subtype = TREE_TYPE (type);
              tree real, imag;

              imag
                = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
              imag = get_initialized_tmp_var (imag, pre_p, NULL);

              real
                = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
              real = get_initialized_tmp_var (real, pre_p, NULL);

              result = build (COMPLEX_EXPR, type, real, imag);
              result = get_initialized_tmp_var (result, pre_p, NULL);
            }
#endif /* FUNCTION_ARG_SCmode_WART */

          tmp = build (GOTO_EXPR, void_type_node, lab_over);
          gimplify_and_add (tmp, pre_p);

          tmp = build (LABEL_EXPR, void_type_node, lab_false);
          gimplify_and_add (tmp, pre_p);

          tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
          tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
          gimplify_and_add (tmp, pre_p);
          tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, valist);
          gimplify_and_add (tmp, pre_p);

          tmp = build2 (MODIFY_EXPR, ptr_type_node, valist, next_fp_tmp);
          gimplify_and_add (tmp, post_p);
          valist = next_fp_tmp;
        }
      else
        {
          tmp = fold_convert (ptr_type_node, size_int (rsize));
          tmp = build (PLUS_EXPR, ptr_type_node, next_o, tmp);
          tmp = build (GT_EXPR, boolean_type_node, tmp, next_o_limit);
          tmp = build (COND_EXPR, void_type_node, tmp,
                       build (GOTO_EXPR, void_type_node, lab_false),
                       NULL_TREE);
          gimplify_and_add (tmp, pre_p);

          tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
          tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
          gimplify_and_add (tmp, pre_p);

          tmp = build (GOTO_EXPR, void_type_node, lab_over);
          gimplify_and_add (tmp, pre_p);

          tmp = build (LABEL_EXPR, void_type_node, lab_false);
          gimplify_and_add (tmp, pre_p);

          if (size > 4 && ! TARGET_SH4)
            {
              tmp = build (MODIFY_EXPR, ptr_type_node, next_o, next_o_limit);
              gimplify_and_add (tmp, pre_p);
            }

          tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
          tmp = build (MODIFY_EXPR, void_type_node, addr, tmp);
          gimplify_and_add (tmp, pre_p);
        }

      if (!result)
        {
          tmp = build (LABEL_EXPR, void_type_node, lab_over);
          gimplify_and_add (tmp, pre_p);
        }
    }

  /* ??? In va-sh.h, there had been code to make values larger than
     size 8 indirect.  This does not match the FUNCTION_ARG macros.  */

  tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
  if (result)
    {
      tmp = build (MODIFY_EXPR, void_type_node, result, tmp);
      gimplify_and_add (tmp, pre_p);

      tmp = build (LABEL_EXPR, void_type_node, lab_over);
      gimplify_and_add (tmp, pre_p);
    }
  else
    result = tmp;

  if (pass_by_ref)
    result = build_va_arg_indirect_ref (result);

  return result;
}
static bool
sh_promote_prototypes (tree type)
{
  if (TARGET_HITACHI)
    return 0;
  if (! type)
    return 1;
  return ! sh_attr_renesas_p (type);
}
/* Whether an argument must be passed by reference.  On SHcompact, we
   pretend arguments wider than 32-bits that would have been passed in
   registers are passed by reference, so that an SHmedia trampoline
   loads them into the full 64-bits registers.  */

static int
shcompact_byref (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                 tree type, bool named)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
      && (!named
          || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
          || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
              && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
      && size > 4
      && !SHCOMPACT_FORCE_ON_STACK (mode, type)
      && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
    return size;
  else
    return 0;
}
static bool
sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named)
{
  if (targetm.calls.must_pass_in_stack (mode, type))
    return true;

  /* ??? std_gimplify_va_arg_expr passes NULL for cum.  That function
     wants to know about pass-by-reference semantics for incoming
     arguments.  */
  if (! cum)
    return false;

  if (TARGET_SHCOMPACT)
    {
      cum->byref = shcompact_byref (cum, mode, type, named);
      return cum->byref != 0;
    }

  return false;
}
static bool
sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                  tree type, bool named ATTRIBUTE_UNUSED)
{
  /* ??? How can it possibly be correct to return true only on the
     caller side of the equation?  Is there someplace else in the
     sh backend that's magically producing the copies?  */
  return (cum->outgoing
          && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
              % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
}
static int
sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  int words = 0;

  if (!TARGET_SH5
      && PASS_IN_REG_P (*cum, mode, type)
      && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
      && (ROUND_REG (*cum, mode)
          + (mode != BLKmode
             ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
             : ROUND_ADVANCE (int_size_in_bytes (type)))
          > NPARM_REGS (mode)))
    words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);

  else if (!TARGET_SHCOMPACT
           && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
    words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];

  return words * UNITS_PER_WORD;
}
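/* Worked example (illustrative, not from the original source): on SH1..SH4
   there are four integer argument registers (r4..r7).  For a DImode
   argument whose first word would land in r7, ROUND_REG is 3 and
   ROUND_ADVANCE of the 8-byte size is 2, so 3 + 2 > NPARM_REGS and the
   function reports 4 partial bytes: one word goes in r7 while the rest
   is pushed on the stack.  */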
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On SH the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */

rtx
sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
                 tree type, int named)
{
  if (! TARGET_SH5 && mode == VOIDmode)
    return GEN_INT (ca->renesas_abi ? 1 : 0);

  if (! TARGET_SH5
      && PASS_IN_REG_P (*ca, mode, type)
      && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
    {
      int regno;

      if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
          && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
        {
          rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SFmode,
                                                   BASE_ARG_REG (mode)
                                                   + (ROUND_REG (*ca, mode) ^ 1)),
                                      const0_rtx);
          rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (SFmode,
                                                   BASE_ARG_REG (mode)
                                                   + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
                                      GEN_INT (4));
          return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
        }

      /* If the alignment of a DF value causes an SF register to be
         skipped, we will use that skipped register for the next SF
         value.  */
      if ((TARGET_HITACHI || ca->renesas_abi)
          && ca->free_single_fp_reg
          && mode == SFmode)
        return gen_rtx_REG (mode, ca->free_single_fp_reg);

      regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
               ^ (mode == SFmode && TARGET_SH4
                  && TARGET_LITTLE_ENDIAN != 0
                  && ! TARGET_HITACHI && ! ca->renesas_abi);
      return gen_rtx_REG (mode, regno);
    }

  if (TARGET_SH5)
    {
      if (mode == VOIDmode && TARGET_SHCOMPACT)
        return GEN_INT (ca->call_cookie);

      /* The following test assumes unnamed arguments are promoted to
         DFmode.  */
      if (mode == SFmode && ca->free_single_fp_reg)
        return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);

      if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
          && (named || ! ca->prototype_p)
          && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
        {
          if (! ca->prototype_p && TARGET_SHMEDIA)
            return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);

          return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
                                           FIRST_FP_PARM_REG
                                           + ca->arg_count[(int) SH_ARG_FLOAT]);
        }

      if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
          && (! TARGET_SHCOMPACT
              || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
                  && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
                                                   type, named))))
        return gen_rtx_REG (mode, (FIRST_PARM_REG
                                   + ca->arg_count[(int) SH_ARG_INT]));

      return 0;
    }

  return 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be
   available.)  */

void
sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
                         tree type, int named)
{
  if (ca->force_mem)
    ca->force_mem = 0;
  else if (TARGET_SH5)
    {
      tree type2 = (ca->byref && type
                    ? TREE_TYPE (type)
                    : type);
      enum machine_mode mode2 = (ca->byref && type
                                 ? TYPE_MODE (type2)
                                 : mode);
      int dwords = ((ca->byref
                     ? ca->byref
                     : mode2 == BLKmode
                     ? int_size_in_bytes (type2)
                     : GET_MODE_SIZE (mode2)) + 7) / 8;
      int numregs = MIN (dwords, NPARM_REGS (SImode)
                         - ca->arg_count[(int) SH_ARG_INT]);

      if (numregs)
        {
          ca->arg_count[(int) SH_ARG_INT] += numregs;
          if (TARGET_SHCOMPACT
              && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
            {
              ca->call_cookie
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
                                        - numregs, 1);
              /* N.B. We want this also for outgoing.  */
              ca->stack_regs += numregs;
            }
          else if (ca->byref)
            {
              if (! ca->outgoing)
                ca->stack_regs += numregs;
              ca->byref_regs += numregs;
              ca->byref = 0;
              do
                ca->call_cookie
                  |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
                                          - numregs, 2);
              while (--numregs);
              ca->call_cookie
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
                                        - 1, 1);
            }
          else if (dwords > numregs)
            {
              int pushregs = numregs;

              if (TARGET_SHCOMPACT)
                ca->stack_regs += numregs;
              while (pushregs < NPARM_REGS (SImode) - 1
                     && (CALL_COOKIE_INT_REG_GET
                         (ca->call_cookie,
                          NPARM_REGS (SImode) - pushregs)
                         == 1))
                {
                  ca->call_cookie
                    &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
                                              - pushregs, 1);
                  pushregs++;
                }
              if (numregs == NPARM_REGS (SImode))
                ca->call_cookie
                  |= CALL_COOKIE_INT_REG (0, 1)
                  | CALL_COOKIE_STACKSEQ (numregs - 1);
              else
                ca->call_cookie
                  |= CALL_COOKIE_STACKSEQ (numregs);
            }
        }
      if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
          && (named || ! ca->prototype_p))
        {
          if (mode2 == SFmode && ca->free_single_fp_reg)
            ca->free_single_fp_reg = 0;
          else if (ca->arg_count[(int) SH_ARG_FLOAT]
                   < NPARM_REGS (SFmode))
            {
              int numfpregs
                = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
                       NPARM_REGS (SFmode)
                       - ca->arg_count[(int) SH_ARG_FLOAT]);

              ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;

              if (TARGET_SHCOMPACT && ! ca->prototype_p)
                {
                  if (ca->outgoing && numregs > 0)
                    do
                      {
                        ca->call_cookie
                          |= (CALL_COOKIE_INT_REG
                              (ca->arg_count[(int) SH_ARG_INT]
                               - numregs + ((numfpregs - 2) / 2),
                               4 + (ca->arg_count[(int) SH_ARG_FLOAT]
                                    - numfpregs) / 2));
                      }
                    while (numfpregs -= 2);
                }
              else if (mode2 == SFmode && (named)
                       && (ca->arg_count[(int) SH_ARG_FLOAT]
                           < NPARM_REGS (SFmode)))
                ca->free_single_fp_reg
                  = FIRST_FP_PARM_REG - numfpregs
                  + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
            }
        }
      return;
    }

  if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
    {
      /* Note that we've used the skipped register.  */
      if (mode == SFmode && ca->free_single_fp_reg)
        {
          ca->free_single_fp_reg = 0;
          return;
        }
      /* When we have a DF after an SF, there's an SF register that gets
         skipped in order to align the DF value.  We note this skipped
         register, because the next SF value will use it, and not the
         SF that follows the DF.  */
      if (mode == DFmode
          && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
        {
          ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
                                    + BASE_ARG_REG (mode));
        }
    }

  if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
      || PASS_IN_REG_P (*ca, mode, type))
    (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
     = (ROUND_REG (*ca, mode)
        + (mode == BLKmode
           ? ROUND_ADVANCE (int_size_in_bytes (type))
           : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
}
/* The Renesas calling convention doesn't quite fit into this scheme since
   the address is passed like an invisible argument, but one that is always
   passed in memory.  */
static rtx
sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
{
  if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
    return 0;
  return gen_rtx_REG (Pmode, 2);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
sh_return_in_memory (tree type, tree fndecl)
{
  if (TARGET_SH5)
    {
      if (TYPE_MODE (type) == BLKmode)
        return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
      else
        return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
    }
  else
    {
      return (TYPE_MODE (type) == BLKmode
              || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
                  && TREE_CODE (type) == RECORD_TYPE));
    }
}
/* We actually emit the code in sh_expand_prologue.  We used to use
   a static variable to flag that we need to emit this code, but that
   doesn't work when inlining, when functions are deferred and then emitted
   later.  Fortunately, we already have two flags that are part of struct
   function that tell if a function uses varargs or stdarg.  */
static void
sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
                           enum machine_mode mode,
                           tree type,
                           int *pretend_arg_size,
                           int second_time ATTRIBUTE_UNUSED)
{
  gcc_assert (current_function_stdarg);
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
    {
      int named_parm_regs, anon_parm_regs;

      named_parm_regs = (ROUND_REG (*ca, mode)
                         + (mode == BLKmode
                            ? ROUND_ADVANCE (int_size_in_bytes (type))
                            : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
      anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
      if (anon_parm_regs > 0)
        *pretend_arg_size = anon_parm_regs * 4;
    }
}
static bool
sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
  return TARGET_SH5;
}

static bool
sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
{
  return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
}
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
initial_elimination_offset (int from, int to)
{
  int regs_saved;
  int regs_saved_rounding = 0;
  int total_saved_regs_space;
  int total_auto_space;
  int save_flags = target_flags;
  int copy_flags;
  HARD_REG_SET live_regs_mask;

  shmedia_space_reserved_for_target_registers = false;
  regs_saved = calc_live_regs (&live_regs_mask);
  regs_saved += SHMEDIA_REGS_STACK_ADJUST ();

  if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
    {
      shmedia_space_reserved_for_target_registers = true;
      regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
    }

  if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
    regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
                           - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));

  total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
  copy_flags = target_flags;
  target_flags = save_flags;

  total_saved_regs_space = regs_saved + regs_saved_rounding;

  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return total_saved_regs_space + total_auto_space
           + current_function_args_info.byref_regs * 8;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return total_saved_regs_space + total_auto_space
           + current_function_args_info.byref_regs * 8;

  /* Initial gap between fp and sp is 0.  */
  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return 0;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return rounded_frame_size (0);

  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    return rounded_frame_size (0);

  gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
              && (to == HARD_FRAME_POINTER_REGNUM
                  || to == STACK_POINTER_REGNUM));
  if (TARGET_SH5)
    {
      int n = total_saved_regs_space;
      int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
      save_schedule schedule;
      save_entry *entry;

      n += total_auto_space;

      /* If it wasn't saved, there's not much we can do.  */
      if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
        return n;

      target_flags = copy_flags;

      sh5_schedule_saves (&live_regs_mask, &schedule, n);
      for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
        if (entry->reg == pr_reg)
          {
            target_flags = save_flags;
            return entry->offset;
          }
      gcc_unreachable ();
    }
  else
    return total_auto_space;
}
/* Handle machine specific pragmas to be semi-compatible with Renesas
   compiler.  */

void
sh_pr_interrupt (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
{
  pragma_interrupt = 1;
}

void
sh_pr_trapa (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
{
  pragma_interrupt = pragma_trapa = 1;
}

void
sh_pr_nosave_low_regs (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
{
  pragma_nosave_low_regs = 1;
}
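/* Example (illustrative, not from the original source): in user code,

     #pragma interrupt
     void isr (void);

   makes the next function definition be compiled as an interrupt
   handler, exactly as if it carried the interrupt_handler attribute
   that sh_insert_attributes adds below.  */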
/* Generate 'handle_interrupt' attribute for decls */

static void
sh_insert_attributes (tree node, tree *attributes)
{
  if (! pragma_interrupt
      || TREE_CODE (node) != FUNCTION_DECL)
    return;

  /* We are only interested in fields.  */
  if (!DECL_P (node))
    return;

  /* Add a 'handle_interrupt' attribute.  */
  * attributes = tree_cons (get_identifier ("interrupt_handler"), NULL, * attributes);

  return;
}
/* Supported attributes:

   interrupt_handler -- specifies this function is an interrupt handler.

   sp_switch -- specifies an alternate stack for an interrupt handler
   to run on.

   trap_exit -- use a trapa to exit an interrupt function instead of
   an rte instruction.

   renesas -- use Renesas calling/layout conventions (functions and
   structures).

*/

const struct attribute_spec sh_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "interrupt_handler", 0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
  { "sp_switch",         1, 1, true,  false, false, sh_handle_sp_switch_attribute },
  { "trap_exit",         1, 1, true,  false, false, sh_handle_trap_exit_attribute },
  { "renesas",           0, 0, false, true,  false, sh_handle_renesas_attribute },
#ifdef SYMBIAN
  /* Symbian support adds three new attributes:
     dllexport - for exporting a function/variable that will live in a dll
     dllimport - for importing a function/variable from a dll

     Microsoft allows multiple declspecs in one __declspec, separating
     them with spaces.  We do NOT support this.  Instead, use __declspec
     multiple times.  */
  { "dllimport",         0, 0, true,  false, false, sh_symbian_handle_dll_attribute },
  { "dllexport",         0, 0, true,  false, false, sh_symbian_handle_dll_attribute },
#endif
  { NULL,                0, 0, false, false, false, NULL }
};
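/* Example usage (illustrative, not from the original source; "alt_stack"
   is a hypothetical variable name):

     void handler (void)
       __attribute__ ((interrupt_handler,
                       sp_switch ("alt_stack"),
                       trap_exit (11)));

   sp_switch takes, as a string, the name of a variable holding the
   alternate stack address, and trap_exit takes the trapa number to use
   on exit, matching the handlers below.  */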
/* Handle an "interrupt_handler" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_interrupt_handler_attribute (tree *node, tree name,
                                       tree args ATTRIBUTE_UNUSED,
                                       int flags ATTRIBUTE_UNUSED,
                                       bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else if (TARGET_SHCOMPACT)
    {
      error ("attribute interrupt_handler is not compatible with -m5-compact");
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Handle an "sp_switch" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
                               int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else if (!pragma_interrupt)
    {
      /* The sp_switch attribute only has meaning for interrupt functions.  */
      warning (OPT_Wattributes, "%qs attribute only applies to "
               "interrupt functions", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
    {
      /* The argument must be a constant string.  */
      warning (OPT_Wattributes, "%qs attribute argument not a string constant",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else
    {
      const char *s = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (args)));
      sp_switch = gen_rtx_SYMBOL_REF (VOIDmode, s);
    }

  return NULL_TREE;
}
/* Handle a "trap_exit" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
                               int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
               IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else if (!pragma_interrupt)
    {
      /* The trap_exit attribute only has meaning for interrupt functions.  */
      warning (OPT_Wattributes, "%qs attribute only applies to "
               "interrupt functions", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes, "%qs attribute argument not an "
               "integer constant", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }
  else
    {
      trap_exit = TREE_INT_CST_LOW (TREE_VALUE (args));
    }

  return NULL_TREE;
}
static tree
sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
                             tree name ATTRIBUTE_UNUSED,
                             tree args ATTRIBUTE_UNUSED,
                             int flags ATTRIBUTE_UNUSED,
                             bool *no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
/* True if __attribute__((renesas)) or -mrenesas.  */
int
sh_attr_renesas_p (tree td)
{
  if (TARGET_HITACHI)
    return 1;
  if (td == 0)
    return 0;
  if (DECL_P (td))
    td = TREE_TYPE (td);
  if (td == error_mark_node)
    return 0;
  return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
          != NULL_TREE);
}

/* True if __attribute__((renesas)) or -mrenesas, for the current
   function.  */
int
sh_cfun_attr_renesas_p (void)
{
  return sh_attr_renesas_p (current_function_decl);
}

int
sh_cfun_interrupt_handler_p (void)
{
  return (lookup_attribute ("interrupt_handler",
                            DECL_ATTRIBUTES (current_function_decl))
          != NULL_TREE);
}
/* Implement TARGET_CHECK_PCH_TARGET_FLAGS.  */

static const char *
sh_check_pch_target_flags (int old_flags)
{
  if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
                                    | MASK_SH_E | MASK_HARD_SH4
                                    | MASK_FPU_SINGLE | MASK_SH4))
    return _("created and used with different architectures / ABIs");
  if ((old_flags ^ target_flags) & MASK_HITACHI)
    return _("created and used with different ABIs");
  if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
    return _("created and used with different endianness");
  return NULL;
}
/* Predicates used by the templates.  */

/* Returns 1 if OP is MACL, MACH or PR.  The input must be a REG rtx.
   Used only in general_movsrc_operand.  */

int
system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (REGNO (op))
    {
    case PR_REG:
    case MACL_REG:
    case MACH_REG:
      return 1;
    }
  return 0;
}

/* Nonzero if OP is a floating point value with value 0.0.  */

int
fp_zero_operand (rtx op)
{
  REAL_VALUE_TYPE r;

  if (GET_MODE (op) != SFmode)
    return 0;

  REAL_VALUE_FROM_CONST_DOUBLE (r, op);
  return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
}

/* Nonzero if OP is a floating point value with value 1.0.  */

int
fp_one_operand (rtx op)
{
  REAL_VALUE_TYPE r;

  if (GET_MODE (op) != SFmode)
    return 0;

  REAL_VALUE_FROM_CONST_DOUBLE (r, op);
  return REAL_VALUES_EQUAL (r, dconst1);
}
/* For -m4 and -m4-single-only, mode switching is used.  If we are
   compiling without -mfmovd, movsf_ie isn't taken into account for
   mode switching.  We could check in machine_dependent_reorg for
   cases where we know we are in single precision mode, but there is
   no interface to find that out during reload, so we must avoid
   choosing an fldi alternative during reload and thus failing to
   allocate a scratch register for the constant loading.  */
int
fldi_ok (void)
{
  return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
}

int
tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code = GET_CODE (op);
  return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
}
/* Return the TLS type for TLS symbols, 0 otherwise.  */

int
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}
/* Return the destination address of a branch.  */

static int
branch_dest (rtx branch)
{
  rtx dest = SET_SRC (PATTERN (branch));
  int dest_uid;

  if (GET_CODE (dest) == IF_THEN_ELSE)
    dest = XEXP (dest, 1);
  dest = XEXP (dest, 0);
  dest_uid = INSN_UID (dest);
  return INSN_ADDRESSES (dest_uid);
}
/* Return nonzero if REG is not used after INSN.
   We assume REG is a reload reg, and therefore does
   not live past labels.  It may live past calls or jumps though.  */
int
reg_unused_after (rtx reg, rtx insn)
{
  enum rtx_code code;
  rtx set;

  /* If the reg is set by this instruction, then it is safe for our
     case.  Disregard the case where this is a store to memory, since
     we are checking a register used in the store address.  */
  set = single_set (insn);
  if (set && GET_CODE (SET_DEST (set)) != MEM
      && reg_overlap_mentioned_p (reg, SET_DEST (set)))
    return 1;

  while ((insn = NEXT_INSN (insn)))
    {
      rtx set;
      if (!INSN_P (insn))
        continue;

      code = GET_CODE (insn);

#if 0
      /* If this is a label that existed before reload, then the register
         is dead here.  However, if this is a label added by reorg, then
         the register may still be live here.  We can't tell the difference,
         so we just ignore labels completely.  */
      if (code == CODE_LABEL)
        return 1;
      /* else */
#endif

      if (code == JUMP_INSN)
        return 0;

      /* If this is a sequence, we must handle them all at once.
         We could have for instance a call that sets the target register,
         and an insn in a delay slot that uses the register.  In this case,
         we must return 0.  */
      else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
        {
          int i;
          int retval = 0;

          for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
            {
              rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
              rtx set = single_set (this_insn);

              if (GET_CODE (this_insn) == CALL_INSN)
                code = CALL_INSN;
              else if (GET_CODE (this_insn) == JUMP_INSN)
                {
                  if (INSN_ANNULLED_BRANCH_P (this_insn))
                    return 0;
                  code = JUMP_INSN;
                }

              if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
                return 0;
              if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
                {
                  if (GET_CODE (SET_DEST (set)) != MEM)
                    retval = 1;
                  else
                    return 0;
                }
              if (set == 0
                  && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
                return 0;
            }
          if (retval == 1)
            return 1;
          else if (code == JUMP_INSN)
            return 0;
        }

      set = single_set (insn);
      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
        return 0;
      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
        return GET_CODE (SET_DEST (set)) != MEM;
      if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
        return 0;

      if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
        return 1;
    }
  return 1;
}
static GTY(()) rtx fpscr_rtx;
rtx
get_fpscr_rtx (void)
{
  if (! fpscr_rtx)
    {
      fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
      REG_USERVAR_P (fpscr_rtx) = 1;
      mark_user_reg (fpscr_rtx);
    }
  if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
    mark_user_reg (fpscr_rtx);
  return fpscr_rtx;
}
void
emit_sf_insn (rtx pat)
{
  emit_insn (pat);
}

void
emit_df_insn (rtx pat)
{
  emit_insn (pat);
}

void
expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
{
  emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
}

void
expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
{
  emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
                        get_fpscr_rtx ()));
}

void
expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
{
  emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
}

void
expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
{
  emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
                        get_fpscr_rtx ()));
}
/* ??? gcc does flow analysis strictly after common subexpression
   elimination.  As a result, common subexpression elimination fails
   when there are some intervening statements setting the same register.
   If we did nothing about this, this would hurt the precision switching
   for SH4 badly.  There is some cse after reload, but it is unable to
   undo the extra register pressure from the unused instructions, and
   it cannot remove auto-increment loads.

   A C code example that shows this flow/cse weakness for (at least) SH
   and sparc (as of gcc ss-970706) is this:
   (the example itself was elided in this copy; a reconstructed sketch
   follows this comment.)

   So we add another pass before common subexpression elimination, to
   remove assignments that are dead due to a following assignment in the
   same basic block.  */
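/* Illustrative reconstruction (not the original example) of the kind of
   code affected:

     double f (double a)
     {
       double d;
       d = 0.1;
       a += d;
       d = 1.5;    <- dead: D is overwritten below before any use
       d = 0.1;
       a *= d;
       return a;
     }

   The dead store to D keeps cse from reusing the first load of 0.1 and
   inflates register pressure until the extra pass removes it.  */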
static void
mark_use (rtx x, rtx *reg_set_block)
{
  enum rtx_code code;

  if (! x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
        int regno = REGNO (x);
        int nregs = (regno < FIRST_PSEUDO_REGISTER
                     ? HARD_REGNO_NREGS (regno, GET_MODE (x))
                     : 1);
        do
          {
            reg_set_block[regno + nregs - 1] = 0;
          }
        while (--nregs);
        break;
      }
    case SET:
      {
        rtx dest = SET_DEST (x);

        if (GET_CODE (dest) == SUBREG)
          dest = SUBREG_REG (dest);
        if (GET_CODE (dest) != REG)
          mark_use (dest, reg_set_block);
        mark_use (SET_SRC (x), reg_set_block);
        break;
      }
    case CLOBBER:
      break;
    default:
      {
        const char *fmt = GET_RTX_FORMAT (code);
        int i, j;

        for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
          {
            if (fmt[i] == 'e')
              mark_use (XEXP (x, i), reg_set_block);
            else if (fmt[i] == 'E')
              for (j = XVECLEN (x, i) - 1; j >= 0; j--)
                mark_use (XVECEXP (x, i, j), reg_set_block);
          }
        break;
      }
    }
}
static rtx get_free_reg (HARD_REG_SET);

/* This function returns a register to use to load the address to load
   the fpscr from.  Currently it always returns r1 or r7, but when we are
   able to use pseudo registers after combine, or have a better mechanism
   for choosing a register, it should be done here.  */
/* REGS_LIVE is the liveness information for the point for which we
   need this allocation.  In some bare-bones exit blocks, r1 is live at the
   start.  We can even have all of r0..r3 being live:
__complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
   The INSN before which new insns are placed will clobber the register
   we return.  If a basic block consists only of setting the return value
   register to a pseudo and using that register, the return value is not
   live before or after this block, yet we'll insert our insns right in
   the middle.  */

static rtx
get_free_reg (HARD_REG_SET regs_live)
{
  if (! TEST_HARD_REG_BIT (regs_live, 1))
    return gen_rtx_REG (Pmode, 1);

  /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
     there shouldn't be anything but a jump before the function end.  */
  gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
  return gen_rtx_REG (Pmode, 7);
}
/* This function will set the fpscr from memory.
   MODE is the mode we are setting it to.  */
void
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
{
  enum attr_fp_mode fp_mode = mode;
  rtx addr_reg = get_free_reg (regs_live);

  if (fp_mode == (enum attr_fp_mode) ACTUAL_NORMAL_MODE (FP_MODE))
    emit_insn (gen_fpu_switch1 (addr_reg));
  else
    emit_insn (gen_fpu_switch0 (addr_reg));
}
/* Is the given character a logical line separator for the assembler?  */
#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';')
#endif

int
sh_insn_length_adjustment (rtx insn)
{
  /* Instructions with unfilled delay slots take up an extra two bytes for
     the nop in the delay slot.  */
  if (((GET_CODE (insn) == INSN
        && GET_CODE (PATTERN (insn)) != USE
        && GET_CODE (PATTERN (insn)) != CLOBBER)
       || GET_CODE (insn) == CALL_INSN
       || (GET_CODE (insn) == JUMP_INSN
           && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
           && GET_CODE (PATTERN (insn)) != ADDR_VEC))
      && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
      && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
    return 2;

  /* SH2e has a bug that prevents the use of annulled branches, so if
     the delay slot is not filled, we'll have to put a NOP in it.  */
  if (sh_cpu == CPU_SH2E
      && GET_CODE (insn) == JUMP_INSN
      && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
      && GET_CODE (PATTERN (insn)) != ADDR_VEC
      && get_attr_type (insn) == TYPE_CBRANCH
      && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
    return 2;

  /* sh-dsp parallel processing insn take four bytes instead of two.  */

  if (GET_CODE (insn) == INSN)
    {
      int sum = 0;
      rtx body = PATTERN (insn);
      const char *template;
      char c;
      int maybe_label = 1;

      if (GET_CODE (body) == ASM_INPUT)
        template = XSTR (body, 0);
      else if (asm_noperands (body) >= 0)
        template
          = decode_asm_operands (body, NULL, NULL, NULL, NULL);
      else
        return 0;
      do
        {
          int ppi_adjust = 0;

          do
            c = *template++;
          while (c == ' ' || c == '\t');
          /* all sh-dsp parallel-processing insns start with p.
             The only non-ppi sh insn starting with p is pref.
             The only ppi starting with pr is prnd.  */
          if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
            ppi_adjust = 2;
          /* The repeat pseudo-insn expands to three insns, a total of
             six bytes in size.  */
          else if ((c == 'r' || c == 'R')
                   && ! strncasecmp ("epeat", template, 5))
            ppi_adjust = 4;
          while (c && c != '\n' && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c))
            {
              /* If this is a label, it is obviously not a ppi insn.  */
              if (c == ':' && maybe_label)
                {
                  ppi_adjust = 0;
                  break;
                }
              else if (c == '\'' || c == '"')
                maybe_label = 0;
              c = *template++;
            }
          sum += ppi_adjust;
          maybe_label = c != ':';
        }
      while (c);
      return sum;
    }
  return 0;
}
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec.  */
int
nonpic_symbol_mentioned_p (rtx x)
{
  register const char *fmt;
  register int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
      || GET_CODE (x) == PC)
    return 1;

  /* We don't want to look into the possible MEM location of a
     CONST_DOUBLE, since we're not going to use it, in general.  */
  if (GET_CODE (x) == CONST_DOUBLE)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
          || XINT (x, 1) == UNSPEC_GOT
          || XINT (x, 1) == UNSPEC_GOTOFF
          || XINT (x, 1) == UNSPEC_GOTPLT
          || XINT (x, 1) == UNSPEC_GOTTPOFF
          || XINT (x, 1) == UNSPEC_DTPOFF
          || XINT (x, 1) == UNSPEC_PLT))
    return 0;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          register int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
        return 1;
    }

  return 0;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx reg)
{
  if (tls_symbolic_operand (orig, Pmode))
    return orig;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
    {
      if (reg == 0)
        reg = gen_reg_rtx (Pmode);

      emit_insn (gen_symGOTOFF2reg (reg, orig));
      return reg;
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == 0)
        reg = gen_reg_rtx (Pmode);

      emit_insn (gen_symGOT2reg (reg, orig));
      return reg;
    }
  return orig;
}
/* Mark the use of a constant in the literal table.  If the constant
   has multiple labels, make it unique.  */
static rtx
mark_constant_pool_use (rtx x)
{
  rtx insn, lab, pattern;

  if (x == NULL)
    return x;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
      x = XEXP (x, 0);
    case CODE_LABEL:
      break;
    default:
      return x;
    }

  /* Get the first label in the list of labels for the same constant
     and delete other labels in the list.  */
  lab = x;
  for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
    {
      if (GET_CODE (insn) != CODE_LABEL
          || LABEL_REFS (insn) != NEXT_INSN (insn))
        break;
      lab = insn;
    }

  for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
    INSN_DELETED_P (insn) = 1;

  /* Mark constants in a window.  */
  for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != INSN)
        continue;

      pattern = PATTERN (insn);
      if (GET_CODE (pattern) != UNSPEC_VOLATILE)
        continue;

      switch (XINT (pattern, 1))
        {
        case UNSPECV_CONST2:
        case UNSPECV_CONST4:
        case UNSPECV_CONST8:
          XVECEXP (pattern, 0, 1) = const1_rtx;
          break;
        case UNSPECV_WINDOW_END:
          if (XVECEXP (pattern, 0, 0) == x)
            return lab;
          break;
        case UNSPECV_CONST_END:
          return lab;
        default:
          break;
        }
    }

  return lab;
}
/* Return true if it's possible to redirect BRANCH1 to the destination
   of an unconditional jump BRANCH2.  We only want to do this if the
   resulting branch will have a short displacement.  */
int
sh_can_redirect_branch (rtx branch1, rtx branch2)
{
  if (flag_expensive_optimizations && simplejump_p (branch2))
    {
      rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
      rtx insn;
      int distance;

      for (distance = 0, insn = NEXT_INSN (branch1);
           insn && distance < 256;
           insn = PREV_INSN (insn))
        {
          if (insn == dest)
            return 1;
          else
            distance += get_attr_length (insn);
        }
      for (distance = 0, insn = NEXT_INSN (branch1);
           insn && distance < 256;
           insn = NEXT_INSN (insn))
        {
          if (insn == dest)
            return 1;
          else
            distance += get_attr_length (insn);
        }
    }
  return 0;
}
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
int
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
                         unsigned int new_reg)
{
  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.  */

  if (sh_cfun_interrupt_handler_p () && !regs_ever_live[new_reg])
    return 0;

  return 1;
}
/* Function to update the integer COST
   based on the relationship between INSN that is dependent on
   DEP_INSN through the dependence LINK.  The default is to make no
   adjustment to COST.  This can be used for example to specify to
   the scheduler that an output- or anti-dependence does not incur
   the same cost as a data-dependence.  The return value should be
   the new value for COST.  */
static int
sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
{
  rtx reg, use_pat;

  if (TARGET_SHMEDIA)
    {
      /* On SHmedia, if the dependence is an anti-dependence or
         output-dependence, there is no cost.  */
      if (REG_NOTE_KIND (link) != 0)
        {
          /* However, dependencies between target register loads and
             uses of the register in a subsequent block that are separated
             by a conditional branch are not modelled - we have to do with
             the anti-dependency between the target register load and the
             conditional branch that ends the current block.  */
          if (REG_NOTE_KIND (link) == REG_DEP_ANTI
              && GET_CODE (PATTERN (dep_insn)) == SET
              && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
                  || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
              && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
            {
              int orig_cost = cost;
              rtx note = find_reg_note (insn, REG_BR_PROB, 0);
              rtx target = ((! note
                             || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
                            ? insn : JUMP_LABEL (insn));
              /* On the likely path, the branch costs 1, on the unlikely path,
                 it costs 3.  */
              cost--;
              do
                target = next_active_insn (target);
              while (target && ! flow_dependent_p (target, dep_insn)
                     && --cost > 0);
              /* If two branches are executed in immediate succession, with the
                 first branch properly predicted, this causes a stall at the
                 second branch, hence we won't need the target for the
                 second branch for two cycles after the launch of the first
                 branch.  */
              if (cost > orig_cost - 2)
                cost = orig_cost - 2;
            }
          else
            cost = 0;
        }

      else if (get_attr_is_mac_media (insn)
               && get_attr_is_mac_media (dep_insn))
        cost = 1;

      else if (! reload_completed
               && GET_CODE (PATTERN (insn)) == SET
               && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
               && GET_CODE (PATTERN (dep_insn)) == SET
               && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
               && cost < 4)
        cost = 4;
      /* Schedule the ptabs for a casesi_jump_media in preference to stuff
         that is needed at the target.  */
      else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
               && ! flow_dependent_p (insn, dep_insn))
        cost--;
    }
  else if (REG_NOTE_KIND (link) == 0)
    {
      enum attr_type dep_type, type;

      if (recog_memoized (insn) < 0
          || recog_memoized (dep_insn) < 0)
        return cost;

      dep_type = get_attr_type (dep_insn);
      if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
        cost--;
      if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
          && (type = get_attr_type (insn)) != TYPE_CALL
          && type != TYPE_SFUNC)
        cost--;

      /* The only input for a call that is timing-critical is the
         function's address.  */
      if (GET_CODE (insn) == CALL_INSN)
        {
          rtx call = PATTERN (insn);

          if (GET_CODE (call) == PARALLEL)
            call = XVECEXP (call, 0 ,0);
          if (GET_CODE (call) == SET)
            call = SET_SRC (call);
          if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
              /* sibcalli_thunk uses a symbol_ref in an unspec.  */
              && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
                  || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
            cost = 0;
        }
      /* Likewise, the most timing critical input for an sfuncs call
         is the function address.  However, sfuncs typically start
         using their arguments pretty quickly.
         Assume a four cycle delay before they are needed.  */
      /* All sfunc calls are parallels with at least four components.
         Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
               && XVECLEN (PATTERN (insn), 0) >= 4
               && (reg = sfunc_uses_reg (insn)))
        {
          if (! reg_set_p (reg, dep_insn))
            cost -= 4;
        }
      /* When the preceding instruction loads the shift amount of
         the following SHAD/SHLD, the latency of the load is increased
         by 1 cycle.  */
      else if (TARGET_SH4
               && get_attr_type (insn) == TYPE_DYN_SHIFT
               && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
               && reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)),
                                           XEXP (SET_SRC (single_set (insn)),
                                                 1)))
        cost++;
      /* When an LS group instruction with a latency of less than
         3 cycles is followed by a double-precision floating-point
         instruction, FIPR, or FTRV, the latency of the first
         instruction is increased to 3 cycles.  */
      else if (cost < 3
               && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
               && get_attr_dfp_comp (insn) == DFP_COMP_YES)
        cost = 3;
      /* The lsw register of a double-precision computation is ready one
         cycle earlier.  */
      else if (reload_completed
               && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
               && (use_pat = single_set (insn))
               && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
                                  SET_SRC (use_pat)))
        cost -= 1;

      if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
          && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
        cost -= 1;
    }
  /* An anti-dependence penalty of two applies if the first insn is a double
     precision fadd / fsub / fmul.  */
  else if (REG_NOTE_KIND (link) == REG_DEP_ANTI
           && recog_memoized (dep_insn) >= 0
           && get_attr_type (dep_insn) == TYPE_DFP_ARITH
           /* A lot of alleged anti-flow dependences are fake,
              so check this one is real.  */
           && flow_dependent_p (dep_insn, insn))
    cost = 2;

  return cost;
}
/* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
   if DEP_INSN is anti-flow dependent on INSN.  */
static int
flow_dependent_p (rtx insn, rtx dep_insn)
{
  rtx tmp = PATTERN (insn);

  note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
  return tmp == NULL_RTX;
}

/* A helper function for flow_dependent_p called through note_stores.  */
static void
flow_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
  rtx * pinsn = (rtx *) data;

  if (*pinsn && reg_referenced_p (x, *pinsn))
    *pinsn = NULL_RTX;
}
/* For use by sh_allocate_initial_value.  Note that sh.md contains some
   'special function' patterns (type sfunc) that clobber pr, but that
   do not look like function calls to leaf_function_p.  Hence we must
   do this extra check.  */
static int
sh_pr_n_sets (void)
{
  return REG_N_SETS (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
}

/* Return where to allocate pseudo for a given hard register initial
   value.  */
static rtx
sh_allocate_initial_value (rtx hard_reg)
{
  rtx x;

  if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
    {
      if (current_function_is_leaf
          && ! sh_pr_n_sets ()
          && ! (TARGET_SHCOMPACT
                && ((current_function_args_info.call_cookie
                     & ~ CALL_COOKIE_RET_TRAMP (1))
                    || current_function_has_nonlocal_label)))
        x = hard_reg;
      else
        x = gen_rtx_MEM (Pmode, return_address_pointer_rtx);
    }
  else
    x = NULL_RTX;

  return x;
}
/* This function returns "2" to indicate dual issue for the SH4
   processor.  To be used by the DFA pipeline description.  */
static int
sh_issue_rate (void)
{
  if (TARGET_SUPERSCALAR)
    return 2;
  else
    return 1;
}
/* Functions for ready queue reordering for sched1.  */

/* Get weight for mode for a set x.  */
static short
find_set_regmode_weight (rtx x, enum machine_mode mode)
{
  if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
    return 1;
  if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
    {
      if (GET_CODE (SET_DEST (x)) == REG)
        {
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
            return 1;
          else
            return 0;
        }
      return 1;
    }
  return 0;
}
/* Get regmode weight for insn.  */
static short
find_insn_regmode_weight (rtx insn, enum machine_mode mode)
{
  short reg_weight = 0;
  rtx x;

  /* Increment weight for each register born here.  */
  x = PATTERN (insn);
  reg_weight += find_set_regmode_weight (x, mode);
  if (GET_CODE (x) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
        {
          x = XVECEXP (PATTERN (insn), 0, j);
          reg_weight += find_set_regmode_weight (x, mode);
        }
    }
  /* Decrement weight for each register that dies here.  */
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
    {
      if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
        {
          rtx note = XEXP (x, 0);
          if (GET_CODE (note) == REG && GET_MODE (note) == mode)
            reg_weight--;
        }
    }
  return reg_weight;
}
/* Calculate regmode weights for all insns of a basic block.  */
static void
find_regmode_weight (int b, enum machine_mode mode)
{
  rtx insn, next_tail, head, tail;

  get_block_head_tail (b, &head, &tail);
  next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      /* Handle register life information.  */
      if (!INSN_P (insn))
        continue;

      if (mode == SFmode)
        INSN_REGMODE_WEIGHT (insn, mode) =
          find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
      else if (mode == SImode)
        INSN_REGMODE_WEIGHT (insn, mode) =
          find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
    }
}
8602 /* Comparison function for ready queue sorting. */
8604 rank_for_reorder (const void *x
, const void *y
)
8606 rtx tmp
= *(const rtx
*) y
;
8607 rtx tmp2
= *(const rtx
*) x
;
8609 /* The insn in a schedule group should be issued the first. */
8610 if (SCHED_GROUP_P (tmp
) != SCHED_GROUP_P (tmp2
))
8611 return SCHED_GROUP_P (tmp2
) ? 1 : -1;
8613 /* If insns are equally good, sort by INSN_LUID (original insn order), This
8614 minimizes instruction movement, thus minimizing sched's effect on
8615 register pressure. */
8616 return INSN_LUID (tmp
) - INSN_LUID (tmp2
);
8619 /* Resort the array A in which only element at index N may be out of order. */
8621 swap_reorder (rtx
*a
, int n
)
8623 rtx insn
= a
[n
- 1];
8626 while (i
>= 0 && rank_for_reorder (a
+ i
, &insn
) >= 0)
8634 #define SCHED_REORDER(READY, N_READY) \
8637 if ((N_READY) == 2) \
8638 swap_reorder (READY, N_READY); \
8639 else if ((N_READY) > 2) \
8640 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
/* Sort the ready list READY by ascending priority, using the SCHED_REORDER
   macro.  */
static void
ready_reorder (rtx *ready, int nready)
{
  SCHED_REORDER (ready, nready);
}
/* Calculate regmode weights for all insns of all basic blocks.  */
static void
sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
                   int verbose ATTRIBUTE_UNUSED,
                   int old_max_uid)
{
  basic_block b;

  regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
  regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));

  FOR_EACH_BB_REVERSE (b)
    {
      find_regmode_weight (b->index, SImode);
      find_regmode_weight (b->index, SFmode);
    }

  CURR_REGMODE_PRESSURE (SImode) = 0;
  CURR_REGMODE_PRESSURE (SFmode) = 0;
}
/* Cleanup.  */
static void
sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
                     int verbose ATTRIBUTE_UNUSED)
{
  if (regmode_weight[0])
    {
      free (regmode_weight[0]);
      regmode_weight[0] = NULL;
    }
  if (regmode_weight[1])
    {
      free (regmode_weight[1]);
      regmode_weight[1] = NULL;
    }
}
/* Cache the can_issue_more so that we can return it from reorder2.  Also,
   keep count of register pressures on SImode and SFmode.  */
static int
sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   rtx insn,
                   int can_issue_more)
{
  if (GET_CODE (PATTERN (insn)) != USE
      && GET_CODE (PATTERN (insn)) != CLOBBER)
    cached_can_issue_more = can_issue_more - 1;
  else
    cached_can_issue_more = can_issue_more;

  if (reload_completed)
    return cached_can_issue_more;

  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
  CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);

  return cached_can_issue_more;
}
static void
sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
            int verbose ATTRIBUTE_UNUSED,
            int veclen ATTRIBUTE_UNUSED)
{
  CURR_REGMODE_PRESSURE (SImode) = 0;
  CURR_REGMODE_PRESSURE (SFmode) = 0;
}
/* Some magic numbers.  */
/* Pressure on register r0 can lead to spill failures, so avoid sched1 for
   functions that already have high pressure on r0.  */
#define R0_MAX_LIFE_REGIONS 2
#define R0_MAX_LIVE_LENGTH 12
/* Register pressure thresholds for SImode and SFmode registers.  */
#define SIMODE_MAX_WEIGHT 5
#define SFMODE_MAX_WEIGHT 10

/* Return true if the pressure is high for MODE.  */
static short
high_pressure (enum machine_mode mode)
{
  /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
     functions that already have high pressure on r0.  */
  if ((REG_N_SETS (0) - REG_N_DEATHS (0)) >= R0_MAX_LIFE_REGIONS
      && REG_LIVE_LENGTH (0) >= R0_MAX_LIVE_LENGTH)
    return 1;

  if (mode == SFmode)
    return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
  else
    return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
}
/* Reorder ready queue if register pressure is high.  */
static int
sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
            int sched_verbose ATTRIBUTE_UNUSED,
            rtx *ready,
            int *n_readyp,
            int clock_var ATTRIBUTE_UNUSED)
{
  if (reload_completed)
    return sh_issue_rate ();

  if (high_pressure (SFmode) || high_pressure (SImode))
    {
      ready_reorder (ready, *n_readyp);
    }

  return sh_issue_rate ();
}
/* Skip cycles if the current register pressure is high.  */
static int
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
             int sched_verbose ATTRIBUTE_UNUSED,
             rtx *ready ATTRIBUTE_UNUSED,
             int *n_readyp ATTRIBUTE_UNUSED,
             int clock_var ATTRIBUTE_UNUSED)
{
  if (reload_completed)
    return cached_can_issue_more;

  if (high_pressure (SFmode) || high_pressure (SImode))
    skip_cycles = 1;

  return cached_can_issue_more;
}
/* Skip cycles without sorting the ready queue.  This will move insns from
   Q->R.  If this is the last cycle we are skipping, allow sorting of the
   ready queue by sh_reorder.  */

/* Generally, skipping this many cycles is sufficient for all insns to move
   from Q -> R.  */
#define MAX_SKIPS 8

static int
sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
                  int sched_verbose ATTRIBUTE_UNUSED,
                  rtx insn ATTRIBUTE_UNUSED,
                  int last_clock_var,
                  int clock_var,
                  int *sort_p)
{
  if (reload_completed)
    return 0;

  if (skip_cycles)
    {
      if ((clock_var - last_clock_var) < MAX_SKIPS)
        {
          *sort_p = 0;
          return 1;
        }
      /* If this is the last cycle we are skipping, allow reordering of R.  */
      if ((clock_var - last_clock_var) == MAX_SKIPS)
        {
          *sort_p = 1;
          return 1;
        }
    }

  skip_cycles = 0;

  return 0;
}
/* SHmedia requires registers for branches, so we can't generate new
   branches past reload.  */
static bool
sh_cannot_modify_jumps_p (void)
{
  return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
}

static int
sh_target_reg_class (void)
{
  return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
}

static bool
sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
{
  HARD_REG_SET dummy;
  rtx insn;

  if (! shmedia_space_reserved_for_target_registers)
    return 0;
  if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
    return 0;
  if (calc_live_regs (&dummy) >= 6 * 8)
    return 1;
  /* This is a borderline case.  See if we got a nested loop, or a loop
     with a call, or with more than 4 labels inside.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        {
          int labels = 0;

          do
            {
              insn = NEXT_INSN (insn);
              if ((GET_CODE (insn) == NOTE
                   && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  || GET_CODE (insn) == CALL_INSN
                  || (GET_CODE (insn) == CODE_LABEL && ++labels > 4))
                return 1;
            }
          while (GET_CODE (insn) != NOTE
                 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END);
        }
    }
  return 0;
}
static bool
sh_ms_bitfield_layout_p (tree record_type ATTRIBUTE_UNUSED)
{
  return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
}
/* On the SH1..SH4, the trampoline looks like
   2 0002 D202                  mov.l   l2,r2
   1 0000 D301                  mov.l   l1,r3
   3 0004 422B                  jmp     @r2
   4 0006 0009                  nop
   5 0008 00000000      l1:     .long   area
   6 000c 00000000      l2:     .long   function

   SH5 (compact) uses r1 instead of r3 for the static chain.  */
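
/* Editor's sketch (hypothetical helper, not part of the original file):
   each SImode word of the generic trampoline below packs two adjacent
   16-bit insns; little-endian puts the lower-addressed insn in the low
   halfword, big-endian in the high halfword.  */
#if 0
static unsigned int
sh_pack_tramp_word (unsigned short lo, unsigned short hi, int little_endian)
{
  /* LO is the insn at the lower address, HI the one right after it.  */
  return little_endian ? ((unsigned int) hi << 16) | lo
                       : ((unsigned int) lo << 16) | hi;
}
/* sh_pack_tramp_word (0xd202, 0xd301, 1) == 0xd301d202 and
   sh_pack_tramp_word (0xd202, 0xd301, 0) == 0xd202d301, matching the first
   word stored by the generic case of sh_initialize_trampoline; likewise
   0x422b/0x0009 gives 0x0009422b resp. 0x422b0009 for the second word.  */
#endif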
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

void
sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
{
  if (TARGET_SHMEDIA64)
    {
      rtx tramp_templ;
      int fixed_len;

      rtx movi1 = GEN_INT (0xcc000010);
      rtx shori1 = GEN_INT (0xc8000010);
      rtx src, dst;

      /* The following trampoline works within a +- 128 KB range for cxt:
         ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
         shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
         gettr tr1,r1; blink tr0,r63  */
      /* Address rounding makes it hard to compute the exact bounds of the
         offset for this trampoline, but we have a rather generous offset
         range, so frame_offset should do fine as an upper bound.  */
      if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
        {
          /* ??? could optimize this trampoline initialization
             by writing DImode words with two insns each.  */
          rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
          rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
          insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
          insn = gen_rtx_AND (DImode, insn, mask);
          /* Or in ptb/u .,tr1 pattern */
          insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
          insn = force_operand (insn, NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, tramp), insn);
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)), insn);
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)), insn);
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
                          insn);
          insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 16)),
                          insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 20)),
                          GEN_INT (0x6bf10600));
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 24)),
                          GEN_INT (0x4415fc10));
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 28)),
                          GEN_INT (0x4401fff0));
          emit_insn (gen_ic_invalidate_line (tramp));
          return;
        }
      tramp_templ = gen_rtx_SYMBOL_REF (Pmode,"__GCC_nested_trampoline");
      fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);

      tramp_templ = gen_datalabel_ref (tramp_templ);
      dst = gen_rtx_MEM (BLKmode, tramp);
      src = gen_rtx_MEM (BLKmode, tramp_templ);
      set_mem_align (dst, 256);
      set_mem_align (src, 64);
      emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);

      emit_move_insn (gen_rtx_MEM (Pmode, plus_constant (tramp, fixed_len)),
                      fnaddr);
      emit_move_insn (gen_rtx_MEM (Pmode,
                                   plus_constant (tramp,
                                                  fixed_len
                                                  + GET_MODE_SIZE (Pmode))),
                      cxt);
      emit_insn (gen_ic_invalidate_line (tramp));
      return;
    }
  else if (TARGET_SHMEDIA)
    {
      /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
         movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63  */
      rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
      rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
      /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010  concatenated,
         rotated 10 right, and higher 16 bit of every 32 selected.  */
      rtx movishori
        = force_reg (V2HImode, (simplify_gen_subreg
                                (V2HImode, GEN_INT (0x4330432), SImode, 0)));
      rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
      rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));

      tramp = force_reg (Pmode, tramp);
      fnaddr = force_reg (SImode, fnaddr);
      cxt = force_reg (SImode, cxt);
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
                                 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
                                 movishori));
      emit_insn (gen_rotrdi3_mextr (quad0, quad0,
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
      emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
      emit_move_insn (gen_rtx_MEM (DImode, tramp), quad0);
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
                                 gen_rtx_SUBREG (V2HImode, cxt, 0),
                                 movishori));
      emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
      emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
      if (TARGET_LITTLE_ENDIAN)
        {
          emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
          emit_insn (gen_mextr4 (quad2, cxtload, blink));
        }
      else
        {
          emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
          emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
        }
      emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 8)), quad1);
      emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), quad2);
      emit_insn (gen_ic_invalidate_line (tramp));
      return;
    }
  else if (TARGET_SHCOMPACT)
    {
      emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
      return;
    }
  emit_move_insn (gen_rtx_MEM (SImode, tramp),
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
                                SImode));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
                                SImode));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
                  cxt);
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
                  fnaddr);
  if (TARGET_HARVARD)
    {
      if (TARGET_USERMODE)
        emit_library_call (function_symbol (NULL, "__ic_invalidate",
                                            FUNCTION_ORDINARY),
                           0, VOIDmode, 1, tramp, SImode);
      else
        emit_insn (gen_ic_invalidate_line (tramp));
    }
}
/* FIXME: This is overly conservative.  A SHcompact function that
   receives arguments ``by reference'' will have them stored in its
   own stack frame, so it must not pass pointers or references to
   these arguments to other functions by means of sibling calls.  */
/* If PIC, we cannot make sibling calls to global functions
   because the PLT requires r12 to be live.  */
static bool
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (1
          && (! TARGET_SHCOMPACT
              || current_function_args_info.stack_regs == 0)
          && ! sh_cfun_interrupt_handler_p ()
          && (! flag_pic
              || (decl && ! TREE_PUBLIC (decl))
              || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
}
/* Machine specific built-in functions.  */

struct builtin_description
{
  const enum insn_code icode;
  const char *const name;
  int signature;
};

/* describe number and signedness of arguments; arg[0] == result
   (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument)  */
/* 9: 64 bit pointer, 10: 32 bit pointer */
static const char signature_args[][4] =
{
#define SH_BLTIN_V2SI2 0
  { 2, 2 },
#define SH_BLTIN_V4HI2 1
  { 2, 2 },
#define SH_BLTIN_V2SI3 2
  { 2, 2, 2 },
#define SH_BLTIN_V4HI3 3
  { 2, 2, 2 },
#define SH_BLTIN_V8QI3 4
  { 2, 2, 2 },
#define SH_BLTIN_MAC_HISI 5
  { 2, 4, 4, 1 },
#define SH_BLTIN_SH_HI 6
  { 2, 2, 1 },
#define SH_BLTIN_SH_SI 7
  { 2, 2, 1 },
#define SH_BLTIN_V4HI2V2SI 8
  { 2, 2, 2 },
#define SH_BLTIN_V4HI2V8QI 9
  { 2, 2, 2 },
#define SH_BLTIN_SISF 10
  { 4, 2 },
#define SH_BLTIN_LDUA_L 11
  { 2, 10 },
#define SH_BLTIN_LDUA_Q 12
  { 1, 10 },
#define SH_BLTIN_STUA_L 13
  { 0, 10, 2 },
#define SH_BLTIN_STUA_Q 14
  { 0, 10, 1 },
#define SH_BLTIN_LDUA_L64 15
  { 2, 9 },
#define SH_BLTIN_LDUA_Q64 16
  { 1, 9 },
#define SH_BLTIN_STUA_L64 17
  { 0, 9, 2 },
#define SH_BLTIN_STUA_Q64 18
  { 0, 9, 1 },
#define SH_BLTIN_NUM_SHARED_SIGNATURES 19
#define SH_BLTIN_2 19
#define SH_BLTIN_SU 19
  { 1, 2 },
#define SH_BLTIN_3 20
#define SH_BLTIN_SUS 20
  { 2, 2, 1 },
#define SH_BLTIN_PSSV 21
  { 0, 8, 2, 2 },
#define SH_BLTIN_XXUU 22
#define SH_BLTIN_UUUU 22
  { 1, 1, 1, 1 },
#define SH_BLTIN_PV 23
  { 0, 8 },
};
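
/* A worked example of the encoding above: SH_BLTIN_SH_HI's row { 2, 2, 1 }
   reads as "signed V4HI result, signed V4HI operand, unsigned shift
   count", which is the shape of __builtin_ashlv4hi3 below; a leading 0,
   as in SH_BLTIN_PV, marks a builtin that returns void.  */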
/* mcmv: operands considered unsigned.  */
/* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
/* mperm: control value considered unsigned int.  */
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
/* mshards_q: returns signed short.  */
/* nsb: takes long long arg, returns unsigned char.  */
static const struct builtin_description bdesc[] =
{
  { CODE_FOR_absv2si2,	"__builtin_absv2si2", SH_BLTIN_V2SI2 },
  { CODE_FOR_absv4hi2,	"__builtin_absv4hi2", SH_BLTIN_V4HI2 },
  { CODE_FOR_addv2si3,	"__builtin_addv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_addv4hi3,	"__builtin_addv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
  { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_alloco_i,	"__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
  { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
  { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
  { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mcmv,	"__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
  { CODE_FOR_mcnvs_lw,	"__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
  { CODE_FOR_mcnvs_wb,	"__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
  { CODE_FOR_mcnvs_wub,	"__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
  { CODE_FOR_mextr1,	"__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
  { CODE_FOR_mextr2,	"__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
  { CODE_FOR_mextr3,	"__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
  { CODE_FOR_mextr4,	"__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
  { CODE_FOR_mextr5,	"__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
  { CODE_FOR_mextr6,	"__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
  { CODE_FOR_mextr7,	"__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
  { CODE_FOR_mmacfx_wl,	"__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
  { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
  { CODE_FOR_mulv2si3,	"__builtin_mulv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_mulv4hi3,	"__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_mmulfx_l,	"__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_mmulfx_w,	"__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mmulhi_wl,	"__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
  { CODE_FOR_mmullo_wl,	"__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
  { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
  { CODE_FOR_mperm_w,	"__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
  { CODE_FOR_msad_ubq,	"__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
  { CODE_FOR_mshalds_l,	"__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
  { CODE_FOR_mshalds_w,	"__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
  { CODE_FOR_ashrv2si3,	"__builtin_ashrv2si3", SH_BLTIN_SH_SI },
  { CODE_FOR_ashrv4hi3,	"__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
  { CODE_FOR_mshards_q,	"__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
  { CODE_FOR_mshfhi_b,	"__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
  { CODE_FOR_mshfhi_l,	"__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_mshfhi_w,	"__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mshflo_b,	"__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
  { CODE_FOR_mshflo_l,	"__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_mshflo_w,	"__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_ashlv2si3,	"__builtin_ashlv2si3", SH_BLTIN_SH_SI },
  { CODE_FOR_ashlv4hi3,	"__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
  { CODE_FOR_lshrv2si3,	"__builtin_lshrv2si3", SH_BLTIN_SH_SI },
  { CODE_FOR_lshrv4hi3,	"__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
  { CODE_FOR_subv2si3,	"__builtin_subv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_subv4hi3,	"__builtin_subv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
  { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_fcosa_s,	"__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
  { CODE_FOR_fsina_s,	"__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
  { CODE_FOR_fipr,	"__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
  { CODE_FOR_ftrv,	"__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
  { CODE_FOR_mac_media,	"__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
  { CODE_FOR_sqrtdf2,	"__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
  { CODE_FOR_sqrtsf2,	"__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
  { CODE_FOR_fsrra_s,	"__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
  { CODE_FOR_ldhi_l,	"__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
  { CODE_FOR_ldhi_q,	"__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
  { CODE_FOR_ldlo_l,	"__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
  { CODE_FOR_ldlo_q,	"__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
  { CODE_FOR_sthi_l,	"__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
  { CODE_FOR_sthi_q,	"__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
  { CODE_FOR_stlo_l,	"__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
  { CODE_FOR_stlo_q,	"__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
  { CODE_FOR_ldhi_l64,	"__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
  { CODE_FOR_ldhi_q64,	"__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
  { CODE_FOR_ldlo_l64,	"__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
  { CODE_FOR_ldlo_q64,	"__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
  { CODE_FOR_sthi_l64,	"__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
  { CODE_FOR_sthi_q64,	"__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
  { CODE_FOR_stlo_l64,	"__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
  { CODE_FOR_stlo_q64,	"__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
  { CODE_FOR_nsb,	"__builtin_sh_media_NSB", SH_BLTIN_SU },
  { CODE_FOR_byterev,	"__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
  { CODE_FOR_prefetch,	"__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
};
static void
sh_media_init_builtins (void)
{
  tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
  const struct builtin_description *d;

  memset (shared, 0, sizeof shared);
  for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
    {
      tree type, arg_type = 0;
      int signature = d->signature;
      int i;

      if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
        type = shared[signature];
      else
        {
          int has_result = signature_args[signature][0] != 0;

          if ((signature_args[signature][1] & 8)
              && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
                  || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
            continue;
          if (! TARGET_FPU_ANY
              && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
            continue;
          type = void_list_node;
          for (i = 3; ; i--)
            {
              int arg = signature_args[signature][i];
              int opno = i - 1 + has_result;

              if (arg & 8)
                arg_type = ptr_type_node;
              else if (arg)
                arg_type = (*lang_hooks.types.type_for_mode)
                  (insn_data[d->icode].operand[opno].mode,
                   (arg & 1));
              else if (i)
                continue;
              else
                arg_type = void_type_node;
              if (i == 0)
                break;
              type = tree_cons (NULL_TREE, arg_type, type);
            }
          type = build_function_type (arg_type, type);
          if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
            shared[signature] = type;
        }
      lang_hooks.builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
                                   NULL, NULL_TREE);
    }
}
/* Implements target hook vector_mode_supported_p.  */
bool
sh_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_FPU_ANY
      && ((mode == V2SFmode)
          || (mode == V4SFmode)
          || (mode == V16SFmode)))
    return true;

  else if (TARGET_SHMEDIA
           && ((mode == V8QImode)
               || (mode == V2HImode)
               || (mode == V4HImode)
               || (mode == V2SImode)))
    return true;

  return false;
}
/* Implements target hook dwarf_calling_convention.  Return an enum
   of dwarf_calling_convention.  */
int
sh_dwarf_calling_convention (tree func)
{
  if (sh_attr_renesas_p (func))
    return DW_CC_GNU_renesas_sh;

  return DW_CC_normal;
}
static void
sh_init_builtins (void)
{
  if (TARGET_SHMEDIA)
    sh_media_init_builtins ();
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  tree arglist = TREE_OPERAND (exp, 1);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d = &bdesc[fcode];
  enum insn_code icode = d->icode;
  int signature = d->signature;
  enum machine_mode tmode = VOIDmode;
  int nop = 0, i;
  rtx op[4];
  rtx pat;

  if (signature_args[signature][0])
    {
      if (ignore)
        return 0;

      tmode = insn_data[icode].operand[0].mode;
      if (! target
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      op[nop++] = target;
    }
  else
    target = 0;

  for (i = 1; i <= 3; i++, nop++)
    {
      tree arg;
      enum machine_mode opmode, argmode;
      tree optype;

      if (! signature_args[signature][i])
        break;
      arg = TREE_VALUE (arglist);
      if (arg == error_mark_node)
        return const0_rtx;
      arglist = TREE_CHAIN (arglist);
      if (signature_args[signature][i] & 8)
        {
          opmode = ptr_mode;
          optype = ptr_type_node;
        }
      else
        {
          opmode = insn_data[icode].operand[nop].mode;
          optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
        }
      argmode = TYPE_MODE (TREE_TYPE (arg));
      if (argmode != opmode)
        arg = build1 (NOP_EXPR, optype, arg);
      op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
      if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
        op[nop] = copy_to_mode_reg (opmode, op[nop]);
    }

  switch (nop)
    {
    case 1:
      pat = (*insn_data[d->icode].genfun) (op[0]);
      break;
    case 2:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
      break;
    case 3:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
      break;
    case 4:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
      break;
    default:
      return 0;
    }
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
void
sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
{
  rtx sel0 = const0_rtx;
  rtx sel1 = const1_rtx;
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
  rtx op = gen_rtx_fmt_e (code, SFmode, op1);

  emit_insn ((*fn) (op0, op1, op, sel0, sel0));
  emit_insn ((*fn) (op0, op1, op, sel1, sel1));
}

void
sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
{
  rtx sel0 = const0_rtx;
  rtx sel1 = const1_rtx;
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx)
    = gen_binary_sf_op;
  rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);

  emit_insn ((*fn) (op0, op1, op2, op, sel0, sel0, sel0, sel1));
  emit_insn ((*fn) (op0, op1, op2, op, sel1, sel1, sel1, sel0));
}
/* Return the class of registers for which a mode change from FROM to TO
   is invalid.  */
bool
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class class)
{
  /* We want to enable the use of SUBREGs as a means to
     VEC_SELECT a single element of a vector.  */
  if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
    return (reg_classes_intersect_p (GENERAL_REGS, class));

  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
    {
      if (TARGET_LITTLE_ENDIAN)
        {
          if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
            return reg_classes_intersect_p (DF_REGS, class);
        }
      else
        {
          if (GET_MODE_SIZE (from) < 8)
            return reg_classes_intersect_p (DF_HI_REGS, class);
        }
    }
  return 0;
}
/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
   that label is used.  */

void
sh_mark_label (rtx address, int nuses)
{
  if (GOTOFF_P (address))
    {
      /* Extract the label or symbol.  */
      address = XEXP (address, 0);
      if (GET_CODE (address) == PLUS)
        address = XEXP (address, 0);
      address = XVECEXP (address, 0, 0);
    }
  if (GET_CODE (address) == LABEL_REF
      && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
    LABEL_NUSES (XEXP (address, 0)) += nuses;
}
/* Compute extra cost of moving data between one register class
   and another.  */

/* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
   uses this information.  Hence, the general register <-> floating point
   register information here is not used for SFmode.  */

int
sh_register_move_cost (enum machine_mode mode,
                       enum reg_class srcclass, enum reg_class dstclass)
{
  if (dstclass == T_REGS || dstclass == PR_REGS)
    return 10;

  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
    return 4;

  if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
      && REGCLASS_HAS_FP_REG (srcclass)
      && REGCLASS_HAS_FP_REG (dstclass))
    return 4;

  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
    return 9;

  if ((REGCLASS_HAS_FP_REG (dstclass)
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
          && REGCLASS_HAS_FP_REG (srcclass)))
    return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
            * ((GET_MODE_SIZE (mode) + 7) / 8U));

  if ((dstclass == FPUL_REGS
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (srcclass == FPUL_REGS
          && REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 5;

  if ((dstclass == FPUL_REGS
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
      || (srcclass == FPUL_REGS
          && (dstclass == PR_REGS || dstclass == MAC_REGS)))
    return 7;

  if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 20;

  /* ??? ptabs faults on (value & 0x3) == 0x3  */
  if (TARGET_SHMEDIA
      && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
    {
      if (sh_gettrcost >= 0)
        return sh_gettrcost;
      else if (!TARGET_PT_FIXED)
        return 100;
    }

  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 4;

  if (TARGET_SHMEDIA
      || (TARGET_FMOVD
          && ! REGCLASS_HAS_GENERAL_REG (srcclass)
          && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);

  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
}
static rtx emit_load_ptr (rtx, rtx);

static rtx
emit_load_ptr (rtx reg, rtx addr)
{
  rtx mem = gen_rtx_MEM (ptr_mode, addr);

  if (Pmode != ptr_mode)
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
  return emit_move_insn (reg, mem);
}
static void
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                    tree function)
{
  CUMULATIVE_ARGS cum;
  int structure_value_byref = 0;
  rtx this, this_value, sibcall, insns, funexp;
  tree funtype = TREE_TYPE (function);
  int simple_add = CONST_OK_FOR_ADD (delta);
  int did_load = 0;
  rtx scratch0, scratch1, scratch2;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  current_function_uses_only_leaf_regs = 1;
  reset_block_changes ();

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  We have such a wide range of ABIs for the
     SH that it's best to do this completely machine independently.
     "this" is passed as first argument, unless a structure return pointer
     comes first, in which case "this" comes second.  */
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
    {
      tree ptype = build_pointer_type (TREE_TYPE (funtype));

      FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
    }
  this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);

  /* For SHcompact, we only have r0 for a scratch register: r1 is the
     static chain pointer (even if you can't have nested virtual functions
     right now, someone might implement them sometime), and the rest of the
     registers are used for argument passing, are callee-saved, or reserved.  */
  /* We need to check call_used_regs / fixed_regs in case -fcall_saved-reg /
     -ffixed-reg has been used.  */
  if (! call_used_regs[0] || fixed_regs[0])
    error ("r0 needs to be available as a call-clobbered register");
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
  if (! TARGET_SH5)
    {
      if (call_used_regs[1] && ! fixed_regs[1])
        scratch1 = gen_rtx_REG (ptr_mode, 1);
      /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
         pointing where to return struct values.  */
      if (call_used_regs[3] && ! fixed_regs[3])
        scratch2 = gen_rtx_REG (Pmode, 3);
    }
  else if (TARGET_SHMEDIA)
    {
      int i;

      for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
        if (i != REGNO (scratch0) &&
            call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
          {
            scratch1 = gen_rtx_REG (ptr_mode, i);
            break;
          }
      if (scratch1 == scratch0)
        error ("Need a second call-clobbered general purpose register");
      for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
        if (call_used_regs[i] && ! fixed_regs[i])
          {
            scratch2 = gen_rtx_REG (Pmode, i);
            break;
          }
      if (scratch2 == scratch0)
        error ("Need a call-clobbered target register");
    }

  this_value = plus_constant (this, delta);
  if (vcall_offset
      && (simple_add || scratch0 != scratch1)
      && strict_memory_address_p (ptr_mode, this_value))
    {
      emit_load_ptr (scratch0, this_value);
      did_load = 1;
    }

  if (!delta)
    ; /* Do nothing.  */
  else if (simple_add)
    emit_move_insn (this, this_value);
  else
    {
      emit_move_insn (scratch1, GEN_INT (delta));
      emit_insn (gen_add2_insn (this, scratch1));
    }

  if (vcall_offset)
    {
      rtx offset_addr;

      if (!did_load)
        emit_load_ptr (scratch0, this);

      offset_addr = plus_constant (scratch0, vcall_offset);
      if (strict_memory_address_p (ptr_mode, offset_addr))
        ; /* Do nothing.  */
      else if (! TARGET_SH5 && scratch0 != scratch1)
        {
          /* scratch0 != scratch1, and we have indexed loads.  Get better
             schedule by loading the offset into r1 and using an indexed
             load - then the load of r1 can issue before the load from
             (this + delta) finishes.  */
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
          offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
        }
      else if (CONST_OK_FOR_ADD (vcall_offset))
        {
          emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
          offset_addr = scratch0;
        }
      else if (scratch0 != scratch1)
        {
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
          emit_insn (gen_add2_insn (scratch0, scratch1));
          offset_addr = scratch0;
        }
      else
        gcc_unreachable (); /* FIXME */
      emit_load_ptr (scratch0, offset_addr);

      if (Pmode != ptr_mode)
        scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
      emit_insn (gen_add2_insn (this, scratch0));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  /* If the function is overridden, so is the thunk, hence we don't
     need GOT addressing even if this is a public symbol.  */
#if 0
  if (TARGET_SH1 && ! flag_weak)
    sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
  else
#endif
  if (TARGET_SH2 && flag_pic)
    {
      sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
      XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
    }
  else
    {
      if (TARGET_SHMEDIA && flag_pic)
        {
          funexp = gen_sym2PIC (funexp);
          PUT_MODE (funexp, Pmode);
        }
      emit_move_insn (scratch2, funexp);
      funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
      sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
    }
  sibcall = emit_call_insn (sibcall);
  SIBLING_CALL_P (sibcall) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
  emit_barrier ();

  /* Run just enough of rest_of_compilation to do scheduling and get
     the insns emitted.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  insns = get_insns ();

  if (optimize > 0)
    {
      /* Initialize the bitmap obstacks.  */
      bitmap_obstack_initialize (NULL);
      bitmap_obstack_initialize (&reg_obstack);
      if (! cfun->cfg)
        {
          rtl_register_cfg_hooks ();
          init_rtl_bb_info (ENTRY_BLOCK_PTR);
          init_rtl_bb_info (EXIT_BLOCK_PTR);
          ENTRY_BLOCK_PTR->flags |= BB_RTL;
          EXIT_BLOCK_PTR->flags |= BB_RTL;
          find_basic_blocks (insns);
        }

      if (flag_schedule_insns_after_reload)
        {
          life_analysis (dump_file, PROP_FINAL);

          split_all_insns (1);

          schedule_insns (dump_file);
        }
      /* We must split jmp insn in PIC case.  */
      else if (flag_pic)
        split_all_insns_noflow ();
    }

  sh_reorg ();

  if (optimize > 0 && flag_delayed_branch)
    dbr_schedule (insns, dump_file);

  shorten_branches (insns);
  final_start_function (insns, file, 1);
  final (insns, file, 1);
  final_end_function ();

  if (optimize > 0)
    {
      /* Release all memory allocated by flow.  */
      free_basic_block_vars ();

      /* Release the bitmap obstacks.  */
      bitmap_obstack_release (&reg_obstack);
      bitmap_obstack_release (NULL);
    }

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}
rtx
function_symbol (rtx target, const char *name, enum sh_function_kind kind)
{
  rtx sym;

  /* If this is not an ordinary function, the name usually comes from a
     string literal or an sprintf buffer.  Make sure we use the same
     string consistently, so that cse will be able to unify address loads.  */
  if (kind != FUNCTION_ORDINARY)
    name = IDENTIFIER_POINTER (get_identifier (name));
  sym = gen_rtx_SYMBOL_REF (Pmode, name);
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
  if (flag_pic)
    switch (kind)
      {
      case FUNCTION_ORDINARY:
        break;
      case SFUNC_GOT:
        {
          rtx reg = target ? target : gen_reg_rtx (Pmode);

          emit_insn (gen_symGOT2reg (reg, sym));
          sym = reg;
          break;
        }
      case SFUNC_STATIC:
        {
          /* ??? To allow cse to work, we use GOTOFF relocations.
             we could add combiner patterns to transform this into
             straight pc-relative calls with sym2PIC / bsrf when
             label load and function call are still 1:1 and in the
             same basic block during combine.  */
          rtx reg = target ? target : gen_reg_rtx (Pmode);

          emit_insn (gen_symGOTOFF2reg (reg, sym));
          sym = reg;
          break;
        }
      }
  if (target && sym != target)
    {
      emit_move_insn (target, sym);
      return target;
    }
  return sym;
}
/* Find the number of a general purpose register in S.  */
static int
scavenge_reg (HARD_REG_SET *s)
{
  int r;
  for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
    if (TEST_HARD_REG_BIT (*s, r))
      return r;
  return -1;
}
rtx
sh_get_pr_initial_val (void)
{
  rtx val;

  /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
     PR register on SHcompact, because it might be clobbered by the prologue.
     We check first if that is known to be the case.  */
  if (TARGET_SHCOMPACT
      && ((current_function_args_info.call_cookie
           & ~ CALL_COOKIE_RET_TRAMP (1))
          || current_function_has_nonlocal_label))
    return gen_rtx_MEM (SImode, return_address_pointer_rtx);

  /* If we haven't finished rtl generation, there might be a nonlocal label
     that we haven't seen yet.
     ??? get_hard_reg_initial_val fails if it is called while no_new_pseudos
     is set, unless it has been called before for the same register.  And even
     then, we end in trouble if we didn't use the register in the same
     basic block before.  So call get_hard_reg_initial_val now and wrap it
     in an unspec if we might need to replace it.  */
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
     combine can put the pseudo returned by get_hard_reg_initial_val into
     instructions that need a general purpose register, which will fail to
     be recognized when the pseudo becomes allocated to PR.  */
  val
    = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
  if (TARGET_SH1)
    return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
  return val;
}
int
sh_expand_t_scc (enum rtx_code code, rtx target)
{
  rtx result = target;
  HOST_WIDE_INT val;

  if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
      || GET_CODE (sh_compare_op1) != CONST_INT)
    return 0;
  if (GET_CODE (result) != REG)
    result = gen_reg_rtx (SImode);
  val = INTVAL (sh_compare_op1);
  if ((code == EQ && val == 1) || (code == NE && val == 0))
    emit_insn (gen_movt (result));
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
    {
      emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
      emit_insn (gen_subc (result, result, result));
      emit_insn (gen_addsi3 (result, result, const1_rtx));
    }
  else if (code == EQ || code == NE)
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
  else
    return 0;
  if (result != target)
    emit_move_insn (target, result);
  return 1;
}
/* INSN is an sfunc; return the rtx that describes the address used.  */
static rtx
extract_sfunc_addr (rtx insn)
{
  rtx pattern, part = NULL_RTX;
  int len, i;

  pattern = PATTERN (insn);
  len = XVECLEN (pattern, 0);
  for (i = 0; i < len; i++)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
          && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
        return XEXP (part, 0);
    }
  gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
  return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
}
/* Verify that the register in use_sfunc_addr still agrees with the address
   used in the sfunc.  This prevents fill_slots_from_thread from changing
   use_sfunc_addr.
   INSN is the use_sfunc_addr instruction, and REG is the register it
   guards.  */
int
check_use_sfunc_addr (rtx insn, rtx reg)
{
  /* Search for the sfunc.  It should really come right after INSN.  */
  while ((insn = NEXT_INSN (insn)))
    {
      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
        break;
      if (! INSN_P (insn))
        continue;

      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
        insn = XVECEXP (PATTERN (insn), 0, 0);
      if (GET_CODE (PATTERN (insn)) != PARALLEL
          || get_attr_type (insn) != TYPE_SFUNC)
        continue;
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
    }
  gcc_unreachable ();
}
/* This function returns a constant rtx that represents 2**15 / pi in
   SFmode.  It's used to scale SFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle, i.e., 2*pi
   maps to 0x10000.  */

static GTY(()) rtx sh_fsca_sf2int_rtx;

rtx
sh_fsca_sf2int (void)
{
  if (! sh_fsca_sf2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_sf2int_rtx;
}

/* This function returns a constant rtx that represents 2**15 / pi in
   DFmode.  It's used to scale DFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle, i.e., 2*pi
   maps to 0x10000.  */

static GTY(()) rtx sh_fsca_df2int_rtx;

rtx
sh_fsca_df2int (void)
{
  if (! sh_fsca_df2int_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "10430.378350470453");
      sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
    }

  return sh_fsca_df2int_rtx;
}

/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to a SFmode value, i.e., 0x10000 maps to
   2*pi.  */

static GTY(()) rtx sh_fsca_int2sf_rtx;

rtx
sh_fsca_int2sf (void)
{
  if (! sh_fsca_int2sf_rtx)
    {
      REAL_VALUE_TYPE rv;

      real_from_string (&rv, "9.587379924285257e-5");
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
    }

  return sh_fsca_int2sf_rtx;
}
/* Initialize the CUMULATIVE_ARGS structure.  */

void
sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
                         tree fntype,
                         rtx libname ATTRIBUTE_UNUSED,
                         tree fndecl,
                         signed int n_named_args,
                         enum machine_mode mode)
{
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
  pcum->free_single_fp_reg = 0;
  pcum->stack_regs = 0;
  pcum->byref_regs = 0;
  pcum->byref = 0;
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;

  /* XXX - Should we check TARGET_HITACHI here ???  */
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;

  if (fntype)
    {
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
                         && aggregate_value_p (TREE_TYPE (fntype), fndecl));
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
      pcum->arg_count [(int) SH_ARG_INT]
        = TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);

      pcum->call_cookie
        = CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
                                 && pcum->arg_count [(int) SH_ARG_INT] == 0
                                 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
                                     ? int_size_in_bytes (TREE_TYPE (fntype))
                                     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
                                 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
                                     == FIRST_RET_REG));
    }
  else
    {
      pcum->arg_count [(int) SH_ARG_INT] = 0;
      pcum->prototype_p = FALSE;
      if (mode != VOIDmode)
        {
          pcum->call_cookie =
            CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
                                   && GET_MODE_SIZE (mode) > 4
                                   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);

          /* If the default ABI is the Renesas ABI then all library
             calls must assume that the library will be using the
             Renesas ABI.  So if the function would return its result
             in memory then we must force the address of this memory
             block onto the stack.  Ideally we would like to call
             targetm.calls.return_in_memory() here but we do not have
             the TYPE or the FNDECL available so we synthesize the
             contents of that function as best we can.  */
          pcum->force_mem =
            (TARGET_DEFAULT & MASK_HITACHI)
            && (mode == BLKmode
                || (GET_MODE_SIZE (mode) > 4
                    && !(mode == DFmode
                         && TARGET_FPU_DOUBLE)));
        }
      else
        {
          pcum->call_cookie = 0;
          pcum->force_mem = FALSE;
        }
    }
}
/* Determine if two hard register sets intersect.
   Return 1 if they do.  */

static int
hard_regs_intersect_p (HARD_REG_SET *a, HARD_REG_SET *b)
{
  HARD_REG_SET c;
  COPY_HARD_REG_SET (c, *a);
  AND_HARD_REG_SET (c, *b);
  GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose);
  return 1;
lose:
  return 0;
}
#ifdef TARGET_ADJUST_UNROLL_MAX
static int
sh_adjust_unroll_max (struct loop * loop, int insn_count,
                      int max_unrolled_insns, int strength_reduce_p,
                      int unroll_type)
{
/* This doesn't work in 4.0 because the old unroller & loop.h is gone.  */
  if (TARGET_ADJUST_UNROLL && TARGET_SHMEDIA)
    {
      /* Throttle back loop unrolling so that the costs of using more
         targets than the eight target register we have don't outweigh
         the benefits of unrolling.  */
      rtx insn;
      int n_labels = 0, n_calls = 0, n_exit_dest = 0, n_inner_loops = -1;
      int n_barriers = 0;
      rtx dest;
      int i;
      rtx exit_dest[8];
      int threshold;
      int unroll_benefit = 0, mem_latency = 0;
      int base_cost, best_cost, cost;
      int factor, best_factor;
      int n_dest;
      unsigned max_iterations = 32767;
      int n_iterations;
      int need_precond = 0, precond = 0;
      basic_block * bbs = get_loop_body (loop);
      struct niter_desc *desc;

      /* Assume that all labels inside the loop are used from inside the
         loop.  If the loop has multiple entry points, it is unlikely to
         be unrolled anyways.
         Also assume that all calls are to different functions.  That is
         somewhat pessimistic, but if you have lots of calls, unrolling the
         loop is not likely to gain you much in the first place.  */
      i = loop->num_nodes - 1;
      for (insn = BB_HEAD (bbs[i]); ; )
        {
          if (GET_CODE (insn) == CODE_LABEL)
            n_labels++;
          else if (GET_CODE (insn) == CALL_INSN)
            n_calls++;
          else if (GET_CODE (insn) == NOTE
                   && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
            n_inner_loops++;
          else if (GET_CODE (insn) == BARRIER)
            n_barriers++;
          if (insn != BB_END (bbs[i]))
            insn = NEXT_INSN (insn);
          else if (--i >= 0)
            insn = BB_HEAD (bbs[i]);
          else
            break;
        }
      free (bbs);
      /* One label for the loop top is normal, and it won't be duplicated by
         unrolling.  */
      if (n_labels <= 1)
        return max_unrolled_insns;
      if (n_inner_loops > 0)
        return 0;
      for (dest = loop->exit_labels; dest && n_exit_dest < 8;
           dest = LABEL_NEXTREF (dest))
        {
          for (i = n_exit_dest - 1;
               i >= 0 && XEXP (dest, 0) != XEXP (exit_dest[i], 0); i--);
          if (i < 0)
            exit_dest[n_exit_dest++] = dest;
        }
      /* If the loop top and call and exit destinations are enough to fill up
         the target registers, we're unlikely to do any more damage by
         unrolling.  */
      if (n_calls + n_exit_dest >= 7)
        return max_unrolled_insns;

      /* ??? In the new loop unroller, there is no longer any strength
         reduction information available.  Thus, when it comes to unrolling,
         we know the cost of everything, but we know the value of nothing.  */
#if 0
      if (strength_reduce_p
          && (unroll_type == LPT_UNROLL_RUNTIME
              || unroll_type == LPT_UNROLL_CONSTANT
              || unroll_type == LPT_PEEL_COMPLETELY))
        {
          struct loop_ivs *ivs = LOOP_IVS (loop);
          struct iv_class *bl;

          /* We'll save one compare-and-branch in each loop body copy
             but the last one.  */
          unroll_benefit = 1;
          /* Assess the benefit of removing biv & giv updates.  */
          for (bl = ivs->list; bl; bl = bl->next)
            {
              rtx increment = biv_total_increment (bl);
              struct induction *v;

              if (increment && GET_CODE (increment) == CONST_INT)
                {
                  unroll_benefit++;
                  for (v = bl->giv; v; v = v->next_iv)
                    {
                      if (! v->ignore && v->same == 0
                          && GET_CODE (v->mult_val) == CONST_INT)
                        unroll_benefit++;
                      /* If this giv uses an array, try to determine
                         a maximum iteration count from the size of the
                         array.  This need not be correct all the time,
                         but should not be too far off the mark too often.  */
                      while (v->giv_type == DEST_ADDR)
                        {
                          rtx mem = PATTERN (v->insn);
                          tree mem_expr, type, size_tree;

                          if (GET_CODE (SET_SRC (mem)) == MEM)
                            mem = SET_SRC (mem);
                          else if (GET_CODE (SET_DEST (mem)) == MEM)
                            mem = SET_DEST (mem);
                          else
                            break;
                          mem_expr = MEM_EXPR (mem);
                          if (! mem_expr)
                            break;
                          type = TREE_TYPE (mem_expr);
                          if (TREE_CODE (type) != ARRAY_TYPE
                              || ! TYPE_SIZE (type) || ! TYPE_SIZE_UNIT (type))
                            break;
                          size_tree = fold (build (TRUNC_DIV_EXPR,
                                                   bitsizetype,
                                                   TYPE_SIZE (type),
                                                   TYPE_SIZE_UNIT (type)));
                          if (TREE_CODE (size_tree) == INTEGER_CST
                              && ! TREE_INT_CST_HIGH (size_tree)
                              && TREE_INT_CST_LOW (size_tree) < max_iterations)
                            max_iterations = TREE_INT_CST_LOW (size_tree);
                          break;
                        }
                    }
                }
            }
        }
      else
#endif /* 0 */
        {
          /* Assume there is at least some benefit.  */
          unroll_benefit = 1;
        }

      desc = get_simple_loop_desc (loop);
      n_iterations = desc->const_iter ? desc->niter : 0;
      max_iterations
        = max_iterations < desc->niter_max ? max_iterations : desc->niter_max;

      if (! strength_reduce_p || ! n_iterations)
        need_precond = 1;
      if (! n_iterations)
        {
          n_iterations
            = max_iterations < 3 ? max_iterations : max_iterations * 3 / 4;
          if (! n_iterations)
            return 0;
        }
#if 0 /* ??? See above - missing induction variable information.  */
      while (unroll_benefit > 1) /* no loop */
        {
          /* We include the benefit of biv/ giv updates.  Check if some or
             all of these updates are likely to fit into a scheduling
             bubble of a load.
             We check for the following case:
             - All the insns leading to the first JUMP_INSN are in a strict
               dependency chain.
             - there is at least one memory reference in them.

             When we find such a pattern, we assume that we can hide as many
             updates as the total of the load latency is, if we have an
             unroll factor of at least two.  We might or might not also do
             this without unrolling, so rather than considering this as an
             extra unroll benefit, discount it in the unroll benefits of unroll
             factors higher than two.  */

          rtx insn, last_set, set;

          insn = next_active_insn (loop->start);
          last_set = single_set (insn);
          if (! last_set)
            break;
          if (GET_CODE (SET_SRC (last_set)) == MEM)
            mem_latency += 2;
          for (insn = NEXT_INSN (insn); insn != end; insn = NEXT_INSN (insn))
            {
              if (! INSN_P (insn))
                continue;
              if (GET_CODE (insn) == JUMP_INSN)
                break;
              if (! reg_referenced_p (SET_DEST (last_set), PATTERN (insn)))
                {
                  /* Check if this is a to-be-reduced giv insn.  */
                  struct loop_ivs *ivs = LOOP_IVS (loop);
                  struct iv_class *bl;
                  struct induction *v;
                  for (bl = ivs->list; bl; bl = bl->next)
                    {
                      if (bl->biv->insn == insn)
                        goto is_biv;
                      for (v = bl->giv; v; v = v->next_iv)
                        if (v->insn == insn)
                          goto is_giv;
                    }
                  mem_latency--;
                is_biv:
                is_giv:
                  continue;
                }
              set = single_set (insn);
              if (! set)
                continue;
              if (GET_CODE (SET_SRC (set)) == MEM)
                mem_latency += 2;
            }
          if (mem_latency < 0)
            mem_latency = 0;
          else if (mem_latency > unroll_benefit - 1)
            mem_latency = unroll_benefit - 1;
          break;
        }
#endif /* 0 */
      if (n_labels + (unroll_benefit + n_labels * 8) / n_iterations
          <= unroll_benefit)
        return max_unrolled_insns;

      n_dest = n_labels + n_calls + n_exit_dest;
      base_cost = n_dest <= 8 ? 0 : n_dest - 7;
      best_cost = 0;
      best_factor = 1;
      if (n_barriers * 2 > n_labels - 1)
        n_barriers = (n_labels - 1) / 2;
      for (factor = 2; factor <= 8; factor++)
        {
          /* Bump up preconditioning cost for each power of two.  */
          if (! (factor & (factor-1)))
            precond += 4;
          /* When preconditioning, only powers of two will be considered.  */
          else if (need_precond)
            continue;
          n_dest = ((unroll_type != LPT_PEEL_COMPLETELY)
                    + (n_labels - 1) * factor + n_calls + n_exit_dest
                    - (n_barriers * factor >> 1)
                    + need_precond);
          cost
            = ((n_dest <= 8 ? 0 : n_dest - 7)
               - base_cost * factor
               - ((factor > 2 ? unroll_benefit - mem_latency : unroll_benefit)
                  * (factor - (unroll_type != LPT_PEEL_COMPLETELY)))
               + ((unroll_benefit + 1 + (n_labels - 1) * factor)
                  / n_iterations));
          if (need_precond)
            cost += (precond + unroll_benefit * factor / 2) / n_iterations;
          if (cost < best_cost)
            {
              best_cost = cost;
              best_factor = factor;
            }
        }
      threshold = best_factor * insn_count;
      if (max_unrolled_insns > threshold)
        max_unrolled_insns = threshold;
    }
  return max_unrolled_insns;
}
#endif /* TARGET_ADJUST_UNROLL_MAX */
/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
   not enter into CONST_DOUBLE for the replace.

   Note that copying is not done so X must not be shared unless all copies
   are to be modified.

   This is like replace_rtx, except that we operate on N_REPLACEMENTS
   replacements simultaneously - FROM(n) is replacements[n*2] and to(n) is
   replacements[n*2+1] - and that we take mode changes into account.

   If a replacement is ambiguous, return NULL_RTX.

   If MODIFY is zero, don't modify any rtl in place,
   just return zero or nonzero for failure / success.  */

rtx
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
{
  int i, j;
  const char *fmt;

  /* The following prevents loops occurrence when we change MEM in
     CONST_DOUBLE onto the same CONST_DOUBLE.  */
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
    return x;

  for (i = n_replacements - 1; i >= 0 ; i--)
    if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
      return replacements[i*2+1];

  /* Allow this function to make replacements in EXPR_LISTs.  */
  if (x == 0)
    return 0;

  if (GET_CODE (x) == SUBREG)
    {
      rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
                                    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
        {
          x = simplify_subreg (GET_MODE (x), new,
                               GET_MODE (SUBREG_REG (x)),
                               SUBREG_BYTE (x));
          if (! x)
            abort ();
        }
      else if (modify)
        SUBREG_REG (x) = new;

      return x;
    }
  else if (GET_CODE (x) == REG)
    {
      unsigned regno = REGNO (x);
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
                        ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
      rtx result = NULL_RTX;

      for (i = n_replacements - 1; i >= 0; i--)
        {
          rtx from = replacements[i*2];
          rtx to = replacements[i*2+1];
          unsigned from_regno, from_nregs, to_regno, new_regno;

          if (GET_CODE (from) != REG)
            continue;
          from_regno = REGNO (from);
          from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
                        ? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
          if (regno < from_regno + from_nregs && regno + nregs > from_regno)
            {
              if (regno < from_regno
                  || regno + nregs > from_regno + nregs
                  || GET_CODE (to) != REG
                  || result)
                return NULL_RTX;
              to_regno = REGNO (to);
              if (to_regno < FIRST_PSEUDO_REGISTER)
                {
                  new_regno = regno + to_regno - from_regno;
                  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
                      != nregs)
                    return NULL_RTX;
                  result = gen_rtx_REG (GET_MODE (x), new_regno);
                }
              else if (GET_MODE (x) <= GET_MODE (to))
                result = gen_lowpart_common (GET_MODE (x), to);
              else
                result = gen_lowpart_SUBREG (GET_MODE (x), to);
            }
        }
      return result ? result : x;
    }
  else if (GET_CODE (x) == ZERO_EXTEND)
    {
      rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
                                    n_replacements, modify);

      if (GET_CODE (new) == CONST_INT)
        {
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
                                        new, GET_MODE (XEXP (x, 0)));
          if (! x)
            abort ();
        }
      else if (modify)
        XEXP (x, 0) = new;

      return x;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      rtx new;

      if (fmt[i] == 'e')
        {
          new = replace_n_hard_rtx (XEXP (x, i), replacements,
                                    n_replacements, modify);
          if (!new)
            return NULL_RTX;
          if (modify)
            XEXP (x, i) = new;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          {
            new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
                                      n_replacements, modify);
            if (!new)
              return NULL_RTX;
            if (modify)
              XVECEXP (x, i, j) = new;
          }
    }

  return x;
}
rtx
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
{
  enum rtx_code code = TRUNCATE;

  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
    {
      rtx inner = XEXP (x, 0);
      enum machine_mode inner_mode = GET_MODE (inner);

      if (inner_mode == mode)
        return inner;
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
        x = inner;
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
               && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
        {
          code = GET_CODE (x);
          x = inner;
        }
    }
  return gen_rtx_fmt_e (code, mode, x);
}
/* Called via for_each_rtx after reload, to clean up truncates of
   registers that span multiple actual hard registers.  */
int
shmedia_cleanup_truncate (rtx *p, void *n_changes)
{
  rtx x = *p, reg;

  if (GET_CODE (x) != TRUNCATE)
    return 0;
  reg = XEXP (x, 0);
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
    {
      enum machine_mode reg_mode = GET_MODE (reg);
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
                                     subreg_lowpart_offset (DImode, reg_mode));
      *(int*) n_changes += 1;
      return -1;
    }
  return 0;
}
/* Load and store depend on the highpart of the address.  However,
   set_attr_alternative does not give well-defined results before reload,
   so we must look at the rtl ourselves to see if any of the feeding
   registers is used in a memref.  */

/* Called by sh_contains_memref_p via for_each_rtx.  */
static int
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  return (GET_CODE (*loc) == MEM);
}

/* Return nonzero iff INSN contains a MEM.  */
int
sh_contains_memref_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
}
/* FNADDR is the MEM expression from a call expander.  Return an address
   to use in an SHmedia insn pattern.  */
rtx
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
{
  int is_sym;

  fnaddr = XEXP (fnaddr, 0);
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
  if (flag_pic && is_sym)
    {
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
        {
          rtx reg = gen_reg_rtx (Pmode);

          /* We must not use GOTPLT for sibcalls, because PIC_REG
             must be restored before the PLT code gets to run.  */
          if (is_sibcall)
            emit_insn (gen_symGOT2reg (reg, fnaddr));
          else
            emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
          fnaddr = reg;
        }
      else
        {
          fnaddr = gen_sym2PIC (fnaddr);
          PUT_MODE (fnaddr, Pmode);
        }
    }
  /* If ptabs might trap, make this visible to the rest of the compiler.
     We generally assume that symbols pertain to valid locations, but
     it is possible to generate invalid symbols with asm or linker tricks.
     In a list of functions where each returns its successor, an invalid
     symbol might denote an empty list.  */
  if (!TARGET_PT_FIXED
      && (!is_sym || TARGET_INVALID_SYMBOLS)
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
    {
      rtx tr = gen_reg_rtx (PDImode);

      emit_insn (gen_ptabs (tr, fnaddr));
      fnaddr = tr;
    }
  else if (! target_reg_operand (fnaddr, Pmode))
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
  return fnaddr;
}
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;

/* This defines the storage for the variable part of a -mboard= option.
   It is only required when using the sh-superh-elf target.  */
const char * boardtype = "7750p2";
const char * osruntime = "bare";