1 /* Output routines for GCC for Renesas / SuperH SH.
2 Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
4 Contributed by Steve Chamberlain (sac@cygnus.com).
5 Improved by Jim Wilson (wilson@cygnus.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "insn-config.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "flags.h"
31 #include "expr.h"
32 #include "optabs.h"
33 #include "function.h"
34 #include "regs.h"
35 #include "hard-reg-set.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "c-pragma.h"
41 #include "integrate.h"
42 #include "dwarf2.h"
43 #include "tm_p.h"
44 #include "target.h"
45 #include "target-def.h"
46 #include "real.h"
47 #include "langhooks.h"
48 #include "basic-block.h"
49 #include "df.h"
50 #include "cfglayout.h"
51 #include "intl.h"
52 #include "sched-int.h"
53 #include "ggc.h"
54 #include "gimple.h"
55 #include "cfgloop.h"
56 #include "alloc-pool.h"
57 #include "tm-constrs.h"
60 int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
62 #define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
63 #define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
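/* For illustration: on a little-endian target MSW is 1 and LSW is 0, so
   adjust_address (x, SImode, 4 * MSW) selects byte offset 4, where the
   most significant word lives, while 4 * LSW selects offset 0 for the
   least significant word; the offsets swap on big-endian targets.  */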
65 /* These are some macros to abstract register modes. */
66 #define CONST_OK_FOR_ADD(size) \
67 (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
68 #define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
69 #define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
70 #define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
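/* Example use of the abstractions above (illustrative): a caller can
   write GEN_ADD3 (dst, src, GEN_INT (8)) and get an addsi3 on 32-bit
   targets but an adddi3 under TARGET_SHMEDIA64, so pointer-sized
   arithmetic does not need explicit word-size checks at each call
   site.  */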
72 /* Used to simplify the logic below. Find the attributes wherever
73 they may be. */
74 #define SH_ATTRIBUTES(decl) \
75 (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
76 : DECL_ATTRIBUTES (decl) \
77 ? (DECL_ATTRIBUTES (decl)) \
78 : TYPE_ATTRIBUTES (TREE_TYPE (decl))
80 /* Set to 1 by expand_prologue() when the function is an interrupt handler. */
81 int current_function_interrupt;
83 tree sh_deferred_function_attributes;
84 tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
86 /* Global variables for machine-dependent things. */
88 /* Which cpu we are scheduling for. */
89 enum processor_type sh_cpu;
91 /* Definitions used in ready queue reordering for first scheduling pass. */
93 /* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID. */
94 static short *regmode_weight[2];
96 /* Total SFmode and SImode weights of scheduled insns. */
97 static int curr_regmode_pressure[2];
99 /* Number of r0 life regions. */
100 static int r0_life_regions;
102 /* If true, skip cycles for Q -> R movement. */
103 static int skip_cycles = 0;
105 /* Cached value of can_issue_more. This is cached in sh_variable_issue hook
106 and returned from sh_reorder2. */
107 static short cached_can_issue_more;
109 /* Saved operands from the last compare to use when we generate an scc
110 or bcc insn. */
112 rtx sh_compare_op0;
113 rtx sh_compare_op1;
115 /* Provides the class number of the smallest class containing
116 reg number. */
118 enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
120 R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
121 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
122 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
123 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
124 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
125 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
126 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
127 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
128 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
129 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
130 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
131 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
132 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
133 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
134 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
135 GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
136 FP0_REGS,FP_REGS, FP_REGS, FP_REGS,
137 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
138 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
139 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
140 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
141 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
142 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
143 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
144 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
145 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
146 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
147 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
148 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
149 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
150 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
151 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
152 TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
153 TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
154 DF_REGS, DF_REGS, DF_REGS, DF_REGS,
155 DF_REGS, DF_REGS, DF_REGS, DF_REGS,
156 NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
157 MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
158 GENERAL_REGS, GENERAL_REGS,
161 char sh_register_names[FIRST_PSEUDO_REGISTER] \
162 [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;
164 char sh_additional_register_names[ADDREGNAMES_SIZE] \
165 [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
166 = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;
168 int assembler_dialect;
170 static bool shmedia_space_reserved_for_target_registers;
172 static bool sh_handle_option (size_t, const char *, int);
173 static void split_branches (rtx);
174 static int branch_dest (rtx);
175 static void force_into (rtx, rtx);
176 static void print_slot (rtx);
177 static rtx add_constant (rtx, enum machine_mode, rtx);
178 static void dump_table (rtx, rtx);
179 static int hi_const (rtx);
180 static int broken_move (rtx);
181 static int mova_p (rtx);
182 static rtx find_barrier (int, rtx, rtx);
183 static int noncall_uses_reg (rtx, rtx, rtx *);
184 static rtx gen_block_redirect (rtx, int, int);
185 static void sh_reorg (void);
186 static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
187 static rtx frame_insn (rtx);
188 static rtx push (int);
189 static void pop (int);
190 static void push_regs (HARD_REG_SET *, int);
191 static int calc_live_regs (HARD_REG_SET *);
192 static HOST_WIDE_INT rounded_frame_size (int);
193 static rtx mark_constant_pool_use (rtx);
194 const struct attribute_spec sh_attribute_table[];
195 static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
196 static tree sh_handle_resbank_handler_attribute (tree *, tree,
197 tree, int, bool *);
198 static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
199 tree, int, bool *);
200 static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
201 static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
202 static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
203 static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
204 static void sh_insert_attributes (tree, tree *);
205 static const char *sh_check_pch_target_flags (int);
206 static int sh_adjust_cost (rtx, rtx, rtx, int);
207 static int sh_issue_rate (void);
208 static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
209 static short find_set_regmode_weight (rtx, enum machine_mode);
210 static short find_insn_regmode_weight (rtx, enum machine_mode);
211 static void find_regmode_weight (basic_block, enum machine_mode);
212 static int find_r0_life_regions (basic_block);
213 static void sh_md_init_global (FILE *, int, int);
214 static void sh_md_finish_global (FILE *, int);
215 static int rank_for_reorder (const void *, const void *);
216 static void swap_reorder (rtx *, int);
217 static void ready_reorder (rtx *, int);
218 static short high_pressure (enum machine_mode);
219 static int sh_reorder (FILE *, int, rtx *, int *, int);
220 static int sh_reorder2 (FILE *, int, rtx *, int *, int);
221 static void sh_md_init (FILE *, int, int);
222 static int sh_variable_issue (FILE *, int, rtx, int);
224 static bool sh_function_ok_for_sibcall (tree, tree);
226 static bool sh_cannot_modify_jumps_p (void);
227 static int sh_target_reg_class (void);
228 static bool sh_optimize_target_register_callee_saved (bool);
229 static bool sh_ms_bitfield_layout_p (const_tree);
231 static void sh_init_builtins (void);
232 static void sh_media_init_builtins (void);
233 static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
234 static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
235 static void sh_file_start (void);
236 static int flow_dependent_p (rtx, rtx);
237 static void flow_dependent_p_1 (rtx, const_rtx, void *);
238 static int shiftcosts (rtx);
239 static int andcosts (rtx);
240 static int addsubcosts (rtx);
241 static int multcosts (rtx);
242 static bool unspec_caller_rtx_p (rtx);
243 static bool sh_cannot_copy_insn_p (rtx);
244 static bool sh_rtx_costs (rtx, int, int, int *);
245 static int sh_address_cost (rtx);
246 static int sh_pr_n_sets (void);
247 static rtx sh_allocate_initial_value (rtx);
248 static int shmedia_target_regs_stack_space (HARD_REG_SET *);
249 static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
250 static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
251 static int scavenge_reg (HARD_REG_SET *s);
252 struct save_schedule_s;
253 static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
254 struct save_schedule_s *, int);
256 static rtx sh_struct_value_rtx (tree, int);
257 static bool sh_return_in_memory (const_tree, const_tree);
258 static rtx sh_builtin_saveregs (void);
259 static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
260 static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
261 static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
262 static tree sh_build_builtin_va_list (void);
263 static tree sh_canonical_va_list_type (tree);
264 static void sh_va_start (tree, rtx);
265 static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
266 static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
267 const_tree, bool);
268 static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
269 const_tree, bool);
270 static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
271 tree, bool);
272 static bool sh_scalar_mode_supported_p (enum machine_mode);
273 static int sh_dwarf_calling_convention (const_tree);
274 static void sh_encode_section_info (tree, rtx, int);
275 static int sh2a_function_vector_p (tree);
278 /* Initialize the GCC target structure. */
279 #undef TARGET_ATTRIBUTE_TABLE
280 #define TARGET_ATTRIBUTE_TABLE sh_attribute_table
282 /* The next two are used for debug info when compiling with -gdwarf. */
283 #undef TARGET_ASM_UNALIGNED_HI_OP
284 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
285 #undef TARGET_ASM_UNALIGNED_SI_OP
286 #define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"
288 /* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS. */
289 #undef TARGET_ASM_UNALIGNED_DI_OP
290 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
291 #undef TARGET_ASM_ALIGNED_DI_OP
292 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
294 #undef TARGET_ASM_FUNCTION_EPILOGUE
295 #define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue
297 #undef TARGET_ASM_OUTPUT_MI_THUNK
298 #define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk
300 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
301 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
303 #undef TARGET_ASM_FILE_START
304 #define TARGET_ASM_FILE_START sh_file_start
305 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
306 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
308 #undef TARGET_DEFAULT_TARGET_FLAGS
309 #define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
310 #undef TARGET_HANDLE_OPTION
311 #define TARGET_HANDLE_OPTION sh_handle_option
313 #undef TARGET_INSERT_ATTRIBUTES
314 #define TARGET_INSERT_ATTRIBUTES sh_insert_attributes
316 #undef TARGET_SCHED_ADJUST_COST
317 #define TARGET_SCHED_ADJUST_COST sh_adjust_cost
319 #undef TARGET_SCHED_ISSUE_RATE
320 #define TARGET_SCHED_ISSUE_RATE sh_issue_rate
322 /* The following hooks have been implemented to reenable sched1. With the
323 help of these hooks we limit the movement of insns in sched1 to
324 reduce the register pressure. The overall idea is to keep count of SImode
325 and SFmode regs required by already scheduled insns. When these counts
326 cross certain threshold values, we give priority to insns that free registers.
327 The insn that frees registers is most likely to be the insn with lowest
328 LUID (original insn order); but such an insn might be sitting in the stalled
329 queue (Q) instead of the ready queue (R). To solve this, we skip cycles
330 up to a maximum of 8 cycles so that such insns may move from Q -> R.
332 The descriptions of the hooks are as below:
334 TARGET_SCHED_INIT_GLOBAL: A new target hook in the generic
335 scheduler; it is called inside the sched_init function just after the
336 find_insn_reg_weights function call. It is used to calculate the SImode
337 and SFmode weights of the insns of each basic block, much like what
338 find_insn_reg_weights does.
339 TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
341 TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
342 indicated by TARGET_SCHED_REORDER2; doing this may move insns from
343 (Q)->(R).
345 TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
346 high; reorder the ready queue so that the insn with lowest LUID will be
347 issued next.
349 TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
350 TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
352 TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
353 can be returned from TARGET_SCHED_REORDER2.
355 TARGET_SCHED_INIT: Reset the register pressure counting variables. */
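/* Illustrative flow, assuming the threshold checks below: once
   sh_md_init_global has recorded per-insn SImode/SFmode weights,
   sh_variable_issue caches can_issue_more; when CURR_REGMODE_PRESSURE
   exceeds the high-pressure limit, sh_reorder/sh_reorder2 favour the
   ready insn with the lowest LUID and sh_dfa_new_cycle may skip up to 8
   cycles so stalled insns can migrate from Q to R.  */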
357 #undef TARGET_SCHED_DFA_NEW_CYCLE
358 #define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
360 #undef TARGET_SCHED_INIT_GLOBAL
361 #define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
363 #undef TARGET_SCHED_FINISH_GLOBAL
364 #define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
366 #undef TARGET_SCHED_VARIABLE_ISSUE
367 #define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
369 #undef TARGET_SCHED_REORDER
370 #define TARGET_SCHED_REORDER sh_reorder
372 #undef TARGET_SCHED_REORDER2
373 #define TARGET_SCHED_REORDER2 sh_reorder2
375 #undef TARGET_SCHED_INIT
376 #define TARGET_SCHED_INIT sh_md_init
378 #undef TARGET_CANNOT_MODIFY_JUMPS_P
379 #define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
380 #undef TARGET_BRANCH_TARGET_REGISTER_CLASS
381 #define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
382 #undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
383 #define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
384 sh_optimize_target_register_callee_saved
386 #undef TARGET_MS_BITFIELD_LAYOUT_P
387 #define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p
389 #undef TARGET_INIT_BUILTINS
390 #define TARGET_INIT_BUILTINS sh_init_builtins
391 #undef TARGET_EXPAND_BUILTIN
392 #define TARGET_EXPAND_BUILTIN sh_expand_builtin
394 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
395 #define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall
397 #undef TARGET_CANNOT_COPY_INSN_P
398 #define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
399 #undef TARGET_RTX_COSTS
400 #define TARGET_RTX_COSTS sh_rtx_costs
401 #undef TARGET_ADDRESS_COST
402 #define TARGET_ADDRESS_COST sh_address_cost
403 #undef TARGET_ALLOCATE_INITIAL_VALUE
404 #define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value
406 #undef TARGET_MACHINE_DEPENDENT_REORG
407 #define TARGET_MACHINE_DEPENDENT_REORG sh_reorg
409 #ifdef HAVE_AS_TLS
410 #undef TARGET_HAVE_TLS
411 #define TARGET_HAVE_TLS true
412 #endif
414 #undef TARGET_PROMOTE_PROTOTYPES
415 #define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
416 #undef TARGET_PROMOTE_FUNCTION_ARGS
417 #define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
418 #undef TARGET_PROMOTE_FUNCTION_RETURN
419 #define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
421 #undef TARGET_STRUCT_VALUE_RTX
422 #define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
423 #undef TARGET_RETURN_IN_MEMORY
424 #define TARGET_RETURN_IN_MEMORY sh_return_in_memory
426 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
427 #define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
428 #undef TARGET_SETUP_INCOMING_VARARGS
429 #define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
430 #undef TARGET_STRICT_ARGUMENT_NAMING
431 #define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
432 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
433 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
434 #undef TARGET_MUST_PASS_IN_STACK
435 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
436 #undef TARGET_PASS_BY_REFERENCE
437 #define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
438 #undef TARGET_CALLEE_COPIES
439 #define TARGET_CALLEE_COPIES sh_callee_copies
440 #undef TARGET_ARG_PARTIAL_BYTES
441 #define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
443 #undef TARGET_BUILD_BUILTIN_VA_LIST
444 #define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
445 #undef TARGET_CANONICAL_VA_LIST_TYPE
446 #define TARGET_CANONICAL_VA_LIST_TYPE sh_canonical_va_list_type
447 #undef TARGET_EXPAND_BUILTIN_VA_START
448 #define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
449 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
450 #define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
452 #undef TARGET_SCALAR_MODE_SUPPORTED_P
453 #define TARGET_SCALAR_MODE_SUPPORTED_P sh_scalar_mode_supported_p
454 #undef TARGET_VECTOR_MODE_SUPPORTED_P
455 #define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
457 #undef TARGET_CHECK_PCH_TARGET_FLAGS
458 #define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags
460 #undef TARGET_DWARF_CALLING_CONVENTION
461 #define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
463 /* Return regmode weight for insn. */
464 #define INSN_REGMODE_WEIGHT(INSN, MODE) regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
466 /* Return current register pressure for regmode. */
467 #define CURR_REGMODE_PRESSURE(MODE) curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
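/* For example, INSN_REGMODE_WEIGHT (insn, SImode) reads
   regmode_weight[0][INSN_UID (insn)], while any other mode (in practice
   SFmode) selects index 1; CURR_REGMODE_PRESSURE indexes
   curr_regmode_pressure the same way.  */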
469 #undef TARGET_ENCODE_SECTION_INFO
470 #define TARGET_ENCODE_SECTION_INFO sh_encode_section_info
472 #ifdef SYMBIAN
474 #undef TARGET_ENCODE_SECTION_INFO
475 #define TARGET_ENCODE_SECTION_INFO sh_symbian_encode_section_info
476 #undef TARGET_STRIP_NAME_ENCODING
477 #define TARGET_STRIP_NAME_ENCODING sh_symbian_strip_name_encoding
478 #undef TARGET_CXX_IMPORT_EXPORT_CLASS
479 #define TARGET_CXX_IMPORT_EXPORT_CLASS symbian_import_export_class
481 #endif /* SYMBIAN */
483 #undef TARGET_SECONDARY_RELOAD
484 #define TARGET_SECONDARY_RELOAD sh_secondary_reload
486 /* Machine-specific symbol_ref flags. */
487 #define SYMBOL_FLAG_FUNCVEC_FUNCTION (SYMBOL_FLAG_MACH_DEP << 0)
489 struct gcc_target targetm = TARGET_INITIALIZER;
491 /* Implement TARGET_HANDLE_OPTION. */
493 static bool
494 sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
495 int value ATTRIBUTE_UNUSED)
497 switch (code)
499 case OPT_m1:
500 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
501 return true;
503 case OPT_m2:
504 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
505 return true;
507 case OPT_m2a:
508 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
509 return true;
511 case OPT_m2a_nofpu:
512 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
513 return true;
515 case OPT_m2a_single:
516 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
517 return true;
519 case OPT_m2a_single_only:
520 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
521 return true;
523 case OPT_m2e:
524 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
525 return true;
527 case OPT_m3:
528 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
529 return true;
531 case OPT_m3e:
532 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
533 return true;
535 case OPT_m4:
536 case OPT_m4_100:
537 case OPT_m4_200:
538 case OPT_m4_300:
539 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
540 return true;
542 case OPT_m4_nofpu:
543 case OPT_m4_100_nofpu:
544 case OPT_m4_200_nofpu:
545 case OPT_m4_300_nofpu:
546 case OPT_m4_340:
547 case OPT_m4_400:
548 case OPT_m4_500:
549 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
550 return true;
552 case OPT_m4_single:
553 case OPT_m4_100_single:
554 case OPT_m4_200_single:
555 case OPT_m4_300_single:
556 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
557 return true;
559 case OPT_m4_single_only:
560 case OPT_m4_100_single_only:
561 case OPT_m4_200_single_only:
562 case OPT_m4_300_single_only:
563 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
564 return true;
566 case OPT_m4a:
567 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
568 return true;
570 case OPT_m4a_nofpu:
571 case OPT_m4al:
572 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
573 return true;
575 case OPT_m4a_single:
576 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
577 return true;
579 case OPT_m4a_single_only:
580 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
581 return true;
583 case OPT_m5_32media:
584 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
585 return true;
587 case OPT_m5_32media_nofpu:
588 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
589 return true;
591 case OPT_m5_64media:
592 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
593 return true;
595 case OPT_m5_64media_nofpu:
596 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
597 return true;
599 case OPT_m5_compact:
600 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
601 return true;
603 case OPT_m5_compact_nofpu:
604 target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
605 return true;
607 default:
608 return true;
612 /* Print the operand address in x to the stream. */
614 void
615 print_operand_address (FILE *stream, rtx x)
617 switch (GET_CODE (x))
619 case REG:
620 case SUBREG:
621 fprintf (stream, "@%s", reg_names[true_regnum (x)]);
622 break;
624 case PLUS:
626 rtx base = XEXP (x, 0);
627 rtx index = XEXP (x, 1);
629 switch (GET_CODE (index))
631 case CONST_INT:
632 fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
633 reg_names[true_regnum (base)]);
634 break;
636 case REG:
637 case SUBREG:
639 int base_num = true_regnum (base);
640 int index_num = true_regnum (index);
642 fprintf (stream, "@(r0,%s)",
643 reg_names[MAX (base_num, index_num)]);
644 break;
647 default:
648 gcc_unreachable ();
651 break;
653 case PRE_DEC:
654 fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
655 break;
657 case POST_INC:
658 fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
659 break;
661 default:
662 x = mark_constant_pool_use (x);
663 output_addr_const (stream, x);
664 break;
668 /* Print operand x (an rtx) in assembler syntax to file stream
669 according to modifier code.
671 '.' print a .s if insn needs delay slot
672 ',' print LOCAL_LABEL_PREFIX
673 '@' print trap, rte or rts depending upon pragma interruptness
674 '#' output a nop if there is nothing to put in the delay slot
675 ''' print likelihood suffix (/u for unlikely).
676 '>' print branch target if -fverbose-asm
677 'O' print a constant without the #
678 'R' print the LSW of a dp value - changes if in little endian
679 'S' print the MSW of a dp value - changes if in little endian
680 'T' print the next word of a dp value - same as 'R' in big endian mode.
681 'M' SHMEDIA: print an `x' if `m' will print `base,index'.
682 otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
683 'N' print 'r63' if the operand is (const_int 0).
684 'd' print a V2SF reg as dN instead of fpN.
685 'm' print a pair `base,offset' or `base,index', for LD and ST.
686 'U' Likewise for {LD,ST}{HI,LO}.
687 'V' print the position of a single bit set.
688 'W' print the position of a single bit cleared.
689 't' print a memory address which is a register.
690 'u' prints the lowest 16 bits of CONST_INT, as an unsigned value.
691 'o' output an operator. */
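/* Usage sketch: the output templates in this file and in sh.md rely on
   these codes; e.g. output_movedouble below emits
   "sts mach,%S0\n\tsts macl,%R0", where %S0 and %R0 resolve to the MSW
   and LSW registers of the double-word destination, and "%T1" names the
   word following the source operand.  */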
693 void
694 print_operand (FILE *stream, rtx x, int code)
696 int regno;
697 enum machine_mode mode;
699 switch (code)
701 tree trapa_attr;
703 case '.':
704 if (final_sequence
705 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
706 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
707 fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
708 break;
709 case ',':
710 fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
711 break;
712 case '@':
713 trapa_attr = lookup_attribute ("trap_exit",
714 DECL_ATTRIBUTES (current_function_decl));
715 if (trapa_attr)
716 fprintf (stream, "trapa #%ld",
717 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
718 else if (sh_cfun_interrupt_handler_p ())
720 if (sh_cfun_resbank_handler_p ())
721 fprintf (stream, "resbank\n");
722 fprintf (stream, "rte");
724 else
725 fprintf (stream, "rts");
726 break;
727 case '#':
728 /* Output a nop if there's nothing in the delay slot. */
729 if (dbr_sequence_length () == 0)
730 fprintf (stream, "\n\tnop");
731 break;
732 case '\'':
734 rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);
736 if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
737 fputs ("/u", stream);
738 break;
740 case '>':
741 if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
743 fputs ("\t! target: ", stream);
744 output_addr_const (stream, JUMP_LABEL (current_output_insn));
746 break;
747 case 'O':
748 x = mark_constant_pool_use (x);
749 output_addr_const (stream, x);
750 break;
751 /* N.B.: %R / %S / %T adjust memory addresses by four.
752 For SHMEDIA, that means they can be used to access the first and
753 second 32 bit part of a 64 bit (or larger) value that
754 might be held in floating point registers or memory.
755 While they can be used to access 64 bit parts of a larger value
756 held in general purpose registers, that won't work with memory -
757 neither for fp registers, since the frxx names are used. */
758 case 'R':
759 if (REG_P (x) || GET_CODE (x) == SUBREG)
761 regno = true_regnum (x);
762 regno += FP_REGISTER_P (regno) ? 1 : LSW;
763 fputs (reg_names[regno], (stream));
765 else if (MEM_P (x))
767 x = adjust_address (x, SImode, 4 * LSW);
768 print_operand_address (stream, XEXP (x, 0));
770 else
772 rtx sub = NULL_RTX;
774 mode = GET_MODE (x);
775 if (mode == VOIDmode)
776 mode = DImode;
777 if (GET_MODE_SIZE (mode) >= 8)
778 sub = simplify_subreg (SImode, x, mode, 4 * LSW);
779 if (sub)
780 print_operand (stream, sub, 0);
781 else
782 output_operand_lossage ("invalid operand to %%R");
784 break;
785 case 'S':
786 if (REG_P (x) || GET_CODE (x) == SUBREG)
788 regno = true_regnum (x);
789 regno += FP_REGISTER_P (regno) ? 0 : MSW;
790 fputs (reg_names[regno], (stream));
792 else if (MEM_P (x))
794 x = adjust_address (x, SImode, 4 * MSW);
795 print_operand_address (stream, XEXP (x, 0));
797 else
799 rtx sub = NULL_RTX;
801 mode = GET_MODE (x);
802 if (mode == VOIDmode)
803 mode = DImode;
804 if (GET_MODE_SIZE (mode) >= 8)
805 sub = simplify_subreg (SImode, x, mode, 4 * MSW);
806 if (sub)
807 print_operand (stream, sub, 0);
808 else
809 output_operand_lossage ("invalid operand to %%S");
811 break;
812 case 'T':
813 /* Next word of a double. */
814 switch (GET_CODE (x))
816 case REG:
817 fputs (reg_names[REGNO (x) + 1], (stream));
818 break;
819 case MEM:
820 if (GET_CODE (XEXP (x, 0)) != PRE_DEC
821 && GET_CODE (XEXP (x, 0)) != POST_INC)
822 x = adjust_address (x, SImode, 4);
823 print_operand_address (stream, XEXP (x, 0));
824 break;
825 default:
826 break;
828 break;
830 case 't':
831 gcc_assert (GET_CODE (x) == MEM);
832 x = XEXP (x, 0);
833 switch (GET_CODE (x))
835 case REG:
836 case SUBREG:
837 print_operand (stream, x, 0);
838 break;
839 default:
840 break;
842 break;
844 case 'o':
845 switch (GET_CODE (x))
847 case PLUS: fputs ("add", stream); break;
848 case MINUS: fputs ("sub", stream); break;
849 case MULT: fputs ("mul", stream); break;
850 case DIV: fputs ("div", stream); break;
851 case EQ: fputs ("eq", stream); break;
852 case NE: fputs ("ne", stream); break;
853 case GT: case LT: fputs ("gt", stream); break;
854 case GE: case LE: fputs ("ge", stream); break;
855 case GTU: case LTU: fputs ("gtu", stream); break;
856 case GEU: case LEU: fputs ("geu", stream); break;
857 default:
858 break;
860 break;
861 case 'M':
862 if (TARGET_SHMEDIA)
864 if (GET_CODE (x) == MEM
865 && GET_CODE (XEXP (x, 0)) == PLUS
866 && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
867 || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
868 fputc ('x', stream);
870 else
872 if (GET_CODE (x) == MEM)
874 switch (GET_MODE (x))
876 case QImode: fputs (".b", stream); break;
877 case HImode: fputs (".w", stream); break;
878 case SImode: fputs (".l", stream); break;
879 case SFmode: fputs (".s", stream); break;
880 case DFmode: fputs (".d", stream); break;
881 default: gcc_unreachable ();
885 break;
887 case 'm':
888 gcc_assert (GET_CODE (x) == MEM);
889 x = XEXP (x, 0);
890 /* Fall through. */
891 case 'U':
892 switch (GET_CODE (x))
894 case REG:
895 case SUBREG:
896 print_operand (stream, x, 0);
897 fputs (", 0", stream);
898 break;
900 case PLUS:
901 print_operand (stream, XEXP (x, 0), 0);
902 fputs (", ", stream);
903 print_operand (stream, XEXP (x, 1), 0);
904 break;
906 default:
907 gcc_unreachable ();
909 break;
911 case 'V':
913 int num = exact_log2 (INTVAL (x));
914 gcc_assert (num >= 0);
915 fprintf (stream, "#%d", num);
917 break;
919 case 'W':
921 int num = exact_log2 (~INTVAL (x));
922 gcc_assert (num >= 0);
923 fprintf (stream, "#%d", num);
925 break;
927 case 'd':
928 gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);
930 fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
931 break;
933 case 'N':
934 if (x == CONST0_RTX (GET_MODE (x)))
936 fprintf ((stream), "r63");
937 break;
939 goto default_output;
940 case 'u':
941 if (GET_CODE (x) == CONST_INT)
943 fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
944 break;
946 /* Fall through. */
948 default_output:
949 default:
950 regno = 0;
951 mode = GET_MODE (x);
953 switch (GET_CODE (x))
955 case TRUNCATE:
957 rtx inner = XEXP (x, 0);
958 int offset = 0;
959 enum machine_mode inner_mode;
961 /* We might see SUBREGs with vector mode registers inside. */
962 if (GET_CODE (inner) == SUBREG
963 && (GET_MODE_SIZE (GET_MODE (inner))
964 == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
965 && subreg_lowpart_p (inner))
966 inner = SUBREG_REG (inner);
967 if (GET_CODE (inner) == CONST_INT)
969 x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
970 goto default_output;
972 inner_mode = GET_MODE (inner);
973 if (GET_CODE (inner) == SUBREG
974 && (GET_MODE_SIZE (GET_MODE (inner))
975 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
976 && GET_CODE (SUBREG_REG (inner)) == REG)
978 offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
979 GET_MODE (SUBREG_REG (inner)),
980 SUBREG_BYTE (inner),
981 GET_MODE (inner));
982 inner = SUBREG_REG (inner);
984 if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
985 abort ();
986 /* Floating point register pairs are always big endian;
987 general purpose registers are 64 bit wide. */
988 regno = REGNO (inner);
989 regno = (HARD_REGNO_NREGS (regno, inner_mode)
990 - HARD_REGNO_NREGS (regno, mode))
991 + offset;
992 x = inner;
993 goto reg;
995 case SIGN_EXTEND:
996 x = XEXP (x, 0);
997 goto reg;
998 /* FIXME: We need this on SHmedia32 because reload generates
999 some sign-extended HI or QI loads into DImode registers
1000 but, because Pmode is SImode, the address ends up with a
1001 subreg:SI of the DImode register. Maybe reload should be
1002 fixed so as to apply alter_subreg to such loads? */
1003 case IF_THEN_ELSE:
1004 gcc_assert (trapping_target_operand (x, VOIDmode));
1005 x = XEXP (XEXP (x, 2), 0);
1006 goto default_output;
1007 case SUBREG:
1008 gcc_assert (SUBREG_BYTE (x) == 0
1009 && GET_CODE (SUBREG_REG (x)) == REG);
1011 x = SUBREG_REG (x);
1012 /* Fall through. */
1014 reg:
1015 case REG:
1016 regno += REGNO (x);
1017 if (FP_REGISTER_P (regno)
1018 && mode == V16SFmode)
1019 fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
1020 else if (FP_REGISTER_P (REGNO (x))
1021 && mode == V4SFmode)
1022 fprintf ((stream), "fv%s", reg_names[regno] + 2);
1023 else if (GET_CODE (x) == REG
1024 && mode == V2SFmode)
1025 fprintf ((stream), "fp%s", reg_names[regno] + 2);
1026 else if (FP_REGISTER_P (REGNO (x))
1027 && GET_MODE_SIZE (mode) > 4)
1028 fprintf ((stream), "d%s", reg_names[regno] + 1);
1029 else
1030 fputs (reg_names[regno], (stream));
1031 break;
1033 case MEM:
1034 output_address (XEXP (x, 0));
1035 break;
1037 case CONST:
1038 if (TARGET_SHMEDIA
1039 && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
1040 || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
1041 && (GET_MODE (XEXP (x, 0)) == DImode
1042 || GET_MODE (XEXP (x, 0)) == SImode)
1043 && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
1044 && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
1046 rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
1047 rtx val2 = val;
1048 bool nested_expr = false;
1050 fputc ('(', stream);
1051 if (GET_CODE (val) == ASHIFTRT)
1053 fputc ('(', stream);
1054 val2 = XEXP (val, 0);
1056 if (GET_CODE (val2) == CONST
1057 || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)
1059 fputc ('(', stream);
1060 nested_expr = true;
1062 output_addr_const (stream, val2);
1063 if (nested_expr)
1064 fputc (')', stream);
1065 if (GET_CODE (val) == ASHIFTRT)
1067 fputs (" >> ", stream);
1068 output_addr_const (stream, XEXP (val, 1));
1069 fputc (')', stream);
1071 fputs (" & 65535)", stream);
1072 break;
1075 /* Fall through. */
1076 default:
1077 if (TARGET_SH1)
1078 fputc ('#', stream);
1079 output_addr_const (stream, x);
1080 break;
1082 break;
1087 /* Encode symbol attributes of a SYMBOL_REF into its
1088 SYMBOL_REF_FLAGS. */
1089 static void
1090 sh_encode_section_info (tree decl, rtx rtl, int first)
1092 default_encode_section_info (decl, rtl, first);
1094 if (TREE_CODE (decl) == FUNCTION_DECL
1095 && sh2a_function_vector_p (decl) && TARGET_SH2A)
1096 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
1099 /* Like force_operand, but guarantees that VALUE ends up in TARGET. */
1100 static void
1101 force_into (rtx value, rtx target)
1103 value = force_operand (value, target);
1104 if (! rtx_equal_p (value, target))
1105 emit_insn (gen_move_insn (target, value));
1108 /* Emit code to perform a block move. Choose the best method.
1110 OPERANDS[0] is the destination.
1111 OPERANDS[1] is the source.
1112 OPERANDS[2] is the size.
1113 OPERANDS[3] is the alignment safe to use. */
1116 expand_block_move (rtx *operands)
1118 int align = INTVAL (operands[3]);
1119 int constp = (GET_CODE (operands[2]) == CONST_INT);
1120 int bytes = (constp ? INTVAL (operands[2]) : 0);
1122 if (! constp)
1123 return 0;
1125 /* If we could use mov.l to move words and dest is word-aligned, we
1126 can use movua.l for loads and still generate a relatively short
1127 and efficient sequence. */
1128 if (TARGET_SH4A_ARCH && align < 4
1129 && MEM_ALIGN (operands[0]) >= 32
1130 && can_move_by_pieces (bytes, 32))
1132 rtx dest = copy_rtx (operands[0]);
1133 rtx src = copy_rtx (operands[1]);
1134 /* We could use different pseudos for each copied word, but
1135 since movua can only load into r0, it's kind of
1136 pointless. */
1137 rtx temp = gen_reg_rtx (SImode);
1138 rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
1139 int copied = 0;
1141 while (copied + 4 <= bytes)
1143 rtx to = adjust_address (dest, SImode, copied);
1144 rtx from = adjust_automodify_address (src, BLKmode,
1145 src_addr, copied);
1147 set_mem_size (from, GEN_INT (4));
1148 emit_insn (gen_movua (temp, from));
1149 emit_move_insn (src_addr, plus_constant (src_addr, 4));
1150 emit_move_insn (to, temp);
1151 copied += 4;
1154 if (copied < bytes)
1155 move_by_pieces (adjust_address (dest, BLKmode, copied),
1156 adjust_automodify_address (src, BLKmode,
1157 src_addr, copied),
1158 bytes - copied, align, 0);
1160 return 1;
1163 /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
1164 alignment, or if it isn't a multiple of 4 bytes, then fail. */
1165 if (align < 4 || (bytes % 4 != 0))
1166 return 0;
1168 if (TARGET_HARD_SH4)
1170 if (bytes < 12)
1171 return 0;
1172 else if (bytes == 12)
1174 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1175 rtx r4 = gen_rtx_REG (SImode, 4);
1176 rtx r5 = gen_rtx_REG (SImode, 5);
1178 function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
1179 force_into (XEXP (operands[0], 0), r4);
1180 force_into (XEXP (operands[1], 0), r5);
1181 emit_insn (gen_block_move_real_i4 (func_addr_rtx));
1182 return 1;
1184 else if (! TARGET_SMALLCODE)
1186 const char *entry_name;
1187 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1188 int dwords;
1189 rtx r4 = gen_rtx_REG (SImode, 4);
1190 rtx r5 = gen_rtx_REG (SImode, 5);
1191 rtx r6 = gen_rtx_REG (SImode, 6);
1193 entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
1194 function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
1195 force_into (XEXP (operands[0], 0), r4);
1196 force_into (XEXP (operands[1], 0), r5);
1198 dwords = bytes >> 3;
1199 emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
1200 emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
1201 return 1;
1203 else
1204 return 0;
1206 if (bytes < 64)
1208 char entry[30];
1209 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1210 rtx r4 = gen_rtx_REG (SImode, 4);
1211 rtx r5 = gen_rtx_REG (SImode, 5);
1213 sprintf (entry, "__movmemSI%d", bytes);
1214 function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
1215 force_into (XEXP (operands[0], 0), r4);
1216 force_into (XEXP (operands[1], 0), r5);
1217 emit_insn (gen_block_move_real (func_addr_rtx));
1218 return 1;
1221 /* This is the same number of bytes as a memcpy call, but to a different,
1222 less common function name, so this will occasionally use more space. */
1223 if (! TARGET_SMALLCODE)
1225 rtx func_addr_rtx = gen_reg_rtx (Pmode);
1226 int final_switch, while_loop;
1227 rtx r4 = gen_rtx_REG (SImode, 4);
1228 rtx r5 = gen_rtx_REG (SImode, 5);
1229 rtx r6 = gen_rtx_REG (SImode, 6);
1231 function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
1232 force_into (XEXP (operands[0], 0), r4);
1233 force_into (XEXP (operands[1], 0), r5);
1235 /* r6 controls the size of the move. 16 is decremented from it
1236 for each 64 bytes moved. Then the negative bit left over is used
1237 as an index into a list of move instructions. e.g., a 72 byte move
1238 would be set up with size(r6) = 14, for one iteration through the
1239 big while loop, and a switch of -2 for the last part. */
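/* Worked example for the 72 byte case above: bytes / 4 = 18 words, so
   final_switch = 16 - (18 % 16) = 14 and
   while_loop = ((18 / 16) - 1) * 16 = 0; r6 starts at 14, the library
   loop subtracts 16 once, and the leftover -2 indexes the trailing
   moves.  */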
1241 final_switch = 16 - ((bytes / 4) % 16);
1242 while_loop = ((bytes / 4) / 16 - 1) * 16;
1243 emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
1244 emit_insn (gen_block_lump_real (func_addr_rtx));
1245 return 1;
1248 return 0;
1251 /* Prepare operands for a move define_expand; specifically, one of the
1252 operands must be in a register. */
1255 prepare_move_operands (rtx operands[], enum machine_mode mode)
1257 if ((mode == SImode || mode == DImode)
1258 && flag_pic
1259 && ! ((mode == Pmode || mode == ptr_mode)
1260 && tls_symbolic_operand (operands[1], Pmode) != 0))
1262 rtx temp;
1263 if (SYMBOLIC_CONST_P (operands[1]))
1265 if (GET_CODE (operands[0]) == MEM)
1266 operands[1] = force_reg (Pmode, operands[1]);
1267 else if (TARGET_SHMEDIA
1268 && GET_CODE (operands[1]) == LABEL_REF
1269 && target_reg_operand (operands[0], mode))
1270 /* It's ok. */;
1271 else
1273 temp = (!can_create_pseudo_p ()
1274 ? operands[0]
1275 : gen_reg_rtx (Pmode));
1276 operands[1] = legitimize_pic_address (operands[1], mode, temp);
1279 else if (GET_CODE (operands[1]) == CONST
1280 && GET_CODE (XEXP (operands[1], 0)) == PLUS
1281 && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
1283 temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
1284 temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
1285 mode, temp);
1286 operands[1] = expand_binop (mode, add_optab, temp,
1287 XEXP (XEXP (operands[1], 0), 1),
1288 (!can_create_pseudo_p ()
1289 ? temp
1290 : gen_reg_rtx (Pmode)),
1291 0, OPTAB_LIB_WIDEN);
1295 if (! reload_in_progress && ! reload_completed)
1297 /* Copy the source to a register if both operands aren't registers. */
1298 if (! register_operand (operands[0], mode)
1299 && ! sh_register_operand (operands[1], mode))
1300 operands[1] = copy_to_mode_reg (mode, operands[1]);
1302 if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
1304 /* This is like change_address_1 (operands[0], mode, 0, 1),
1305 except that we can't use that function because it is static. */
1306 rtx new = change_address (operands[0], mode, 0);
1307 MEM_COPY_ATTRIBUTES (new, operands[0]);
1308 operands[0] = new;
1311 /* This case can happen while generating code to move the result
1312 of a library call to the target. Reject `st r0,@(rX,rY)' because
1313 reload will fail to find a spill register for rX, since r0 is already
1314 being used for the source. */
1315 else if (TARGET_SH1
1316 && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
1317 && GET_CODE (operands[0]) == MEM
1318 && GET_CODE (XEXP (operands[0], 0)) == PLUS
1319 && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
1320 operands[1] = copy_to_mode_reg (mode, operands[1]);
1323 if (mode == Pmode || mode == ptr_mode)
1325 rtx op0, op1, opc;
1326 enum tls_model tls_kind;
1328 op0 = operands[0];
1329 op1 = operands[1];
1330 if (GET_CODE (op1) == CONST
1331 && GET_CODE (XEXP (op1, 0)) == PLUS
1332 && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
1334 opc = XEXP (XEXP (op1, 0), 1);
1335 op1 = XEXP (XEXP (op1, 0), 0);
1337 else
1338 opc = NULL_RTX;
1340 if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
1342 rtx tga_op1, tga_ret, tmp, tmp2;
1344 switch (tls_kind)
1346 case TLS_MODEL_GLOBAL_DYNAMIC:
1347 tga_ret = gen_rtx_REG (Pmode, R0_REG);
1348 emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
1349 op1 = tga_ret;
1350 break;
1352 case TLS_MODEL_LOCAL_DYNAMIC:
1353 tga_ret = gen_rtx_REG (Pmode, R0_REG);
1354 emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
1356 tmp = gen_reg_rtx (Pmode);
1357 emit_move_insn (tmp, tga_ret);
1359 if (register_operand (op0, Pmode))
1360 tmp2 = op0;
1361 else
1362 tmp2 = gen_reg_rtx (Pmode);
1364 emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
1365 op1 = tmp2;
1366 break;
1368 case TLS_MODEL_INITIAL_EXEC:
1369 if (! flag_pic)
1371 /* Don't schedule insns for getting the GOT address when
1372 the first scheduling pass is enabled, to avoid spill
1373 failures for R0. */
1374 if (flag_schedule_insns)
1375 emit_insn (gen_blockage ());
1376 emit_insn (gen_GOTaddr2picreg ());
1377 emit_use (gen_rtx_REG (SImode, PIC_REG));
1378 if (flag_schedule_insns)
1379 emit_insn (gen_blockage ());
1381 tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
1382 tmp = gen_sym2GOTTPOFF (op1);
1383 emit_insn (gen_tls_initial_exec (tga_op1, tmp));
1384 op1 = tga_op1;
1385 break;
1387 case TLS_MODEL_LOCAL_EXEC:
1388 tmp2 = gen_reg_rtx (Pmode);
1389 emit_insn (gen_load_gbr (tmp2));
1390 tmp = gen_reg_rtx (Pmode);
1391 emit_insn (gen_symTPOFF2reg (tmp, op1));
1393 if (register_operand (op0, Pmode))
1394 op1 = op0;
1395 else
1396 op1 = gen_reg_rtx (Pmode);
1398 emit_insn (gen_addsi3 (op1, tmp, tmp2));
1399 break;
1401 default:
1402 gcc_unreachable ();
1404 if (opc)
1405 emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
1406 operands[1] = op1;
1410 return 0;
1413 enum rtx_code
1414 prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
1415 enum rtx_code comparison)
1417 rtx op1;
1418 rtx scratch = NULL_RTX;
1420 if (comparison == CODE_FOR_nothing)
1421 comparison = GET_CODE (operands[0]);
1422 else
1423 scratch = operands[4];
1424 if (GET_CODE (operands[1]) == CONST_INT
1425 && GET_CODE (operands[2]) != CONST_INT)
1427 rtx tmp = operands[1];
1429 operands[1] = operands[2];
1430 operands[2] = tmp;
1431 comparison = swap_condition (comparison);
1433 if (GET_CODE (operands[2]) == CONST_INT)
1435 HOST_WIDE_INT val = INTVAL (operands[2]);
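/* Canonicalize comparisons against constants so cheaper conditions can
   be used; e.g. (gt x (const_int -1)) becomes (ge x (const_int 0)) and
   (geu x (const_int 1)) becomes (ne x (const_int 0)).  */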
1436 if ((val == -1 || val == -0x81)
1437 && (comparison == GT || comparison == LE))
1439 comparison = (comparison == GT) ? GE : LT;
1440 operands[2] = gen_int_mode (val + 1, mode);
1442 else if ((val == 1 || val == 0x80)
1443 && (comparison == GE || comparison == LT))
1445 comparison = (comparison == GE) ? GT : LE;
1446 operands[2] = gen_int_mode (val - 1, mode);
1448 else if (val == 1 && (comparison == GEU || comparison == LTU))
1450 comparison = (comparison == GEU) ? NE : EQ;
1451 operands[2] = CONST0_RTX (mode);
1453 else if (val == 0x80 && (comparison == GEU || comparison == LTU))
1455 comparison = (comparison == GEU) ? GTU : LEU;
1456 operands[2] = gen_int_mode (val - 1, mode);
1458 else if (val == 0 && (comparison == GTU || comparison == LEU))
1459 comparison = (comparison == GTU) ? NE : EQ;
1460 else if (mode == SImode
1461 && ((val == 0x7fffffff
1462 && (comparison == GTU || comparison == LEU))
1463 || ((unsigned HOST_WIDE_INT) val
1464 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
1465 && (comparison == GEU || comparison == LTU))))
1467 comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
1468 operands[2] = CONST0_RTX (mode);
1471 op1 = operands[1];
1472 if (can_create_pseudo_p ())
1473 operands[1] = force_reg (mode, op1);
1474 /* When we are handling DImode comparisons, we want to keep constants so
1475 that we can optimize the component comparisons; however, memory loads
1476 are better issued as a whole so that they can be scheduled well.
1477 SImode equality comparisons allow I08 constants, but only when they
1478 compare r0. Hence, if operands[1] has to be loaded from somewhere else
1479 into a register, that register might as well be r0, and we allow the
1480 constant. If it is already in a register, this is likely to be
1481 allocated to a different hard register, thus we load the constant into
1482 a register unless it is zero. */
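/* For illustration: an SImode (eq r0 (const_int 5)) keeps the I08
   constant, whereas (gt r4 (const_int 5)) loads the constant into the
   scratch register (if one of the right mode was supplied) or a fresh
   pseudo.  */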
1483 if (!REG_P (operands[2])
1484 && (GET_CODE (operands[2]) != CONST_INT
1485 || (mode == SImode && operands[2] != CONST0_RTX (SImode)
1486 && ((comparison != EQ && comparison != NE)
1487 || (REG_P (op1) && REGNO (op1) != R0_REG)
1488 || !satisfies_constraint_I08 (operands[2])))))
1490 if (scratch && GET_MODE (scratch) == mode)
1492 emit_move_insn (scratch, operands[2]);
1493 operands[2] = scratch;
1495 else if (can_create_pseudo_p ())
1496 operands[2] = force_reg (mode, operands[2]);
1498 return comparison;
1501 void
1502 expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
1504 rtx (*branch_expander) (rtx) = gen_branch_true;
1505 rtx jump;
1507 comparison = prepare_cbranch_operands (operands, SImode, comparison);
1508 switch (comparison)
1510 case NE: case LT: case LE: case LTU: case LEU:
1511 comparison = reverse_condition (comparison);
1512 branch_expander = gen_branch_false;
1513 default: ;
1515 emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
1516 gen_rtx_fmt_ee (comparison, SImode,
1517 operands[1], operands[2])));
1518 jump = emit_jump_insn (branch_expander (operands[3]));
1519 if (probability >= 0)
1520 REG_NOTES (jump)
1521 = gen_rtx_EXPR_LIST (REG_BR_PROB, GEN_INT (probability),
1522 REG_NOTES (jump));
1526 /* ??? How should we distribute probabilities when more than one branch
1527 is generated? So far we only have some ad-hoc observations:
1528 - If the operands are random, they are likely to differ in both parts.
1529 - If comparing items in a hash chain, the operands are random or equal;
1530 operation should be EQ or NE.
1531 - If items are searched in an ordered tree from the root, we can expect
1532 the highpart to be unequal about half of the time; operation should be
1533 an inequality comparison, operands non-constant, and overall probability
1534 about 50%. Likewise for quicksort.
1535 - Range checks will often be made against constants. Even if we assume for
1536 simplicity an even distribution of the non-constant operand over a
1537 sub-range here, the same probability could be generated with differently
1538 wide sub-ranges - as long as the ratio of the part of the subrange that
1539 is before the threshold to the part that comes after the threshold stays
1540 the same. Thus, we can't really tell anything here;
1541 assuming random distribution is at least simple.
1544 bool
1545 expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
1547 enum rtx_code msw_taken, msw_skip, lsw_taken;
1548 rtx skip_label = NULL_RTX;
1549 rtx op1h, op1l, op2h, op2l;
1550 int num_branches;
1551 int prob, rev_prob;
1552 int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
1553 rtx scratch = operands[4];
1555 comparison = prepare_cbranch_operands (operands, DImode, comparison);
1556 op1h = gen_highpart_mode (SImode, DImode, operands[1]);
1557 op2h = gen_highpart_mode (SImode, DImode, operands[2]);
1558 op1l = gen_lowpart (SImode, operands[1]);
1559 op2l = gen_lowpart (SImode, operands[2]);
1560 msw_taken = msw_skip = lsw_taken = CODE_FOR_nothing;
1561 prob = split_branch_probability;
1562 rev_prob = REG_BR_PROB_BASE - prob;
1563 switch (comparison)
1565 /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
1566 That costs 1 cycle more when the first branch can be predicted taken,
1567 but saves us mispredicts because only one branch needs prediction.
1568 It also enables generating the cmpeqdi_t-1 pattern. */
1569 case EQ:
1570 if (TARGET_CMPEQDI_T)
1572 emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
1573 emit_jump_insn (gen_branch_true (operands[3]));
1574 return true;
1576 msw_skip = NE;
1577 lsw_taken = EQ;
1578 if (prob >= 0)
1580 /* If we had more precision, we'd use rev_prob - (rev_prob >> 32). */
1582 msw_skip_prob = rev_prob;
1583 if (REG_BR_PROB_BASE <= 65535)
1584 lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
1585 else
1587 gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);
1588 lsw_taken_prob
1589 = (prob
1590 ? (REG_BR_PROB_BASE
1591 - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
1592 / ((HOST_WIDEST_INT) prob << 32)))
1593 : 0);
1596 break;
1597 case NE:
1598 if (TARGET_CMPEQDI_T)
1600 emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
1601 emit_jump_insn (gen_branch_false (operands[3]));
1602 return true;
1604 msw_taken = NE;
1605 msw_taken_prob = prob;
1606 lsw_taken = NE;
1607 lsw_taken_prob = 0;
1608 break;
1609 case GTU: case GT:
1610 msw_taken = comparison;
1611 if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
1612 break;
1613 if (comparison != GTU || op2h != CONST0_RTX (SImode))
1614 msw_skip = swap_condition (msw_taken);
1615 lsw_taken = GTU;
1616 break;
1617 case GEU: case GE:
1618 if (op2l == CONST0_RTX (SImode))
1619 msw_taken = comparison;
1620 else
1622 msw_taken = comparison == GE ? GT : GTU;
1623 msw_skip = swap_condition (msw_taken);
1624 lsw_taken = GEU;
1626 break;
1627 case LTU: case LT:
1628 msw_taken = comparison;
1629 if (op2l == CONST0_RTX (SImode))
1630 break;
1631 msw_skip = swap_condition (msw_taken);
1632 lsw_taken = LTU;
1633 break;
1634 case LEU: case LE:
1635 if (GET_CODE (op2l) == CONST_INT && INTVAL (op2l) == -1)
1636 msw_taken = comparison;
1637 else
1639 lsw_taken = LEU;
1640 if (comparison == LE)
1641 msw_taken = LT;
1642 else if (op2h != CONST0_RTX (SImode))
1643 msw_taken = LTU;
1644 else
1645 break;
1646 msw_skip = swap_condition (msw_taken);
1648 break;
1649 default: return false;
1651 num_branches = ((msw_taken != CODE_FOR_nothing)
1652 + (msw_skip != CODE_FOR_nothing)
1653 + (lsw_taken != CODE_FOR_nothing));
1654 if (comparison != EQ && comparison != NE && num_branches > 1)
1656 if (!CONSTANT_P (operands[2])
1657 && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
1658 && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
1660 msw_taken_prob = prob / 2U;
1661 msw_skip_prob
1662 = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
1663 lsw_taken_prob = prob;
1665 else
1667 msw_taken_prob = prob;
1668 msw_skip_prob = REG_BR_PROB_BASE;
1669 /* ??? If we have a constant op2h, should we use that when
1670 calculating lsw_taken_prob? */
1671 lsw_taken_prob = prob;
1674 operands[1] = op1h;
1675 operands[2] = op2h;
1676 operands[4] = NULL_RTX;
1677 if (reload_completed
1678 && ! arith_reg_or_0_operand (op2h, SImode) && true_regnum (op1h)
1679 && (msw_taken != CODE_FOR_nothing || msw_skip != CODE_FOR_nothing))
1681 emit_move_insn (scratch, operands[2]);
1682 operands[2] = scratch;
1684 if (msw_taken != CODE_FOR_nothing)
1685 expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
1686 if (msw_skip != CODE_FOR_nothing)
1688 rtx taken_label = operands[3];
1690 /* Operands were possibly modified, but msw_skip doesn't expect this.
1691 Always use the original ones. */
1692 if (msw_taken != CODE_FOR_nothing)
1694 operands[1] = op1h;
1695 operands[2] = op2h;
1698 operands[3] = skip_label = gen_label_rtx ();
1699 expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
1700 operands[3] = taken_label;
1702 operands[1] = op1l;
1703 operands[2] = op2l;
1704 if (lsw_taken != CODE_FOR_nothing)
1706 if (reload_completed
1707 && ! arith_reg_or_0_operand (op2l, SImode) && true_regnum (op1l))
1708 operands[4] = scratch;
1709 expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
1711 if (msw_skip != CODE_FOR_nothing)
1712 emit_label (skip_label);
1713 return true;
1716 /* Prepare the operands for an scc instruction; make sure that the
1717 compare has been done. */
1719 prepare_scc_operands (enum rtx_code code)
1721 rtx t_reg = gen_rtx_REG (SImode, T_REG);
1722 enum rtx_code oldcode = code;
1723 enum machine_mode mode;
1725 /* First need a compare insn. */
1726 switch (code)
1728 case NE:
1729 /* It isn't possible to handle this case. */
1730 gcc_unreachable ();
1731 case LT:
1732 code = GT;
1733 break;
1734 case LE:
1735 code = GE;
1736 break;
1737 case LTU:
1738 code = GTU;
1739 break;
1740 case LEU:
1741 code = GEU;
1742 break;
1743 default:
1744 break;
1746 if (code != oldcode)
1748 rtx tmp = sh_compare_op0;
1749 sh_compare_op0 = sh_compare_op1;
1750 sh_compare_op1 = tmp;
1753 mode = GET_MODE (sh_compare_op0);
1754 if (mode == VOIDmode)
1755 mode = GET_MODE (sh_compare_op1);
1757 sh_compare_op0 = force_reg (mode, sh_compare_op0);
1758 if ((code != EQ && code != NE
1759 && (sh_compare_op1 != const0_rtx
1760 || code == GTU || code == GEU || code == LTU || code == LEU))
1761 || (mode == DImode && sh_compare_op1 != const0_rtx)
1762 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1763 sh_compare_op1 = force_reg (mode, sh_compare_op1);
1765 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1766 (mode == SFmode ? emit_sf_insn : emit_df_insn)
1767 (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
1768 gen_rtx_SET (VOIDmode, t_reg,
1769 gen_rtx_fmt_ee (code, SImode,
1770 sh_compare_op0, sh_compare_op1)),
1771 gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
1772 else
1773 emit_insn (gen_rtx_SET (VOIDmode, t_reg,
1774 gen_rtx_fmt_ee (code, SImode,
1775 sh_compare_op0, sh_compare_op1)));
1777 return t_reg;
1780 /* Called from the md file, set up the operands of a compare instruction. */
1782 void
1783 from_compare (rtx *operands, int code)
1785 enum machine_mode mode = GET_MODE (sh_compare_op0);
1786 rtx insn;
1787 if (mode == VOIDmode)
1788 mode = GET_MODE (sh_compare_op1);
1789 if (code != EQ
1790 || mode == DImode
1791 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1793 /* Force args into regs, since we can't use constants here. */
1794 sh_compare_op0 = force_reg (mode, sh_compare_op0);
1795 if (sh_compare_op1 != const0_rtx
1796 || code == GTU || code == GEU
1797 || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1798 sh_compare_op1 = force_reg (mode, sh_compare_op1);
1800 if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
1802 from_compare (operands, GT);
1803 insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
1805 else
1806 insn = gen_rtx_SET (VOIDmode,
1807 gen_rtx_REG (SImode, T_REG),
1808 gen_rtx_fmt_ee (code, SImode,
1809 sh_compare_op0, sh_compare_op1));
1810 if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1812 insn = gen_rtx_PARALLEL (VOIDmode,
1813 gen_rtvec (2, insn,
1814 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
1815 (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
1817 else
1818 emit_insn (insn);
1821 /* Functions to output assembly code. */
1823 /* Return a sequence of instructions to perform DI or DF move.
1825 Since the SH cannot move a DI or DF in one instruction, we have
1826 to take care when we see overlapping source and dest registers. */
1828 const char *
1829 output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1830 enum machine_mode mode)
1832 rtx dst = operands[0];
1833 rtx src = operands[1];
1835 if (GET_CODE (dst) == MEM
1836 && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
1837 return "mov.l %T1,%0\n\tmov.l %1,%0";
1839 if (register_operand (dst, mode)
1840 && register_operand (src, mode))
1842 if (REGNO (src) == MACH_REG)
1843 return "sts mach,%S0\n\tsts macl,%R0";
1845 /* When mov.d r1,r2 do r2->r3 then r1->r2;
1846 when mov.d r1,r0 do r1->r0 then r2->r1. */
1848 if (REGNO (src) + 1 == REGNO (dst))
1849 return "mov %T1,%T0\n\tmov %1,%0";
1850 else
1851 return "mov %1,%0\n\tmov %T1,%T0";
1853 else if (GET_CODE (src) == CONST_INT)
1855 if (INTVAL (src) < 0)
1856 output_asm_insn ("mov #-1,%S0", operands);
1857 else
1858 output_asm_insn ("mov #0,%S0", operands);
1860 return "mov %1,%R0";
1862 else if (GET_CODE (src) == MEM)
1864 int ptrreg = -1;
1865 int dreg = REGNO (dst);
1866 rtx inside = XEXP (src, 0);
1868 switch (GET_CODE (inside))
1870 case REG:
1871 ptrreg = REGNO (inside);
1872 break;
1874 case SUBREG:
1875 ptrreg = subreg_regno (inside);
1876 break;
1878 case PLUS:
1879 ptrreg = REGNO (XEXP (inside, 0));
1880 /* ??? An r0+REG address shouldn't be possible here, because it isn't
1881 an offsettable address. Unfortunately, offsettable addresses use
1882 QImode to check the offset, and a QImode offsettable address
1883 requires r0 for the other operand, which is not currently
1884 supported, so we can't use the 'o' constraint.
1885 Thus we must check for and handle r0+REG addresses here.
1886 We punt for now, since this is likely very rare. */
1887 gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
1888 break;
1890 case LABEL_REF:
1891 return "mov.l %1,%0\n\tmov.l %1+4,%T0";
1892 case POST_INC:
1893 return "mov.l %1,%0\n\tmov.l %1,%T0";
1894 default:
1895 gcc_unreachable ();
1898 /* Work out the safe way to copy. Copy into the second half first. */
1899 if (dreg == ptrreg)
1900 return "mov.l %T1,%T0\n\tmov.l %1,%0";
1903 return "mov.l %1,%0\n\tmov.l %T1,%T0";
1906 /* Print an instruction which would have gone into a delay slot after
1907 another instruction, but couldn't because the other instruction expanded
1908 into a sequence where putting the slot insn at the end wouldn't work. */
1910 static void
1911 print_slot (rtx insn)
1913 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);
1915 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
1918 const char *
1919 output_far_jump (rtx insn, rtx op)
1921 struct { rtx lab, reg, op; } this;
1922 rtx braf_base_lab = NULL_RTX;
1923 const char *jump;
1924 int far;
1925 int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
1926 rtx prev;
1928 this.lab = gen_label_rtx ();
1930 if (TARGET_SH2
1931 && offset >= -32764
1932 && offset - get_attr_length (insn) <= 32766)
1934 far = 0;
1935 jump = "mov.w %O0,%1; braf %1";
1937 else
1939 far = 1;
1940 if (flag_pic)
1942 if (TARGET_SH2)
1943 jump = "mov.l %O0,%1; braf %1";
1944 else
1945 jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
1947 else
1948 jump = "mov.l %O0,%1; jmp @%1";
1950 /* If we have a scratch register available, use it. */
1951 if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
1952 && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
1954 this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
1955 if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
1956 jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
1957 output_asm_insn (jump, &this.lab);
1958 if (dbr_sequence_length ())
1959 print_slot (final_sequence);
1960 else
1961 output_asm_insn ("nop", 0);
1963 else
1965 /* Output the delay slot insn first if any. */
1966 if (dbr_sequence_length ())
1967 print_slot (final_sequence);
1969 this.reg = gen_rtx_REG (SImode, 13);
1970 /* We must keep the stack aligned to 8-byte boundaries on SH5.
1971 Fortunately, MACL is fixed and call-clobbered, and we never
1972 need its value across jumps, so save r13 in it instead of in
1973 the stack. */
1974 if (TARGET_SH5)
1975 output_asm_insn ("lds r13, macl", 0);
1976 else
1977 output_asm_insn ("mov.l r13,@-r15", 0);
1978 output_asm_insn (jump, &this.lab);
1979 if (TARGET_SH5)
1980 output_asm_insn ("sts macl, r13", 0);
1981 else
1982 output_asm_insn ("mov.l @r15+,r13", 0);
1984 if (far && flag_pic && TARGET_SH2)
1986 braf_base_lab = gen_label_rtx ();
1987 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1988 CODE_LABEL_NUMBER (braf_base_lab));
1990 if (far)
1991 output_asm_insn (".align 2", 0);
1992 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
1993 this.op = op;
1994 if (far && flag_pic)
1996 if (TARGET_SH2)
1997 this.lab = braf_base_lab;
1998 output_asm_insn (".long %O2-%O0", &this.lab);
2000 else
2001 output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
2002 return "";
2005 /* Local label counter, used for constants in the pool and inside
2006 pattern branches. */
2008 static int lf = 100;
2010 /* Output code for ordinary branches. */
2012 const char *
2013 output_branch (int logic, rtx insn, rtx *operands)
2015 switch (get_attr_length (insn))
2017 case 6:
2018 /* This can happen if filling the delay slot has caused a forward
2019 branch to exceed its range (we could reverse it, but only
2020 when we know we won't overextend other branches; this should
2021 best be handled by relaxation).
2022 It can also happen when other condbranches hoist delay slot insn
2023 from their destination, thus leading to code size increase.
2024 But the branch will still be in the range -4092..+4098 bytes. */
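/* For instance, when not relaxing, a `bt' whose target has drifted out of
   range is rewritten below as an inverted branch around an unconditional
   one, roughly:
     bf.s  .LFnn      ! or plain bf when there is no usable delay slot
     <delay slot insn>
     bra   <target>
     nop
   .LFnn:  */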
2026 if (! TARGET_RELAX)
2028 int label = lf++;
2029 /* The call to print_slot will clobber the operands. */
2030 rtx op0 = operands[0];
2032 /* If the instruction in the delay slot is annulled (true), then
2033 there is no delay slot where we can put it now. The only safe
2034 place for it is after the label. final will do that by default. */
2036 if (final_sequence
2037 && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
2038 && get_attr_length (XVECEXP (final_sequence, 0, 1)))
2040 asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
2041 ASSEMBLER_DIALECT ? "/" : ".", label);
2042 print_slot (final_sequence);
2044 else
2045 asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
2047 output_asm_insn ("bra\t%l0", &op0);
2048 fprintf (asm_out_file, "\tnop\n");
2049 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2051 return "";
2053 /* When relaxing, handle this like a short branch. The linker
2054 will fix it up if it still doesn't fit after relaxation. */
2055 case 2:
2056 return logic ? "bt%.\t%l0" : "bf%.\t%l0";
2058 /* These are for SH2e, in which we have to account for the
2059 extra nop because of the hardware bug in annulled branches. */
2060 case 8:
2061 if (! TARGET_RELAX)
2063 int label = lf++;
2065 gcc_assert (!final_sequence
2066 || !(INSN_ANNULLED_BRANCH_P
2067 (XVECEXP (final_sequence, 0, 0))));
2068 asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
2069 logic ? "f" : "t",
2070 ASSEMBLER_DIALECT ? "/" : ".", label);
2071 fprintf (asm_out_file, "\tnop\n");
2072 output_asm_insn ("bra\t%l0", operands);
2073 fprintf (asm_out_file, "\tnop\n");
2074 (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2076 return "";
2078 /* When relaxing, fall through. */
2079 case 4:
2081 char buffer[10];
2083 sprintf (buffer, "b%s%ss\t%%l0",
2084 logic ? "t" : "f",
2085 ASSEMBLER_DIALECT ? "/" : ".");
2086 output_asm_insn (buffer, &operands[0]);
2087 return "nop";
2090 default:
2091 /* There should be no longer branches now - that would
2092 indicate that something has destroyed the branches set
2093 up in machine_dependent_reorg. */
2094 gcc_unreachable ();
2098 /* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before,
2099 fill in operand 9 as a label to the successor insn.
2100 We try to use jump threading where possible.
2101 If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2102 we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means
2103 follow jmp and bt, if the address is in range. */
2104 const char *
2105 output_branchy_insn (enum rtx_code code, const char *template,
2106 rtx insn, rtx *operands)
2108 rtx next_insn = NEXT_INSN (insn);
2110 if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
2112 rtx src = SET_SRC (PATTERN (next_insn));
2113 if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2115 /* Following branch not taken */
2116 operands[9] = gen_label_rtx ();
2117 emit_label_after (operands[9], next_insn);
2118 INSN_ADDRESSES_NEW (operands[9],
2119 INSN_ADDRESSES (INSN_UID (next_insn))
2120 + get_attr_length (next_insn));
2121 return template;
2123 else
2125 int offset = (branch_dest (next_insn)
2126 - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2127 if (offset >= -252 && offset <= 258)
2129 if (GET_CODE (src) == IF_THEN_ELSE)
2130 /* branch_true */
2131 src = XEXP (src, 1);
2132 operands[9] = src;
2133 return template;
2137 operands[9] = gen_label_rtx ();
2138 emit_label_after (operands[9], insn);
2139 INSN_ADDRESSES_NEW (operands[9],
2140 INSN_ADDRESSES (INSN_UID (insn))
2141 + get_attr_length (insn));
2142 return template;
2145 const char *
2146 output_ieee_ccmpeq (rtx insn, rtx *operands)
2148 return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2149 insn, operands);
2152 /* Output the start of the assembler file. */
2154 static void
2155 sh_file_start (void)
2157 default_file_start ();
2159 #ifdef SYMBIAN
2160 /* Declare the .directive section before it is used. */
2161 fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2162 fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2163 #endif
2165 if (TARGET_ELF)
2166 /* We need to show the text section with the proper
2167 attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2168 emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
2169 will complain. We can teach GAS specifically about the
2170 default attributes for our choice of text section, but
2171 then we would have to change GAS again if/when we change
2172 the text section name. */
2173 fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2174 else
2175 /* Switch to the data section so that the coffsem symbol
2176 isn't in the text section. */
2177 switch_to_section (data_section);
2179 if (TARGET_LITTLE_ENDIAN)
2180 fputs ("\t.little\n", asm_out_file);
2182 if (!TARGET_ELF)
2184 if (TARGET_SHCOMPACT)
2185 fputs ("\t.mode\tSHcompact\n", asm_out_file);
2186 else if (TARGET_SHMEDIA)
2187 fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2188 TARGET_SHMEDIA64 ? 64 : 32);
2192 /* Check if PAT includes UNSPEC_CALLER unspec pattern. */
2194 static bool
2195 unspec_caller_rtx_p (rtx pat)
2197 switch (GET_CODE (pat))
2199 case CONST:
2200 return unspec_caller_rtx_p (XEXP (pat, 0));
2201 case PLUS:
2202 case MINUS:
2203 if (unspec_caller_rtx_p (XEXP (pat, 0)))
2204 return true;
2205 return unspec_caller_rtx_p (XEXP (pat, 1));
2206 case UNSPEC:
2207 if (XINT (pat, 1) == UNSPEC_CALLER)
2208 return true;
2209 default:
2210 break;
2213 return false;
2216 /* Indicate that INSN cannot be duplicated. This is true for insns
2217 that generate a unique label. */
2219 static bool
2220 sh_cannot_copy_insn_p (rtx insn)
2222 rtx pat;
2224 if (!reload_completed || !flag_pic)
2225 return false;
2227 if (GET_CODE (insn) != INSN)
2228 return false;
2229 if (asm_noperands (insn) >= 0)
2230 return false;
2232 pat = PATTERN (insn);
2233 if (GET_CODE (pat) != SET)
2234 return false;
2235 pat = SET_SRC (pat);
2237 if (unspec_caller_rtx_p (pat))
2238 return true;
2240 return false;
2243 /* Actual number of instructions used to make a shift by N. */
2244 static const char ashiftrt_insns[] =
2245 { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
2247 /* Left shift and logical right shift are the same. */
2248 static const char shift_insns[] =
2249 { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2251 /* Individual shift amounts needed to get the above length sequences.
2252 One bit right shifts clobber the T bit, so when possible, put one bit
2253 shifts in the middle of the sequence, so the ends are eligible for
2254 branch delay slots. */
2255 static const short shift_amounts[32][5] = {
2256 {0}, {1}, {2}, {2, 1},
2257 {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2258 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2259 {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2260 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2261 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2262 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2263 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
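/* For example, shift_insns[13] is 4 and shift_amounts[13] is {8, 2, 1, 2}:
   a constant left shift by 13 is emitted as shll8, shll2, shll, shll2.
   A negative amount means a shift in the opposite direction (handled by
   gen_ashift below), e.g. {8, -2, 8} performs a shift by 14 in only three
   instructions.  */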
2265 /* Likewise, but for shift amounts < 16, up to three highmost bits
2266 might be clobbered. This is typically used when combined with some
2267 kind of sign or zero extension. */
2269 static const char ext_shift_insns[] =
2270 { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2272 static const short ext_shift_amounts[32][4] = {
2273 {0}, {1}, {2}, {2, 1},
2274 {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2275 {8}, {8, 1}, {8, 2}, {8, 1, 2},
2276 {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2277 {16}, {16, 1}, {16, 2}, {16, 1, 2},
2278 {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2279 {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2280 {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
2282 /* Assuming we have a value that has been sign-extended by at least one bit,
2283 can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
2284 to shift it by N without data loss, and quicker than by other means? */
2285 #define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
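/* I.e. EXT_SHIFT_SIGNED is true exactly for N == 7 and N == 15, the two
   entries above ({8, -1} and {16, -1}) whose sequences end in a single-bit
   right shift that can safely be made arithmetic.  */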
2287 /* This is used in length attributes in sh.md to help compute the length
2288 of arbitrary constant shift instructions. */
2291 shift_insns_rtx (rtx insn)
2293 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2294 int shift_count = INTVAL (XEXP (set_src, 1));
2295 enum rtx_code shift_code = GET_CODE (set_src);
2297 switch (shift_code)
2299 case ASHIFTRT:
2300 return ashiftrt_insns[shift_count];
2301 case LSHIFTRT:
2302 case ASHIFT:
2303 return shift_insns[shift_count];
2304 default:
2305 gcc_unreachable ();
2309 /* Return the cost of a shift. */
2311 static inline int
2312 shiftcosts (rtx x)
2314 int value;
2316 if (TARGET_SHMEDIA)
2317 return 1;
2319 if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2321 if (GET_MODE (x) == DImode
2322 && GET_CODE (XEXP (x, 1)) == CONST_INT
2323 && INTVAL (XEXP (x, 1)) == 1)
2324 return 2;
2326 /* Everything else is invalid, because there is no pattern for it. */
2327 return MAX_COST;
2329 /* If the shift amount is not a constant, then this will be expensive. */
2330 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2331 return SH_DYNAMIC_SHIFT_COST;
2333 value = INTVAL (XEXP (x, 1));
2335 /* Otherwise, return the true cost in instructions. */
2336 if (GET_CODE (x) == ASHIFTRT)
2338 int cost = ashiftrt_insns[value];
2339 /* If SH3, then we put the constant in a reg and use shad. */
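/* E.g. ashiftrt_insns[6] is 8, which normally exceeds
   1 + SH_DYNAMIC_SHIFT_COST, so an arithmetic right shift by 6 is costed
   as the dynamic-shift sequence (negated count in a register plus shad)
   rather than as eight single-bit shifts.  */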
2340 if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2341 cost = 1 + SH_DYNAMIC_SHIFT_COST;
2342 return cost;
2344 else
2345 return shift_insns[value];
2348 /* Return the cost of an AND operation. */
2350 static inline int
2351 andcosts (rtx x)
2353 int i;
2355 /* ANDing with a register is a single-cycle `and' instruction. */
2356 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2357 return 1;
2359 i = INTVAL (XEXP (x, 1));
2361 if (TARGET_SHMEDIA)
2363 if (satisfies_constraint_I10 (XEXP (x, 1))
2364 || satisfies_constraint_J16 (XEXP (x, 1)))
2365 return 1;
2366 else
2367 return 1 + rtx_cost (XEXP (x, 1), AND);
2370 /* These constants are single cycle extu.[bw] instructions. */
2371 if (i == 0xff || i == 0xffff)
2372 return 1;
2373 /* Constants that can be used in an and immediate instruction in a single
2374 cycle, but this requires r0, so make it a little more expensive. */
2375 if (CONST_OK_FOR_K08 (i))
2376 return 2;
2377 /* Constants that can be loaded with a mov immediate and an and.
2378 This case is probably unnecessary. */
2379 if (CONST_OK_FOR_I08 (i))
2380 return 2;
2381 /* Any other constant requires a 2 cycle pc-relative load plus an and.
2382 This case is probably unnecessary. */
2383 return 3;
2386 /* Return the cost of an addition or a subtraction. */
2388 static inline int
2389 addsubcosts (rtx x)
2391 /* Adding a register is a single cycle insn. */
2392 if (GET_CODE (XEXP (x, 1)) == REG
2393 || GET_CODE (XEXP (x, 1)) == SUBREG)
2394 return 1;
2396 /* Likewise for small constants. */
2397 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2398 && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2399 return 1;
2401 if (TARGET_SHMEDIA)
2402 switch (GET_CODE (XEXP (x, 1)))
2404 case CONST:
2405 case LABEL_REF:
2406 case SYMBOL_REF:
2407 return TARGET_SHMEDIA64 ? 5 : 3;
2409 case CONST_INT:
2410 if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2411 return 2;
2412 else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2413 return 3;
2414 else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2415 return 4;
2417 /* Fall through. */
2418 default:
2419 return 5;
2422 /* Any other constant requires a 2 cycle pc-relative load plus an
2423 addition. */
2424 return 3;
2427 /* Return the cost of a multiply. */
2428 static inline int
2429 multcosts (rtx x ATTRIBUTE_UNUSED)
2431 if (sh_multcost >= 0)
2432 return sh_multcost;
2433 if (TARGET_SHMEDIA)
2434 /* ??? We have a mul insn, but it has a latency of three, and doesn't
2435 accept constants. Ideally, we would use a cost of one or two and
2436 add the cost of the operand, but disregard the latter when inside loops
2437 and loop invariant code motion is still to follow.
2438 Using a multiply first and splitting it later if it's a loss
2439 doesn't work because of different sign / zero extension semantics
2440 of multiplies vs. shifts. */
2441 return TARGET_SMALLCODE ? 2 : 3;
2443 if (TARGET_SH2)
2445 /* We have a mul insn, so we can never take more than the mul and the
2446 read of the mac reg, but count more because of the latency and extra
2447 reg usage. */
2448 if (TARGET_SMALLCODE)
2449 return 2;
2450 return 3;
2453 /* If we're aiming at small code, then just count the number of
2454 insns in a multiply call sequence. */
2455 if (TARGET_SMALLCODE)
2456 return 5;
2458 /* Otherwise count all the insns in the routine we'd be calling too. */
2459 return 20;
2462 /* Compute a (partial) cost for rtx X. Return true if the complete
2463 cost has been computed, and false if subexpressions should be
2464 scanned. In either case, *TOTAL contains the cost result. */
2466 static bool
2467 sh_rtx_costs (rtx x, int code, int outer_code, int *total)
2469 switch (code)
2471 case CONST_INT:
2472 if (TARGET_SHMEDIA)
2474 if (INTVAL (x) == 0)
2475 *total = 0;
2476 else if (outer_code == AND && and_operand ((x), DImode))
2477 *total = 0;
2478 else if ((outer_code == IOR || outer_code == XOR
2479 || outer_code == PLUS)
2480 && CONST_OK_FOR_I10 (INTVAL (x)))
2481 *total = 0;
2482 else if (CONST_OK_FOR_I16 (INTVAL (x)))
2483 *total = COSTS_N_INSNS (outer_code != SET);
2484 else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2485 *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2486 else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2487 *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2488 else
2489 *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2490 return true;
2492 if (CONST_OK_FOR_I08 (INTVAL (x)))
2493 *total = 0;
2494 else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2495 && CONST_OK_FOR_K08 (INTVAL (x)))
2496 *total = 1;
2497 /* prepare_cmp_insn will force costly constants into registers before
2498 the cbranch[sd]i4 patterns can see them, so preserve potentially
2499 interesting ones not covered by I08 above. */
2500 else if (outer_code == COMPARE
2501 && ((unsigned HOST_WIDE_INT) INTVAL (x)
2502 == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2503 || INTVAL (x) == 0x7fffffff
2504 || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2505 *total = 1;
2506 else
2507 *total = 8;
2508 return true;
2510 case CONST:
2511 case LABEL_REF:
2512 case SYMBOL_REF:
2513 if (TARGET_SHMEDIA64)
2514 *total = COSTS_N_INSNS (4);
2515 else if (TARGET_SHMEDIA32)
2516 *total = COSTS_N_INSNS (2);
2517 else
2518 *total = 5;
2519 return true;
2521 case CONST_DOUBLE:
2522 if (TARGET_SHMEDIA)
2523 *total = COSTS_N_INSNS (4);
2524 /* prepare_cmp_insn will force costly constants into registers before
2525 the cbranchdi4 pattern can see them, so preserve potentially
2526 interesting ones. */
2527 else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2528 *total = 1;
2529 else
2530 *total = 10;
2531 return true;
2532 case CONST_VECTOR:
2533 if (x == CONST0_RTX (GET_MODE (x)))
2534 *total = 0;
2535 else if (sh_1el_vec (x, VOIDmode))
2536 *total = outer_code != SET;
2537 if (sh_rep_vec (x, VOIDmode))
2538 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2539 + (outer_code != SET));
2540 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2541 return true;
2543 case PLUS:
2544 case MINUS:
2545 *total = COSTS_N_INSNS (addsubcosts (x));
2546 return true;
2548 case AND:
2549 *total = COSTS_N_INSNS (andcosts (x));
2550 return true;
2552 case MULT:
2553 *total = COSTS_N_INSNS (multcosts (x));
2554 return true;
2556 case ASHIFT:
2557 case ASHIFTRT:
2558 case LSHIFTRT:
2559 *total = COSTS_N_INSNS (shiftcosts (x));
2560 return true;
2562 case DIV:
2563 case UDIV:
2564 case MOD:
2565 case UMOD:
2566 *total = COSTS_N_INSNS (20);
2567 return true;
2569 case PARALLEL:
2570 if (sh_1el_vec (x, VOIDmode))
2571 *total = outer_code != SET;
2572 if (sh_rep_vec (x, VOIDmode))
2573 *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2574 + (outer_code != SET));
2575 *total = COSTS_N_INSNS (3) + (outer_code != SET);
2576 return true;
2578 case FLOAT:
2579 case FIX:
2580 *total = 100;
2581 return true;
2583 default:
2584 return false;
2588 /* Compute the cost of an address. For the SH, all valid addresses are
2589 the same cost. Use a slightly higher cost for reg + reg addressing,
2590 since it increases pressure on r0. */
2592 static int
2593 sh_address_cost (rtx X)
2595 return (GET_CODE (X) == PLUS
2596 && ! CONSTANT_P (XEXP (X, 1))
2597 && ! TARGET_SHMEDIA ? 1 : 0);
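/* E.g. an indexed @(r0,rn) address costs 1 on non-SHmedia targets, while
   @rn and @(disp,rn) addresses cost 0.  */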
2600 /* Code to expand a shift. */
2602 void
2603 gen_ashift (int type, int n, rtx reg)
2605 /* Negative values here come from the shift_amounts array. */
2606 if (n < 0)
2608 if (type == ASHIFT)
2609 type = LSHIFTRT;
2610 else
2611 type = ASHIFT;
2612 n = -n;
2615 switch (type)
2617 case ASHIFTRT:
2618 emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
2619 break;
2620 case LSHIFTRT:
2621 if (n == 1)
2622 emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
2623 else
2624 emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
2625 break;
2626 case ASHIFT:
2627 emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
2628 break;
2632 /* Same for HImode */
2634 void
2635 gen_ashift_hi (int type, int n, rtx reg)
2637 /* Negative values here come from the shift_amounts array. */
2638 if (n < 0)
2640 if (type == ASHIFT)
2641 type = LSHIFTRT;
2642 else
2643 type = ASHIFT;
2644 n = -n;
2647 switch (type)
2649 case ASHIFTRT:
2650 case LSHIFTRT:
2651 /* We don't have HImode right shift operations because using the
2652 ordinary 32 bit shift instructions for that doesn't generate proper
2653 zero/sign extension.
2654 gen_ashift_hi is only called in contexts where we know that the
2655 sign extension works out correctly. */
2657 int offset = 0;
2658 if (GET_CODE (reg) == SUBREG)
2660 offset = SUBREG_BYTE (reg);
2661 reg = SUBREG_REG (reg);
2663 gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
2664 break;
2666 case ASHIFT:
2667 emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
2668 break;
2672 /* Output RTL to split a constant shift into its component SH constant
2673 shift instructions. */
2675 void
2676 gen_shifty_op (int code, rtx *operands)
2678 int value = INTVAL (operands[2]);
2679 int max, i;
2681 /* Truncate the shift count in case it is out of bounds. */
2682 value = value & 0x1f;
2684 if (value == 31)
2686 if (code == LSHIFTRT)
2688 emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
2689 emit_insn (gen_movt (operands[0]));
2690 return;
2692 else if (code == ASHIFT)
2694 /* There is a two instruction sequence for 31 bit left shifts,
2695 but it requires r0. */
2696 if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2698 emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2699 emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2700 return;
2704 else if (value == 0)
2706 /* This can happen even when optimizing, if there were subregs before
2707 reload. Don't output a nop here, as this is never optimized away;
2708 use a no-op move instead. */
2709 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
2710 return;
2713 max = shift_insns[value];
2714 for (i = 0; i < max; i++)
2715 gen_ashift (code, shift_amounts[value][i], operands[0]);
2718 /* Same as above, but optimized for values where the topmost bits don't
2719 matter. */
2721 void
2722 gen_shifty_hi_op (int code, rtx *operands)
2724 int value = INTVAL (operands[2]);
2725 int max, i;
2726 void (*gen_fun) (int, int, rtx);
2728 /* This operation is used by and_shl for SImode values with a few
2729 high bits known to be cleared. */
2730 value &= 31;
2731 if (value == 0)
2733 emit_insn (gen_nop ());
2734 return;
2737 gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2738 if (code == ASHIFT)
2740 max = ext_shift_insns[value];
2741 for (i = 0; i < max; i++)
2742 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2744 else
2745 /* When shifting right, emit the shifts in reverse order, so that
2746 solitary negative values come first. */
2747 for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2748 gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2751 /* Output RTL for an arithmetic right shift. */
2753 /* ??? Rewrite to use super-optimizer sequences. */
2756 expand_ashiftrt (rtx *operands)
2758 rtx wrk;
2759 char func[18];
2760 int value;
2762 if (TARGET_SH3)
2764 if (GET_CODE (operands[2]) != CONST_INT)
2766 rtx count = copy_to_mode_reg (SImode, operands[2]);
2767 emit_insn (gen_negsi2 (count, count));
2768 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2769 return 1;
2771 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2772 > 1 + SH_DYNAMIC_SHIFT_COST)
2774 rtx count
2775 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2776 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2777 return 1;
2780 if (GET_CODE (operands[2]) != CONST_INT)
2781 return 0;
2783 value = INTVAL (operands[2]) & 31;
2785 if (value == 31)
2787 /* If we are called from abs expansion, arrange things so that we
2788 can use a single MT instruction that doesn't clobber the source,
2789 if LICM can hoist out the load of the constant zero. */
2790 if (currently_expanding_to_rtl)
2792 emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
2793 operands[1]));
2794 emit_insn (gen_mov_neg_si_t (operands[0]));
2795 return 1;
2797 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2798 return 1;
2800 else if (value >= 16 && value <= 19)
2802 wrk = gen_reg_rtx (SImode);
2803 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2804 value -= 16;
2805 while (value--)
2806 gen_ashift (ASHIFTRT, 1, wrk);
2807 emit_move_insn (operands[0], wrk);
2808 return 1;
2810 /* Expand a short sequence inline; for longer ones, call a helper routine. */
2811 else if (value <= 5)
2813 wrk = gen_reg_rtx (SImode);
2814 emit_move_insn (wrk, operands[1]);
2815 while (value--)
2816 gen_ashift (ASHIFTRT, 1, wrk);
2817 emit_move_insn (operands[0], wrk);
2818 return 1;
2821 wrk = gen_reg_rtx (Pmode);
2823 /* Load the value into an arg reg and call a helper. */
2824 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2825 sprintf (func, "__ashiftrt_r4_%d", value);
2826 function_symbol (wrk, func, SFUNC_STATIC);
2827 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2828 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2829 return 1;
2833 sh_dynamicalize_shift_p (rtx count)
2835 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
2838 /* Try to find a good way to implement the combiner pattern
2839 [(set (match_operand:SI 0 "register_operand" "r")
2840 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2841 (match_operand:SI 2 "const_int_operand" "n"))
2842 (match_operand:SI 3 "const_int_operand" "n"))) .
2843 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2844 return 0 for simple right / left or left/right shift combination.
2845 return 1 for a combination of shifts with zero_extend.
2846 return 2 for a combination of shifts with an AND that needs r0.
2847 return 3 for a combination of shifts with an AND that needs an extra
2848 scratch register, when the three highmost bits of the AND mask are clear.
2849 return 4 for a combination of shifts with an AND that needs an extra
2850 scratch register, when any of the three highmost bits of the AND mask
2851 is set.
2852 If ATTRP is set, store an initial right shift width in ATTRP[0],
2853 and the instruction length in ATTRP[1] . These values are not valid
2854 when returning 0.
2855 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2856 shift_amounts for the last shift value that is to be used before the
2857 sign extend. */
2859 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2861 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2862 int left = INTVAL (left_rtx), right;
2863 int best = 0;
2864 int cost, best_cost = 10000;
2865 int best_right = 0, best_len = 0;
2866 int i;
2867 int can_ext;
2869 if (left < 0 || left > 31)
2870 return 0;
2871 if (GET_CODE (mask_rtx) == CONST_INT)
2872 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2873 else
2874 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2875 /* Can this be expressed as a right shift / left shift pair? */
2876 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2877 right = exact_log2 (lsb);
2878 mask2 = ~(mask + lsb - 1);
2879 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
2880 /* mask has no zeroes but trailing zeroes <==> ! mask2 */
2881 if (! mask2)
2882 best_cost = shift_insns[right] + shift_insns[right + left];
2883 /* mask has no trailing zeroes <==> ! right */
2884 else if (! right && mask2 == ~(lsb2 - 1))
2886 int late_right = exact_log2 (lsb2);
2887 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2889 /* Try to use zero extend. */
2890 if (mask2 == ~(lsb2 - 1))
2892 int width, first;
2894 for (width = 8; width <= 16; width += 8)
2896 /* Can we zero-extend right away? */
2897 if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2899 cost
2900 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2901 if (cost < best_cost)
2903 best = 1;
2904 best_cost = cost;
2905 best_right = right;
2906 best_len = cost;
2907 if (attrp)
2908 attrp[2] = -1;
2910 continue;
2912 /* ??? Could try to put zero extend into initial right shift,
2913 or even shift a bit left before the right shift. */
2914 /* Determine value of first part of left shift, to get to the
2915 zero extend cut-off point. */
2916 first = width - exact_log2 (lsb2) + right;
2917 if (first >= 0 && right + left - first >= 0)
2919 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2920 + ext_shift_insns[right + left - first];
2921 if (cost < best_cost)
2923 best = 1;
2924 best_cost = cost;
2925 best_right = right;
2926 best_len = cost;
2927 if (attrp)
2928 attrp[2] = first;
2933 /* Try to use r0 AND pattern */
2934 for (i = 0; i <= 2; i++)
2936 if (i > right)
2937 break;
2938 if (! CONST_OK_FOR_K08 (mask >> i))
2939 continue;
2940 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2941 if (cost < best_cost)
2943 best = 2;
2944 best_cost = cost;
2945 best_right = i;
2946 best_len = cost - 1;
2949 /* Try to use a scratch register to hold the AND operand. */
2950 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2951 for (i = 0; i <= 2; i++)
2953 if (i > right)
2954 break;
2955 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2956 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2957 if (cost < best_cost)
2959 best = 4 - can_ext;
2960 best_cost = cost;
2961 best_right = i;
2962 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2966 if (attrp)
2968 attrp[0] = best_right;
2969 attrp[1] = best_len;
2971 return best;
2974 /* This is used in length attributes of the unnamed instructions
2975 corresponding to shl_and_kind return values of 1 and 2. */
2977 shl_and_length (rtx insn)
2979 rtx set_src, left_rtx, mask_rtx;
2980 int attributes[3];
2982 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2983 left_rtx = XEXP (XEXP (set_src, 0), 1);
2984 mask_rtx = XEXP (set_src, 1);
2985 shl_and_kind (left_rtx, mask_rtx, attributes);
2986 return attributes[1];
2989 /* This is used in length attribute of the and_shl_scratch instruction. */
2992 shl_and_scr_length (rtx insn)
2994 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2995 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2996 rtx op = XEXP (set_src, 0);
2997 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2998 op = XEXP (XEXP (op, 0), 0);
2999 return len + shift_insns[INTVAL (XEXP (op, 1))];
3002 /* Generate rtl for instructions for which shl_and_kind advised a particular
3003 method of generating them, i.e. returned a nonzero kind. */
3006 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
3008 int attributes[3];
3009 unsigned HOST_WIDE_INT mask;
3010 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
3011 int right, total_shift;
3012 void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
3014 right = attributes[0];
3015 total_shift = INTVAL (left_rtx) + right;
3016 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
3017 switch (kind)
3019 default:
3020 return -1;
3021 case 1:
3023 int first = attributes[2];
3024 rtx operands[3];
3026 if (first < 0)
3028 emit_insn ((mask << right) <= 0xff
3029 ? gen_zero_extendqisi2 (dest,
3030 gen_lowpart (QImode, source))
3031 : gen_zero_extendhisi2 (dest,
3032 gen_lowpart (HImode, source)));
3033 source = dest;
3035 if (source != dest)
3036 emit_insn (gen_movsi (dest, source));
3037 operands[0] = dest;
3038 if (right)
3040 operands[2] = GEN_INT (right);
3041 gen_shifty_hi_op (LSHIFTRT, operands);
3043 if (first > 0)
3045 operands[2] = GEN_INT (first);
3046 gen_shifty_hi_op (ASHIFT, operands);
3047 total_shift -= first;
3048 mask <<= first;
3050 if (first >= 0)
3051 emit_insn (mask <= 0xff
3052 ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
3053 : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3054 if (total_shift > 0)
3056 operands[2] = GEN_INT (total_shift);
3057 gen_shifty_hi_op (ASHIFT, operands);
3059 break;
3061 case 4:
3062 shift_gen_fun = gen_shifty_op;
3063 case 3:
3064 /* If the topmost bit that matters is set, set the topmost bits
3065 that don't matter. This way, we might be able to get a shorter
3066 signed constant. */
3067 if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
3068 mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
3069 case 2:
3070 /* Don't expand fine-grained when combining, because that will
3071 make the pattern fail. */
3072 if (currently_expanding_to_rtl
3073 || reload_in_progress || reload_completed)
3075 rtx operands[3];
3077 /* Cases 3 and 4 should be handled by this split
3078 only while combining */
3079 gcc_assert (kind <= 2);
3080 if (right)
3082 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
3083 source = dest;
3085 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
3086 if (total_shift)
3088 operands[0] = dest;
3089 operands[1] = dest;
3090 operands[2] = GEN_INT (total_shift);
3091 shift_gen_fun (ASHIFT, operands);
3093 break;
3095 else
3097 int neg = 0;
3098 if (kind != 4 && total_shift < 16)
3100 neg = -ext_shift_amounts[total_shift][1];
3101 if (neg > 0)
3102 neg -= ext_shift_amounts[total_shift][2];
3103 else
3104 neg = 0;
3106 emit_insn (gen_and_shl_scratch (dest, source,
3107 GEN_INT (right),
3108 GEN_INT (mask),
3109 GEN_INT (total_shift + neg),
3110 GEN_INT (neg)));
3111 emit_insn (gen_movsi (dest, dest));
3112 break;
3115 return 0;
3118 /* Try to find a good way to implement the combiner pattern
3119 [(set (match_operand:SI 0 "register_operand" "=r")
3120 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3121 (match_operand:SI 2 "const_int_operand" "n")
3122 (match_operand:SI 3 "const_int_operand" "n")
3123 (const_int 0)))
3124 (clobber (reg:SI T_REG))]
3125 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3126 return 0 for simple left / right shift combination.
3127 return 1 for left shift / 8 bit sign extend / left shift.
3128 return 2 for left shift / 16 bit sign extend / left shift.
3129 return 3 for left shift / 8 bit sign extend / shift / sign extend.
3130 return 4 for left shift / 16 bit sign extend / shift / sign extend.
3131 return 5 for left shift / 16 bit sign extend / right shift
3132 return 6 for < 8 bit sign extend / left shift.
3133 return 7 for < 8 bit sign extend / left shift / single right shift.
3134 If COSTP is nonzero, assign the calculated cost to *COSTP. */
3137 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3139 int left, size, insize, ext;
3140 int cost = 0, best_cost;
3141 int kind;
3143 left = INTVAL (left_rtx);
3144 size = INTVAL (size_rtx);
3145 insize = size - left;
3146 gcc_assert (insize > 0);
3147 /* Default to left / right shift. */
3148 kind = 0;
3149 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3150 if (size <= 16)
3152 /* 16 bit shift / sign extend / 16 bit shift */
3153 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3154 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3155 below, by alternative 3 or something even better. */
3156 if (cost < best_cost)
3158 kind = 5;
3159 best_cost = cost;
3162 /* Try a plain sign extend between two shifts. */
3163 for (ext = 16; ext >= insize; ext -= 8)
3165 if (ext <= size)
3167 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3168 if (cost < best_cost)
3170 kind = ext / (unsigned) 8;
3171 best_cost = cost;
3174 /* Check if we can do a sloppy shift with a final signed shift
3175 restoring the sign. */
3176 if (EXT_SHIFT_SIGNED (size - ext))
3177 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3178 /* If not, maybe it's still cheaper to do the second shift sloppy,
3179 and do a final sign extend? */
3180 else if (size <= 16)
3181 cost = ext_shift_insns[ext - insize] + 1
3182 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3183 else
3184 continue;
3185 if (cost < best_cost)
3187 kind = ext / (unsigned) 8 + 2;
3188 best_cost = cost;
3191 /* Check if we can sign extend in r0 */
3192 if (insize < 8)
3194 cost = 3 + shift_insns[left];
3195 if (cost < best_cost)
3197 kind = 6;
3198 best_cost = cost;
3200 /* Try the same with a final signed shift. */
3201 if (left < 31)
3203 cost = 3 + ext_shift_insns[left + 1] + 1;
3204 if (cost < best_cost)
3206 kind = 7;
3207 best_cost = cost;
3211 if (TARGET_SH3)
3213 /* Try to use a dynamic shift. */
3214 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3215 if (cost < best_cost)
3217 kind = 0;
3218 best_cost = cost;
3221 if (costp)
3222 *costp = cost;
3223 return kind;
3226 /* Function to be used in the length attribute of the instructions
3227 implementing this pattern. */
3230 shl_sext_length (rtx insn)
3232 rtx set_src, left_rtx, size_rtx;
3233 int cost;
3235 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3236 left_rtx = XEXP (XEXP (set_src, 0), 1);
3237 size_rtx = XEXP (set_src, 1);
3238 shl_sext_kind (left_rtx, size_rtx, &cost);
3239 return cost;
3242 /* Generate rtl for this pattern */
3245 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3247 int kind;
3248 int left, size, insize, cost;
3249 rtx operands[3];
3251 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3252 left = INTVAL (left_rtx);
3253 size = INTVAL (size_rtx);
3254 insize = size - left;
3255 switch (kind)
3257 case 1:
3258 case 2:
3259 case 3:
3260 case 4:
3262 int ext = kind & 1 ? 8 : 16;
3263 int shift2 = size - ext;
3265 /* Don't expand fine-grained when combining, because that will
3266 make the pattern fail. */
3267 if (! currently_expanding_to_rtl
3268 && ! reload_in_progress && ! reload_completed)
3270 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3271 emit_insn (gen_movsi (dest, source));
3272 break;
3274 if (dest != source)
3275 emit_insn (gen_movsi (dest, source));
3276 operands[0] = dest;
3277 if (ext - insize)
3279 operands[2] = GEN_INT (ext - insize);
3280 gen_shifty_hi_op (ASHIFT, operands);
3282 emit_insn (kind & 1
3283 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3284 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3285 if (kind <= 2)
3287 if (shift2)
3289 operands[2] = GEN_INT (shift2);
3290 gen_shifty_op (ASHIFT, operands);
3293 else
3295 if (shift2 > 0)
3297 if (EXT_SHIFT_SIGNED (shift2))
3299 operands[2] = GEN_INT (shift2 + 1);
3300 gen_shifty_op (ASHIFT, operands);
3301 operands[2] = const1_rtx;
3302 gen_shifty_op (ASHIFTRT, operands);
3303 break;
3305 operands[2] = GEN_INT (shift2);
3306 gen_shifty_hi_op (ASHIFT, operands);
3308 else if (shift2)
3310 operands[2] = GEN_INT (-shift2);
3311 gen_shifty_hi_op (LSHIFTRT, operands);
3313 emit_insn (size <= 8
3314 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3315 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3317 break;
3319 case 5:
3321 int i = 16 - size;
3322 if (! currently_expanding_to_rtl
3323 && ! reload_in_progress && ! reload_completed)
3324 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3325 else
3327 operands[0] = dest;
3328 operands[2] = GEN_INT (16 - insize);
3329 gen_shifty_hi_op (ASHIFT, operands);
3330 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3332 /* Don't use gen_ashrsi3 because it generates new pseudos. */
3333 while (--i >= 0)
3334 gen_ashift (ASHIFTRT, 1, dest);
3335 break;
3337 case 6:
3338 case 7:
3339 /* Don't expand fine-grained when combining, because that will
3340 make the pattern fail. */
3341 if (! currently_expanding_to_rtl
3342 && ! reload_in_progress && ! reload_completed)
3344 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3345 emit_insn (gen_movsi (dest, source));
3346 break;
3348 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3349 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3350 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
3351 operands[0] = dest;
3352 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3353 gen_shifty_op (ASHIFT, operands);
3354 if (kind == 7)
3355 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3356 break;
3357 default:
3358 return -1;
3360 return 0;
3363 /* Prefix a symbol_ref name with "datalabel". */
3366 gen_datalabel_ref (rtx sym)
3368 const char *str;
3370 if (GET_CODE (sym) == LABEL_REF)
3371 return gen_rtx_CONST (GET_MODE (sym),
3372 gen_rtx_UNSPEC (GET_MODE (sym),
3373 gen_rtvec (1, sym),
3374 UNSPEC_DATALABEL));
3376 gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3378 str = XSTR (sym, 0);
3379 /* Share all SYMBOL_REF strings with the same value - that is important
3380 for cse. */
3381 str = IDENTIFIER_POINTER (get_identifier (str));
3382 XSTR (sym, 0) = str;
3384 return sym;
3388 static alloc_pool label_ref_list_pool;
3390 typedef struct label_ref_list_d
3392 rtx label;
3393 struct label_ref_list_d *next;
3394 } *label_ref_list_t;
3396 /* The SH cannot load a large constant into a register, constants have to
3397 come from a pc relative load. The reference of a pc relative load
3398 instruction must be less than 1k in front of the instruction. This
3399 means that we often have to dump a constant inside a function, and
3400 generate code to branch around it.
3402 It is important to minimize this, since the branches will slow things
3403 down and make things bigger.
3405 Worst case code looks like:
3407 mov.l L1,rn
3408 bra L2
3410 align
3411 L1: .long value
3415 mov.l L3,rn
3416 bra L4
3418 align
3419 L3: .long value
3423 We fix this by performing a scan before scheduling, which notices which
3424 instructions need to have their operands fetched from the constant table
3425 and builds the table.
3427 The algorithm is:
3429 scan, find an instruction which needs a pcrel move. Look forward, find the
3430 last barrier which is within MAX_COUNT bytes of the requirement.
3431 If there isn't one, make one. Process all the instructions between
3432 the find and the barrier.
3434 In the above example, we can tell that L3 is within 1k of L1, so
3435 the first move can be shrunk from the 3 insn+constant sequence into
3436 just 1 insn, and the constant moved to L3 to make:
3438 mov.l L1,rn
3440 mov.l L3,rn
3441 bra L4
3443 align
3444 L3:.long value
3445 L4:.long value
3447 Then the second move becomes the target for the shortening process. */
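/* The 1k limit mentioned above is the reach of a pc-relative mov.l load;
   a mov.w load reaches only about half as far.  See the si_limit /
   hi_limit computation in find_barrier below for the exact figures
   (1018 and 510).  */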
3449 typedef struct
3451 rtx value; /* Value in table. */
3452 rtx label; /* Label of value. */
3453 label_ref_list_t wend; /* End of window. */
3454 enum machine_mode mode; /* Mode of value. */
3456 /* True if this constant is accessed as part of a post-increment
3457 sequence. Note that HImode constants are never accessed in this way. */
3458 bool part_of_sequence_p;
3459 } pool_node;
3461 /* The maximum number of constants that can fit into one pool, since
3462 constants in the range 0..510 are at least 2 bytes long, and in the
3463 range from there to 1018 at least 4 bytes. */
3465 #define MAX_POOL_SIZE 372
3466 static pool_node pool_vector[MAX_POOL_SIZE];
3467 static int pool_size;
3468 static rtx pool_window_label;
3469 static int pool_window_last;
3471 static int max_labelno_before_reorg;
3473 /* ??? If we need a constant in HImode which is the truncated value of a
3474 constant we need in SImode, we could combine the two entries thus saving
3475 two bytes. Is this common enough to be worth the effort of implementing
3476 it? */
3478 /* ??? This stuff should be done at the same time that we shorten branches.
3479 As it is now, we must assume that all branches are the maximum size, and
3480 this causes us to almost always output constant pools sooner than
3481 necessary. */
3483 /* Add a constant to the pool and return its label. */
3485 static rtx
3486 add_constant (rtx x, enum machine_mode mode, rtx last_value)
3488 int i;
3489 rtx lab, new;
3490 label_ref_list_t ref, newref;
3492 /* First see if we've already got it. */
3493 for (i = 0; i < pool_size; i++)
3495 if (x->code == pool_vector[i].value->code
3496 && mode == pool_vector[i].mode)
3498 if (x->code == CODE_LABEL)
3500 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3501 continue;
3503 if (rtx_equal_p (x, pool_vector[i].value))
3505 lab = new = 0;
3506 if (! last_value
3507 || ! i
3508 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3510 new = gen_label_rtx ();
3511 LABEL_REFS (new) = pool_vector[i].label;
3512 pool_vector[i].label = lab = new;
3514 if (lab && pool_window_label)
3516 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3517 newref->label = pool_window_label;
3518 ref = pool_vector[pool_window_last].wend;
3519 newref->next = ref;
3520 pool_vector[pool_window_last].wend = newref;
3522 if (new)
3523 pool_window_label = new;
3524 pool_window_last = i;
3525 return lab;
3530 /* Need a new one. */
3531 pool_vector[pool_size].value = x;
3532 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3534 lab = 0;
3535 pool_vector[pool_size - 1].part_of_sequence_p = true;
3537 else
3538 lab = gen_label_rtx ();
3539 pool_vector[pool_size].mode = mode;
3540 pool_vector[pool_size].label = lab;
3541 pool_vector[pool_size].wend = NULL;
3542 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3543 if (lab && pool_window_label)
3545 newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3546 newref->label = pool_window_label;
3547 ref = pool_vector[pool_window_last].wend;
3548 newref->next = ref;
3549 pool_vector[pool_window_last].wend = newref;
3551 if (lab)
3552 pool_window_label = lab;
3553 pool_window_last = pool_size;
3554 pool_size++;
3555 return lab;
3558 /* Output the literal table. START, if nonzero, is the first instruction
3559 this table is needed for, and also indicates that there is at least one
3560 casesi_worker_2 instruction; we have to emit the operand3 labels from
3561 these insns at a 4-byte aligned position. BARRIER is the barrier
3562 after which we are to place the table. */
3564 static void
3565 dump_table (rtx start, rtx barrier)
3567 rtx scan = barrier;
3568 int i;
3569 int need_align = 1;
3570 rtx lab;
3571 label_ref_list_t ref;
3572 int have_df = 0;
3574 /* Do two passes, first time dump out the HI sized constants. */
3576 for (i = 0; i < pool_size; i++)
3578 pool_node *p = &pool_vector[i];
3580 if (p->mode == HImode)
3582 if (need_align)
3584 scan = emit_insn_after (gen_align_2 (), scan);
3585 need_align = 0;
3587 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3588 scan = emit_label_after (lab, scan);
3589 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
3590 scan);
3591 for (ref = p->wend; ref; ref = ref->next)
3593 lab = ref->label;
3594 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3597 else if (p->mode == DFmode)
3598 have_df = 1;
3601 need_align = 1;
3603 if (start)
3605 scan = emit_insn_after (gen_align_4 (), scan);
3606 need_align = 0;
3607 for (; start != barrier; start = NEXT_INSN (start))
3608 if (GET_CODE (start) == INSN
3609 && recog_memoized (start) == CODE_FOR_casesi_worker_2)
3611 rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
3612 rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
3614 scan = emit_label_after (lab, scan);
3617 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
3619 rtx align_insn = NULL_RTX;
3621 scan = emit_label_after (gen_label_rtx (), scan);
3622 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3623 need_align = 0;
3625 for (i = 0; i < pool_size; i++)
3627 pool_node *p = &pool_vector[i];
3629 switch (p->mode)
3631 case HImode:
3632 break;
3633 case SImode:
3634 case SFmode:
3635 if (align_insn && !p->part_of_sequence_p)
3637 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3638 emit_label_before (lab, align_insn);
3639 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
3640 align_insn);
3641 for (ref = p->wend; ref; ref = ref->next)
3643 lab = ref->label;
3644 emit_insn_before (gen_consttable_window_end (lab),
3645 align_insn);
3647 delete_insn (align_insn);
3648 align_insn = NULL_RTX;
3649 continue;
3651 else
3653 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3654 scan = emit_label_after (lab, scan);
3655 scan = emit_insn_after (gen_consttable_4 (p->value,
3656 const0_rtx), scan);
3657 need_align = ! need_align;
3659 break;
3660 case DFmode:
3661 if (need_align)
3663 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3664 align_insn = scan;
3665 need_align = 0;
3667 case DImode:
3668 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3669 scan = emit_label_after (lab, scan);
3670 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3671 scan);
3672 break;
3673 default:
3674 gcc_unreachable ();
3677 if (p->mode != HImode)
3679 for (ref = p->wend; ref; ref = ref->next)
3681 lab = ref->label;
3682 scan = emit_insn_after (gen_consttable_window_end (lab),
3683 scan);
3688 pool_size = 0;
3691 for (i = 0; i < pool_size; i++)
3693 pool_node *p = &pool_vector[i];
3695 switch (p->mode)
3697 case HImode:
3698 break;
3699 case SImode:
3700 case SFmode:
3701 if (need_align)
3703 need_align = 0;
3704 scan = emit_label_after (gen_label_rtx (), scan);
3705 scan = emit_insn_after (gen_align_4 (), scan);
3707 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3708 scan = emit_label_after (lab, scan);
3709 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
3710 scan);
3711 break;
3712 case DFmode:
3713 case DImode:
3714 if (need_align)
3716 need_align = 0;
3717 scan = emit_label_after (gen_label_rtx (), scan);
3718 scan = emit_insn_after (gen_align_4 (), scan);
3720 for (lab = p->label; lab; lab = LABEL_REFS (lab))
3721 scan = emit_label_after (lab, scan);
3722 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3723 scan);
3724 break;
3725 default:
3726 gcc_unreachable ();
3729 if (p->mode != HImode)
3731 for (ref = p->wend; ref; ref = ref->next)
3733 lab = ref->label;
3734 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3739 scan = emit_insn_after (gen_consttable_end (), scan);
3740 scan = emit_barrier_after (scan);
3741 pool_size = 0;
3742 pool_window_label = NULL_RTX;
3743 pool_window_last = 0;
3746 /* Return nonzero if constant would be an ok source for a
3747 mov.w instead of a mov.l. */
3749 static int
3750 hi_const (rtx src)
3752 return (GET_CODE (src) == CONST_INT
3753 && INTVAL (src) >= -32768
3754 && INTVAL (src) <= 32767);
3757 #define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
3759 /* Nonzero if the insn is a move instruction which needs to be fixed. */
3761 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3762 CONST_DOUBLE input value is CONST_OK_FOR_I08. For an SFmode move, we don't
3763 need to fix it if the input value is CONST_OK_FOR_I08. */
3765 static int
3766 broken_move (rtx insn)
3768 if (GET_CODE (insn) == INSN)
3770 rtx pat = PATTERN (insn);
3771 if (GET_CODE (pat) == PARALLEL)
3772 pat = XVECEXP (pat, 0, 0);
3773 if (GET_CODE (pat) == SET
3774 /* We can load any 8-bit value if we don't care what the high
3775 order bits end up as. */
3776 && GET_MODE (SET_DEST (pat)) != QImode
3777 && (CONSTANT_P (SET_SRC (pat))
3778 /* Match mova_const. */
3779 || (GET_CODE (SET_SRC (pat)) == UNSPEC
3780 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3781 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3782 && ! (TARGET_SH2E
3783 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3784 && (fp_zero_operand (SET_SRC (pat))
3785 || fp_one_operand (SET_SRC (pat)))
3786 /* ??? If this is a -m4 or -m4-single compilation, in general
3787 we don't know the current setting of fpscr, so disable fldi.
3788 There is an exception if this was a register-register move
3789 before reload - and hence it was ascertained that we have
3790 single precision setting - and in a post-reload optimization
3791 we changed this to do a constant load. In that case
3792 we don't have an r0 clobber, hence we must use fldi. */
3793 && (! TARGET_SH4 || TARGET_FMOVD
3794 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3795 == SCRATCH))
3796 && GET_CODE (SET_DEST (pat)) == REG
3797 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3798 && ! (TARGET_SH2A
3799 && GET_MODE (SET_DEST (pat)) == SImode
3800 && (satisfies_constraint_I20 (SET_SRC (pat))
3801 || satisfies_constraint_I28 (SET_SRC (pat))))
3802 && ! satisfies_constraint_I08 (SET_SRC (pat)))
3803 return 1;
3806 return 0;
3809 static int
3810 mova_p (rtx insn)
3812 return (GET_CODE (insn) == INSN
3813 && GET_CODE (PATTERN (insn)) == SET
3814 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3815 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3816 /* Don't match mova_const. */
3817 && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
3820 /* Fix up a mova from a switch that went out of range. */
3821 static void
3822 fixup_mova (rtx mova)
3824 PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
3825 if (! flag_pic)
3827 SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
3828 INSN_CODE (mova) = -1;
3830 else
3832 rtx worker = mova;
3833 rtx lab = gen_label_rtx ();
3834 rtx wpat, wpat0, wpat1, wsrc, diff;
3838 worker = NEXT_INSN (worker);
3839 gcc_assert (worker
3840 && GET_CODE (worker) != CODE_LABEL
3841 && GET_CODE (worker) != JUMP_INSN);
3842 } while (GET_CODE (worker) == NOTE
3843 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3844 wpat = PATTERN (worker);
3845 wpat0 = XVECEXP (wpat, 0, 0);
3846 wpat1 = XVECEXP (wpat, 0, 1);
3847 wsrc = SET_SRC (wpat0);
3848 PATTERN (worker) = (gen_casesi_worker_2
3849 (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3850 XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3851 XEXP (wpat1, 0)));
3852 INSN_CODE (worker) = -1;
3853 diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3854 gen_rtx_LABEL_REF (Pmode, lab));
3855 diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3856 SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3857 INSN_CODE (mova) = -1;
3861 /* NEW_MOVA is a mova we've just encountered while scanning forward. Update
3862 *num_mova, and check if the new mova is not nested within the first one.
3863 Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
3864 2 if new_mova has been assigned to *first_mova, -1 otherwise. */
3865 static int
3866 untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
3868 int n_addr = 0; /* Initialization to shut up spurious warning. */
3869 int f_target, n_target = 0; /* Likewise. */
3871 if (optimize)
3873 n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
3874 n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
3875 if (n_addr > n_target || n_addr + 1022 < n_target)
3877 /* Change the mova into a load.
3878 broken_move will then return true for it. */
3879 fixup_mova (new_mova);
3880 return 1;
3883 if (!(*num_mova)++)
3885 *first_mova = new_mova;
3886 return 2;
3888 if (!optimize
3889 || ((f_target
3890 = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
3891 >= n_target))
3892 return -1;
3894 (*num_mova)--;
3895 if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
3896 > n_target - n_addr)
3898 fixup_mova (*first_mova);
3899 return 0;
3901 else
3903 fixup_mova (new_mova);
3904 return 1;
3908 /* Find the last barrier from insn FROM which is close enough to hold the
3909 constant pool. If we can't find one, then create one near the end of
3910 the range. */
3912 static rtx
3913 find_barrier (int num_mova, rtx mova, rtx from)
3915 int count_si = 0;
3916 int count_hi = 0;
3917 int found_hi = 0;
3918 int found_si = 0;
3919 int found_di = 0;
3920 int hi_align = 2;
3921 int si_align = 2;
3922 int leading_mova = num_mova;
3923 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3924 int si_limit;
3925 int hi_limit;
3926 rtx orig = from;
3928 /* For HImode: range is 510, add 4 because pc counts from address of
3929 second instruction after this one, subtract 2 for the jump instruction
3930 that we may need to emit before the table, subtract 2 for the instruction
3931 that fills the jump delay slot (in very rare cases, reorg will take an
3932 instruction from after the constant pool or will leave the delay slot
3933 empty). This gives 510.
3934 For SImode: range is 1020, add 4 because pc counts from address of
3935 second instruction after this one, subtract 2 in case pc is 2 byte
3936 aligned, subtract 2 for the jump instruction that we may need to emit
3937 before the table, subtract 2 for the instruction that fills the jump
3938 delay slot. This gives 1018. */
3940 /* The branch will always be shortened now that the reference address for
3941 forward branches is the successor address, thus we need no longer make
3942 adjustments to the [sh]i_limit for -O0. */
3944 si_limit = 1018;
3945 hi_limit = 510;
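/* Worked out from the comment above (figures added here for
   illustration): SImode 1020 + 4 - 2 - 2 - 2 = 1018, and
   HImode 510 + 4 - 2 - 2 = 510.  */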
3947 while (from && count_si < si_limit && count_hi < hi_limit)
3949 int inc = get_attr_length (from);
3950 int new_align = 1;
3952 /* If this is a label that existed at the time of the compute_alignments
3953 call, determine the alignment. N.B. When find_barrier recurses for
3954 an out-of-reach mova, we might see labels at the start of previously
3955 inserted constant tables. */
3956 if (GET_CODE (from) == CODE_LABEL
3957 && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
3959 if (optimize)
3960 new_align = 1 << label_to_alignment (from);
3961 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3962 new_align = 1 << barrier_align (from);
3963 else
3964 new_align = 1;
3965 inc = 0;
3967 /* In case we are scanning a constant table because of recursion, check
3968 for explicit alignments. If the table is long, we might be forced
3969 to emit the new table in front of it; the length of the alignment
3970 might be the last straw. */
3971 else if (GET_CODE (from) == INSN
3972 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3973 && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
3974 new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
3975 /* When we find the end of a constant table, paste the new constant
3976 at the end. That is better than putting it in front because
3977 this way, we don't need extra alignment for adding a 4-byte-aligned
3978 mov(a) label to a 2/4 or 8/4 byte aligned table. */
3979 else if (GET_CODE (from) == INSN
3980 && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3981 && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
3982 return from;
3984 if (GET_CODE (from) == BARRIER)
3986 rtx next;
3988 found_barrier = from;
3990 /* If we are at the end of the function, or in front of an alignment
3991 instruction, we need not insert an extra alignment. We prefer
3992 this kind of barrier. */
3993 if (barrier_align (from) > 2)
3994 good_barrier = from;
3996 /* If we are at the end of a hot/cold block, dump the constants
3997 here. */
3998 next = NEXT_INSN (from);
3999 if (next
4000 && NOTE_P (next)
4001 && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
4002 break;
4005 if (broken_move (from))
4007 rtx pat, src, dst;
4008 enum machine_mode mode;
4010 pat = PATTERN (from);
4011 if (GET_CODE (pat) == PARALLEL)
4012 pat = XVECEXP (pat, 0, 0);
4013 src = SET_SRC (pat);
4014 dst = SET_DEST (pat);
4015 mode = GET_MODE (dst);
4017 /* We must explicitly check the mode, because sometimes the
4018 front end will generate code to load unsigned constants into
4019 HImode targets without properly sign extending them. */
4020 if (mode == HImode
4021 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
4023 found_hi += 2;
4024 /* We put the short constants before the long constants, so
4025 we must count the length of short constants in the range
4026 for the long constants. */
4027 /* ??? This isn't optimal, but is easy to do. */
4028 si_limit -= 2;
4030 else
4032 /* We dump DF/DI constants before SF/SI ones, because
4033 the limit is the same, but the alignment requirements
4034 are higher. We may waste up to 4 additional bytes
4035 for alignment, and the DF/DI constant may have
4036 another SF/SI constant placed before it. */
4037 if (TARGET_SHCOMPACT
4038 && ! found_di
4039 && (mode == DFmode || mode == DImode))
4041 found_di = 1;
4042 si_limit -= 8;
4044 while (si_align > 2 && found_si + si_align - 2 > count_si)
4045 si_align >>= 1;
4046 if (found_si > count_si)
4047 count_si = found_si;
4048 found_si += GET_MODE_SIZE (mode);
4049 if (num_mova)
4050 si_limit -= GET_MODE_SIZE (mode);
4054 if (mova_p (from))
4056 switch (untangle_mova (&num_mova, &mova, from))
4058 case 0: return find_barrier (0, 0, mova);
4059 case 2:
4061 leading_mova = 0;
4062 barrier_before_mova
4063 = good_barrier ? good_barrier : found_barrier;
4065 default: break;
4067 if (found_si > count_si)
4068 count_si = found_si;
4070 else if (GET_CODE (from) == JUMP_INSN
4071 && (GET_CODE (PATTERN (from)) == ADDR_VEC
4072 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
4074 if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
4075 || (num_mova
4076 && (prev_nonnote_insn (from)
4077 == XEXP (MOVA_LABELREF (mova), 0))))
4078 num_mova--;
4079 if (barrier_align (next_real_insn (from)) == align_jumps_log)
4081 /* We have just passed the barrier in front of the
4082 ADDR_DIFF_VEC, which is stored in found_barrier. Since
4083 the ADDR_DIFF_VEC is accessed as data, just like our pool
4084 constants, this is a good opportunity to accommodate what
4085 we have gathered so far.
4086 If we waited any longer, we could end up at a barrier in
4087 front of code, which gives worse cache usage for separated
4088 instruction / data caches. */
4089 good_barrier = found_barrier;
4090 break;
4092 else
4094 rtx body = PATTERN (from);
4095 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
4098 /* For the SH1, we generate alignments even after jumps-around-jumps. */
4099 else if (GET_CODE (from) == JUMP_INSN
4100 && ! TARGET_SH2
4101 && ! TARGET_SMALLCODE)
4102 new_align = 4;
4104 if (found_si)
4106 count_si += inc;
4107 if (new_align > si_align)
4109 si_limit -= (count_si - 1) & (new_align - si_align);
4110 si_align = new_align;
4112 count_si = (count_si + new_align - 1) & -new_align;
4114 if (found_hi)
4116 count_hi += inc;
4117 if (new_align > hi_align)
4119 hi_limit -= (count_hi - 1) & (new_align - hi_align);
4120 hi_align = new_align;
4122 count_hi = (count_hi + new_align - 1) & -new_align;
4124 from = NEXT_INSN (from);
4127 if (num_mova)
4129 if (leading_mova)
4131 /* Try as we might, the leading mova is out of range. Change
4132 it into a load (which will become a pcload) and retry. */
4133 fixup_mova (mova);
4134 return find_barrier (0, 0, mova);
4136 else
4138 /* Insert the constant pool table before the mova instruction,
4139 to prevent the mova label reference from going out of range. */
4140 from = mova;
4141 good_barrier = found_barrier = barrier_before_mova;
4145 if (found_barrier)
4147 if (good_barrier && next_real_insn (found_barrier))
4148 found_barrier = good_barrier;
4150 else
4152 /* We didn't find a barrier in time to dump our stuff,
4153 so we'll make one. */
4154 rtx label = gen_label_rtx ();
4156 /* If we exceeded the range, then we must back up over the last
4157 instruction we looked at. Otherwise, we just need to undo the
4158 NEXT_INSN at the end of the loop. */
4159 if (PREV_INSN (from) != orig
4160 && (count_hi > hi_limit || count_si > si_limit))
4161 from = PREV_INSN (PREV_INSN (from));
4162 else
4163 from = PREV_INSN (from);
4165 /* Walk back to be just before any jump or label.
4166 Putting it before a label reduces the number of times the branch
4167 around the constant pool table will be hit. Putting it before
4168 a jump makes it more likely that the bra delay slot will be
4169 filled. */
4170 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
4171 || GET_CODE (from) == CODE_LABEL)
4172 from = PREV_INSN (from);
4174 from = emit_jump_insn_after (gen_jump (label), from);
4175 JUMP_LABEL (from) = label;
4176 LABEL_NUSES (label) = 1;
4177 found_barrier = emit_barrier_after (from);
4178 emit_label_after (label, found_barrier);
4181 return found_barrier;
4184 /* If the instruction INSN is implemented by a special function, and we can
4185 positively find the register that is used to call the sfunc, and this
4186 register is not used anywhere else in this instruction - except as the
4187 destination of a set, return this register; else, return 0. */
4189 sfunc_uses_reg (rtx insn)
4191 int i;
4192 rtx pattern, part, reg_part, reg;
4194 if (GET_CODE (insn) != INSN)
4195 return 0;
4196 pattern = PATTERN (insn);
4197 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4198 return 0;
4200 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4202 part = XVECEXP (pattern, 0, i);
4203 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4204 reg_part = part;
4206 if (! reg_part)
4207 return 0;
4208 reg = XEXP (reg_part, 0);
4209 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4211 part = XVECEXP (pattern, 0, i);
4212 if (part == reg_part || GET_CODE (part) == CLOBBER)
4213 continue;
4214 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4215 && GET_CODE (SET_DEST (part)) == REG)
4216 ? SET_SRC (part) : part)))
4217 return 0;
4219 return reg;
4222 /* See if the only way in which INSN uses REG is by calling it, or by
4223 setting it while calling it. Set *SET to a SET rtx if the register
4224 is set by INSN. */
4226 static int
4227 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4229 rtx pattern, reg2;
4231 *set = NULL_RTX;
4233 reg2 = sfunc_uses_reg (insn);
4234 if (reg2 && REGNO (reg2) == REGNO (reg))
4236 pattern = single_set (insn);
4237 if (pattern
4238 && GET_CODE (SET_DEST (pattern)) == REG
4239 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4240 *set = pattern;
4241 return 0;
4243 if (GET_CODE (insn) != CALL_INSN)
4245 /* We don't use rtx_equal_p because we don't care if the mode is
4246 different. */
4247 pattern = single_set (insn);
4248 if (pattern
4249 && GET_CODE (SET_DEST (pattern)) == REG
4250 && REGNO (reg) == REGNO (SET_DEST (pattern)))
4252 rtx par, part;
4253 int i;
4255 *set = pattern;
4256 par = PATTERN (insn);
4257 if (GET_CODE (par) == PARALLEL)
4258 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4260 part = XVECEXP (par, 0, i);
4261 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4262 return 1;
4264 return reg_mentioned_p (reg, SET_SRC (pattern));
4267 return 1;
4270 pattern = PATTERN (insn);
4272 if (GET_CODE (pattern) == PARALLEL)
4274 int i;
4276 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4277 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4278 return 1;
4279 pattern = XVECEXP (pattern, 0, 0);
4282 if (GET_CODE (pattern) == SET)
4284 if (reg_mentioned_p (reg, SET_DEST (pattern)))
4286 /* We don't use rtx_equal_p, because we don't care if the
4287 mode is different. */
4288 if (GET_CODE (SET_DEST (pattern)) != REG
4289 || REGNO (reg) != REGNO (SET_DEST (pattern)))
4290 return 1;
4292 *set = pattern;
4295 pattern = SET_SRC (pattern);
4298 if (GET_CODE (pattern) != CALL
4299 || GET_CODE (XEXP (pattern, 0)) != MEM
4300 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4301 return 1;
4303 return 0;
4306 /* Given X, a pattern of an insn or a part of it, return a mask of used
4307 general registers. Bits 0..15 mean that the respective registers
4308 are used as inputs in the instruction. Bits 16..31 mean that the
4309 registers 0..15, respectively, are used as outputs, or are clobbered.
4310 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
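/* Illustrative example (added, not from the original source; assumes a
   single-word register per operand): for the pattern
   (set (reg:SI 3) (plus:SI (reg:SI 1) (reg:SI 2))), the SET case below
   recurses with IS_DEST 0 for the source and 16 for the destination, so
   bits 1 and 2 (inputs r1/r2) and bit 19, i.e. 3 + 16 (output r3), end
   up set in the returned mask.  */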
4312 regs_used (rtx x, int is_dest)
4314 enum rtx_code code;
4315 const char *fmt;
4316 int i, used = 0;
4318 if (! x)
4319 return used;
4320 code = GET_CODE (x);
4321 switch (code)
4323 case REG:
4324 if (REGNO (x) < 16)
4325 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4326 << (REGNO (x) + is_dest));
4327 return 0;
4328 case SUBREG:
4330 rtx y = SUBREG_REG (x);
4332 if (GET_CODE (y) != REG)
4333 break;
4334 if (REGNO (y) < 16)
4335 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4336 << (REGNO (y) +
4337 subreg_regno_offset (REGNO (y),
4338 GET_MODE (y),
4339 SUBREG_BYTE (x),
4340 GET_MODE (x)) + is_dest));
4341 return 0;
4343 case SET:
4344 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4345 case RETURN:
4346 /* If there was a return value, it must have been indicated with USE. */
4347 return 0x00ffff00;
4348 case CLOBBER:
4349 is_dest = 1;
4350 break;
4351 case MEM:
4352 is_dest = 0;
4353 break;
4354 case CALL:
4355 used |= 0x00ff00f0;
4356 break;
4357 default:
4358 break;
4361 fmt = GET_RTX_FORMAT (code);
4363 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4365 if (fmt[i] == 'E')
4367 register int j;
4368 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4369 used |= regs_used (XVECEXP (x, i, j), is_dest);
4371 else if (fmt[i] == 'e')
4372 used |= regs_used (XEXP (x, i), is_dest);
4374 return used;
4377 /* Create an instruction that prevents redirection of a conditional branch
4378 to the destination of the JUMP with address ADDR.
4379 If the branch needs to be implemented as an indirect jump, try to find
4380 a scratch register for it.
4381 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
4382 If any preceding insn that doesn't fit into a delay slot is good enough,
4383 pass 1. Pass 2 if a definite blocking insn is needed.
4384 -1 is used internally to avoid deep recursion.
4385 If a blocking instruction is made or recognized, return it. */
4387 static rtx
4388 gen_block_redirect (rtx jump, int addr, int need_block)
4390 int dead = 0;
4391 rtx prev = prev_nonnote_insn (jump);
4392 rtx dest;
4394 /* First, check if we already have an instruction that satisfies our need. */
4395 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
4397 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4398 return prev;
4399 if (GET_CODE (PATTERN (prev)) == USE
4400 || GET_CODE (PATTERN (prev)) == CLOBBER
4401 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4402 prev = jump;
4403 else if ((need_block &= ~1) < 0)
4404 return prev;
4405 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4406 need_block = 0;
4408 if (GET_CODE (PATTERN (jump)) == RETURN)
4410 if (! need_block)
4411 return prev;
4412 /* Reorg even does nasty things with return insns that cause branches
4413 to go out of range - see find_end_label and callers. */
4414 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4416 /* We can't use JUMP_LABEL here because it might be undefined
4417 when not optimizing. */
4418 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4419 /* If the branch is out of range, try to find a scratch register for it. */
4420 if (optimize
4421 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4422 > 4092 + 4098))
4424 rtx scan;
4425 /* Don't look for the stack pointer as a scratch register,
4426 it would cause trouble if an interrupt occurred. */
4427 unsigned try = 0x7fff, used;
4428 int jump_left = flag_expensive_optimizations + 1;
4430 /* It is likely that the most recent eligible instruction is wanted for
4431 the delay slot. Therefore, find out which registers it uses, and
4432 try to avoid using them. */
4434 for (scan = jump; (scan = PREV_INSN (scan)); )
4436 enum rtx_code code;
4438 if (INSN_DELETED_P (scan))
4439 continue;
4440 code = GET_CODE (scan);
4441 if (code == CODE_LABEL || code == JUMP_INSN)
4442 break;
4443 if (code == INSN
4444 && GET_CODE (PATTERN (scan)) != USE
4445 && GET_CODE (PATTERN (scan)) != CLOBBER
4446 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4448 try &= ~regs_used (PATTERN (scan), 0);
4449 break;
4452 for (used = dead = 0, scan = JUMP_LABEL (jump);
4453 (scan = NEXT_INSN (scan)); )
4455 enum rtx_code code;
4457 if (INSN_DELETED_P (scan))
4458 continue;
4459 code = GET_CODE (scan);
4460 if (INSN_P (scan))
4462 used |= regs_used (PATTERN (scan), 0);
4463 if (code == CALL_INSN)
4464 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4465 dead |= (used >> 16) & ~used;
4466 if (dead & try)
4468 dead &= try;
4469 break;
4471 if (code == JUMP_INSN)
4473 if (jump_left-- && simplejump_p (scan))
4474 scan = JUMP_LABEL (scan);
4475 else
4476 break;
4480 /* Mask out the stack pointer again, in case it was
4481 the only 'free' register we have found. */
4482 dead &= 0x7fff;
4484 /* If the immediate destination is still in range, check for possible
4485 threading with a jump beyond the delay slot insn.
4486 Don't check if we are called recursively; the jump has been or will be
4487 checked in a different invocation in that case. */
4489 else if (optimize && need_block >= 0)
4491 rtx next = next_active_insn (next_active_insn (dest));
4492 if (next && GET_CODE (next) == JUMP_INSN
4493 && GET_CODE (PATTERN (next)) == SET
4494 && recog_memoized (next) == CODE_FOR_jump_compact)
4496 dest = JUMP_LABEL (next);
4497 if (dest
4498 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4499 > 4092 + 4098))
4500 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4504 if (dead)
4506 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
4508 /* It would be nice if we could convert the jump into an indirect
4509 jump / far branch right now, and thus exposing all constituent
4510 instructions to further optimization. However, reorg uses
4511 simplejump_p to determine if there is an unconditional jump where
4512 it should try to schedule instructions from the target of the
4513 branch; simplejump_p fails for indirect jumps even if they have
4514 a JUMP_LABEL. */
4515 rtx insn = emit_insn_before (gen_indirect_jump_scratch
4516 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4517 , jump);
4518 /* ??? We would like this to have the scope of the jump, but that
4519 scope will change when a delay slot insn of an inner scope is added.
4520 Hence, after delay slot scheduling, we'll have to expect
4521 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4522 the jump. */
4524 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4525 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4526 return insn;
4528 else if (need_block)
4529 /* We can't use JUMP_LABEL here because it might be undefined
4530 when not optimizing. */
4531 return emit_insn_before (gen_block_branch_redirect
4532 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4533 , jump);
4534 return prev;
4537 #define CONDJUMP_MIN -252
4538 #define CONDJUMP_MAX 262
4539 struct far_branch
4541 /* A label (to be placed) in front of the jump
4542 that jumps to our ultimate destination. */
4543 rtx near_label;
4544 /* Where we are going to insert it if we cannot move the jump any farther,
4545 or the jump itself if we have picked up an existing jump. */
4546 rtx insert_place;
4547 /* The ultimate destination. */
4548 rtx far_label;
4549 struct far_branch *prev;
4550 /* If the branch has already been created, its address;
4551 else the address of its first prospective user. */
4552 int address;
4555 static void gen_far_branch (struct far_branch *);
4556 enum mdep_reorg_phase_e mdep_reorg_phase;
4557 static void
4558 gen_far_branch (struct far_branch *bp)
4560 rtx insn = bp->insert_place;
4561 rtx jump;
4562 rtx label = gen_label_rtx ();
4563 int ok;
4565 emit_label_after (label, insn);
4566 if (bp->far_label)
4568 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4569 LABEL_NUSES (bp->far_label)++;
4571 else
4572 jump = emit_jump_insn_after (gen_return (), insn);
4573 /* Emit a barrier so that reorg knows that any following instructions
4574 are not reachable via a fall-through path.
4575 But don't do this when not optimizing, since we wouldn't suppress the
4576 alignment for the barrier then, and could end up with out-of-range
4577 pc-relative loads. */
4578 if (optimize)
4579 emit_barrier_after (jump);
4580 emit_label_after (bp->near_label, insn);
4581 JUMP_LABEL (jump) = bp->far_label;
4582 ok = invert_jump (insn, label, 1);
4583 gcc_assert (ok);
4585 /* If we are branching around a jump (rather than a return), prevent
4586 reorg from using an insn from the jump target as the delay slot insn -
4587 when reorg did this, it pessimized code (we'd rather hide the delay slot)
4588 and it could cause branches to go out of range. */
4589 if (bp->far_label)
4590 (emit_insn_after
4591 (gen_stuff_delay_slot
4592 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
4593 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
4594 insn));
4595 /* Prevent reorg from undoing our splits. */
4596 gen_block_redirect (jump, bp->address += 2, 2);
4599 /* Fix up ADDR_DIFF_VECs. */
4600 void
4601 fixup_addr_diff_vecs (rtx first)
4603 rtx insn;
4605 for (insn = first; insn; insn = NEXT_INSN (insn))
4607 rtx vec_lab, pat, prev, prevpat, x, braf_label;
4609 if (GET_CODE (insn) != JUMP_INSN
4610 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
4611 continue;
4612 pat = PATTERN (insn);
4613 vec_lab = XEXP (XEXP (pat, 0), 0);
4615 /* Search the matching casesi_jump_2. */
4616 for (prev = vec_lab; ; prev = PREV_INSN (prev))
4618 if (GET_CODE (prev) != JUMP_INSN)
4619 continue;
4620 prevpat = PATTERN (prev);
4621 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
4622 continue;
4623 x = XVECEXP (prevpat, 0, 1);
4624 if (GET_CODE (x) != USE)
4625 continue;
4626 x = XEXP (x, 0);
4627 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
4628 break;
4630 /* FIXME: This is a bug in the optimizer, but it seems harmless
4631 to just avoid panicking. */
4632 if (!prev)
4633 continue;
4635 /* Emit the reference label of the braf where it belongs, right after
4636 the casesi_jump_2 (i.e. braf). */
4637 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
4638 emit_label_after (braf_label, prev);
4640 /* Fix up the ADDR_DIFF_VEC to be relative
4641 to the reference address of the braf. */
4642 XEXP (XEXP (pat, 0), 0) = braf_label;
4646 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
4647 a barrier. Return the base 2 logarithm of the desired alignment. */
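/* (Clarifying note added:) e.g. a return value of 2 requests 4-byte
   (1 << 2) alignment, while 0 requests no extra alignment.  */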
4649 barrier_align (rtx barrier_or_label)
4651 rtx next = next_real_insn (barrier_or_label), pat, prev;
4652 int slot, credit, jump_to_next = 0;
4654 if (! next)
4655 return 0;
4657 pat = PATTERN (next);
4659 if (GET_CODE (pat) == ADDR_DIFF_VEC)
4660 return 2;
4662 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
4663 /* This is a barrier in front of a constant table. */
4664 return 0;
4666 prev = prev_real_insn (barrier_or_label);
4667 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
4669 pat = PATTERN (prev);
4670 /* If this is a very small table, we want to keep the alignment after
4671 the table to the minimum for proper code alignment. */
4672 return ((TARGET_SMALLCODE
4673 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
4674 <= (unsigned) 1 << (CACHE_LOG - 2)))
4675 ? 1 << TARGET_SHMEDIA : align_jumps_log);
4678 if (TARGET_SMALLCODE)
4679 return 0;
4681 if (! TARGET_SH2 || ! optimize)
4682 return align_jumps_log;
4684 /* When fixing up pcloads, a constant table might be inserted just before
4685 the basic block that ends with the barrier. Thus, we can't trust the
4686 instruction lengths before that. */
4687 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
4689 /* Check if there is an immediately preceding branch to the insn beyond
4690 the barrier. We must weigh the cost of discarding useful information
4691 from the current cache line when executing this branch and there is
4692 an alignment, against that of fetching an unneeded insn in front of the
4693 branch target when there is no alignment. */
4695 /* There are two delay_slot cases to consider. One is the simple case
4696 where the preceding branch is to the insn beyond the barrier (simple
4697 delay slot filling), and the other is where the preceding branch has
4698 a delay slot that is a duplicate of the insn after the barrier
4699 (fill_eager_delay_slots) and the branch is to the insn after the insn
4700 after the barrier. */
4702 /* PREV is presumed to be the JUMP_INSN for the barrier under
4703 investigation. Skip to the insn before it. */
4704 prev = prev_real_insn (prev);
4706 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
4707 credit >= 0 && prev && GET_CODE (prev) == INSN;
4708 prev = prev_real_insn (prev))
4710 jump_to_next = 0;
4711 if (GET_CODE (PATTERN (prev)) == USE
4712 || GET_CODE (PATTERN (prev)) == CLOBBER)
4713 continue;
4714 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
4716 prev = XVECEXP (PATTERN (prev), 0, 1);
4717 if (INSN_UID (prev) == INSN_UID (next))
4719 /* Delay slot was filled with insn at jump target. */
4720 jump_to_next = 1;
4721 continue;
4725 if (slot &&
4726 get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4727 slot = 0;
4728 credit -= get_attr_length (prev);
4730 if (prev
4731 && GET_CODE (prev) == JUMP_INSN
4732 && JUMP_LABEL (prev))
4734 rtx x;
4735 if (jump_to_next
4736 || next_real_insn (JUMP_LABEL (prev)) == next
4737 /* If relax_delay_slots() decides NEXT was redundant
4738 with some previous instruction, it will have
4739 redirected PREV's jump to the following insn. */
4740 || JUMP_LABEL (prev) == next_nonnote_insn (next)
4741 /* There is no upper bound on redundant instructions
4742 that might have been skipped, but we must not put an
4743 alignment where none had been before. */
4744 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
4745 (INSN_P (x)
4746 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
4747 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
4748 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
4750 rtx pat = PATTERN (prev);
4751 if (GET_CODE (pat) == PARALLEL)
4752 pat = XVECEXP (pat, 0, 0);
4753 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
4754 return 0;
4759 return align_jumps_log;
4762 /* If we are inside a phony loop, almost any kind of label can turn up as the
4763 first one in the loop. Aligning a braf label causes incorrect switch
4764 destination addresses; we can detect braf labels because they are
4765 followed by a BARRIER.
4766 Applying loop alignment to small constant or switch tables is a waste
4767 of space, so we suppress this too. */
4769 sh_loop_align (rtx label)
4771 rtx next = label;
4774 next = next_nonnote_insn (next);
4775 while (next && GET_CODE (next) == CODE_LABEL);
4777 if (! next
4778 || ! INSN_P (next)
4779 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
4780 || recog_memoized (next) == CODE_FOR_consttable_2)
4781 return 0;
4783 return align_loops_log;
4786 /* Do a final pass over the function, just before delayed branch
4787 scheduling. */
4789 static void
4790 sh_reorg (void)
4792 rtx first, insn, mova = NULL_RTX;
4793 int num_mova;
4794 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
4795 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
4797 first = get_insns ();
4798 max_labelno_before_reorg = max_label_num ();
4800 /* We must split call insns before introducing `mova's. If we're
4801 optimizing, they'll have already been split. Otherwise, make
4802 sure we don't split them too late. */
4803 if (! optimize)
4804 split_all_insns_noflow ();
4806 if (TARGET_SHMEDIA)
4807 return;
4809 /* If relaxing, generate pseudo-ops to associate function calls with
4810 the symbols they call. It does no harm to not generate these
4811 pseudo-ops. However, when we can generate them, it enables the
4812 linker to potentially relax the jsr to a bsr, and eliminate the
4813 register load and, possibly, the constant pool entry. */
4815 mdep_reorg_phase = SH_INSERT_USES_LABELS;
4816 if (TARGET_RELAX)
4818 /* Remove all REG_LABEL_OPERAND notes. We want to use them for our
4819 own purposes. This works because none of the remaining passes
4820 need to look at them.
4822 ??? But it may break in the future. We should use a machine
4823 dependent REG_NOTE, or some other approach entirely. */
4824 for (insn = first; insn; insn = NEXT_INSN (insn))
4826 if (INSN_P (insn))
4828 rtx note;
4830 while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
4831 NULL_RTX)) != 0)
4832 remove_note (insn, note);
4836 for (insn = first; insn; insn = NEXT_INSN (insn))
4838 rtx pattern, reg, link, set, scan, dies, label;
4839 int rescan = 0, foundinsn = 0;
4841 if (GET_CODE (insn) == CALL_INSN)
4843 pattern = PATTERN (insn);
4845 if (GET_CODE (pattern) == PARALLEL)
4846 pattern = XVECEXP (pattern, 0, 0);
4847 if (GET_CODE (pattern) == SET)
4848 pattern = SET_SRC (pattern);
4850 if (GET_CODE (pattern) != CALL
4851 || GET_CODE (XEXP (pattern, 0)) != MEM)
4852 continue;
4854 reg = XEXP (XEXP (pattern, 0), 0);
4856 else
4858 reg = sfunc_uses_reg (insn);
4859 if (! reg)
4860 continue;
4863 if (GET_CODE (reg) != REG)
4864 continue;
4866 /* Try scanning backward to find where the register is set. */
4867 link = NULL;
4868 for (scan = PREV_INSN (insn);
4869 scan && GET_CODE (scan) != CODE_LABEL;
4870 scan = PREV_INSN (scan))
4872 if (! INSN_P (scan))
4873 continue;
4875 if (! reg_mentioned_p (reg, scan))
4876 continue;
4878 if (noncall_uses_reg (reg, scan, &set))
4879 break;
4881 if (set)
4883 link = scan;
4884 break;
4888 if (! link)
4889 continue;
4891 /* The register is set at LINK. */
4893 /* We can only optimize the function call if the register is
4894 being set to a symbol. In theory, we could sometimes
4895 optimize calls to a constant location, but the assembler
4896 and linker do not support that at present. */
4897 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
4898 && GET_CODE (SET_SRC (set)) != LABEL_REF)
4899 continue;
4901 /* Scan forward from LINK to the place where REG dies, and
4902 make sure that the only insns which use REG are
4903 themselves function calls. */
4905 /* ??? This doesn't work for call targets that were allocated
4906 by reload, since there may not be a REG_DEAD note for the
4907 register. */
4909 dies = NULL_RTX;
4910 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
4912 rtx scanset;
4914 /* Don't try to trace forward past a CODE_LABEL if we haven't
4915 seen INSN yet. Ordinarily, we will only find the setting insn
4916 if it is in the same basic block. However,
4917 cross-jumping can insert code labels in between the load and
4918 the call, and can result in situations where a single call
4919 insn may have two targets depending on where we came from. */
4921 if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
4922 break;
4924 if (! INSN_P (scan))
4925 continue;
4927 /* Don't try to trace forward past a JUMP. To optimize
4928 safely, we would have to check that all the
4929 instructions at the jump destination did not use REG. */
4931 if (GET_CODE (scan) == JUMP_INSN)
4932 break;
4934 if (! reg_mentioned_p (reg, scan))
4935 continue;
4937 if (noncall_uses_reg (reg, scan, &scanset))
4938 break;
4940 if (scan == insn)
4941 foundinsn = 1;
4943 if (scan != insn
4944 && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
4946 /* There is a function call to this register other
4947 than the one we are checking. If we optimize
4948 this call, we need to rescan again below. */
4949 rescan = 1;
4952 /* ??? We shouldn't have to worry about SCANSET here.
4953 We should just be able to check for a REG_DEAD note
4954 on a function call. However, the REG_DEAD notes are
4955 apparently not dependable around libcalls; c-torture
4956 execute/920501-2 is a test case. If SCANSET is set,
4957 then this insn sets the register, so it must have
4958 died earlier. Unfortunately, this will only handle
4959 the cases in which the register is, in fact, set in a
4960 later insn. */
4962 /* ??? We shouldn't have to use FOUNDINSN here.
4963 This dates back to when we used LOG_LINKS to find
4964 the most recent insn which sets the register. */
4966 if (foundinsn
4967 && (scanset
4968 || find_reg_note (scan, REG_DEAD, reg)))
4970 dies = scan;
4971 break;
4975 if (! dies)
4977 /* Either there was a branch, or some insn used REG
4978 other than as a function call address. */
4979 continue;
4982 /* Create a code label, and put it in a REG_LABEL_OPERAND note
4983 on the insn which sets the register, and on each call insn
4984 which uses the register. In final_prescan_insn we look for
4985 the REG_LABEL_OPERAND notes, and output the appropriate label
4986 or pseudo-op. */
4988 label = gen_label_rtx ();
4989 add_reg_note (link, REG_LABEL_OPERAND, label);
4990 add_reg_note (insn, REG_LABEL_OPERAND, label);
4991 if (rescan)
4993 scan = link;
4996 rtx reg2;
4998 scan = NEXT_INSN (scan);
4999 if (scan != insn
5000 && ((GET_CODE (scan) == CALL_INSN
5001 && reg_mentioned_p (reg, scan))
5002 || ((reg2 = sfunc_uses_reg (scan))
5003 && REGNO (reg2) == REGNO (reg))))
5004 add_reg_note (scan, REG_LABEL_OPERAND, label);
5006 while (scan != dies);
5011 if (TARGET_SH2)
5012 fixup_addr_diff_vecs (first);
5014 if (optimize)
5016 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
5017 shorten_branches (first);
5020 /* Scan the function looking for move instructions which have to be
5021 changed to pc-relative loads and insert the literal tables. */
5022 label_ref_list_pool = create_alloc_pool ("label references list",
5023 sizeof (struct label_ref_list_d),
5024 30);
5025 mdep_reorg_phase = SH_FIXUP_PCLOAD;
5026 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
5028 if (mova_p (insn))
5030 /* ??? basic block reordering can move a switch table dispatch
5031 below the switch table. Check if that has happened.
5032 We only have the addresses available when optimizing; but then,
5033 this check shouldn't be needed when not optimizing. */
5034 if (!untangle_mova (&num_mova, &mova, insn))
5036 insn = mova;
5037 num_mova = 0;
5040 else if (GET_CODE (insn) == JUMP_INSN
5041 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
5042 && num_mova
5043 /* ??? loop invariant motion can also move a mova out of a
5044 loop. Since loop does this code motion anyway, maybe we
5045 should wrap UNSPEC_MOVA into a CONST, so that reload can
5046 move it back. */
5047 && ((num_mova > 1
5048 && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
5049 || (prev_nonnote_insn (insn)
5050 == XEXP (MOVA_LABELREF (mova), 0))))
5052 rtx scan;
5053 int total;
5055 num_mova--;
5057 /* Some code might have been inserted between the mova and
5058 its ADDR_DIFF_VEC. Check if the mova is still in range. */
5059 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
5060 total += get_attr_length (scan);
5062 /* range of mova is 1020, add 4 because pc counts from address of
5063 second instruction after this one, subtract 2 in case pc is 2
5064 byte aligned. Possible alignment needed for the ADDR_DIFF_VEC
5065 cancels out with alignment effects of the mova itself. */
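/* I.e. 1020 + 4 - 2 = 1022, matching the test below (worked-out figure
   added for illustration).  */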
5066 if (total > 1022)
5068 /* Change the mova into a load, and restart scanning
5069 there. broken_move will then return true for mova. */
5070 fixup_mova (mova);
5071 insn = mova;
5074 if (broken_move (insn)
5075 || (GET_CODE (insn) == INSN
5076 && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
5078 rtx scan;
5079 /* Scan ahead looking for a barrier to stick the constant table
5080 behind. */
5081 rtx barrier = find_barrier (num_mova, mova, insn);
5082 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
5083 int need_aligned_label = 0;
5085 if (num_mova && ! mova_p (mova))
5087 /* find_barrier had to change the first mova into a
5088 pcload; thus, we have to start with this new pcload. */
5089 insn = mova;
5090 num_mova = 0;
5092 /* Now find all the moves between the points and modify them. */
5093 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
5095 if (GET_CODE (scan) == CODE_LABEL)
5096 last_float = 0;
5097 if (GET_CODE (scan) == INSN
5098 && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5099 need_aligned_label = 1;
5100 if (broken_move (scan))
5102 rtx *patp = &PATTERN (scan), pat = *patp;
5103 rtx src, dst;
5104 rtx lab;
5105 rtx newsrc;
5106 enum machine_mode mode;
5108 if (GET_CODE (pat) == PARALLEL)
5109 patp = &XVECEXP (pat, 0, 0), pat = *patp;
5110 src = SET_SRC (pat);
5111 dst = SET_DEST (pat);
5112 mode = GET_MODE (dst);
5114 if (mode == SImode && hi_const (src)
5115 && REGNO (dst) != FPUL_REG)
5117 int offset = 0;
5119 mode = HImode;
5120 while (GET_CODE (dst) == SUBREG)
5122 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5123 GET_MODE (SUBREG_REG (dst)),
5124 SUBREG_BYTE (dst),
5125 GET_MODE (dst));
5126 dst = SUBREG_REG (dst);
5128 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5130 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
5132 /* This must be an insn that clobbers r0. */
5133 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5134 XVECLEN (PATTERN (scan), 0)
5135 - 1);
5136 rtx clobber = *clobberp;
5138 gcc_assert (GET_CODE (clobber) == CLOBBER
5139 && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5141 if (last_float
5142 && reg_set_between_p (r0_rtx, last_float_move, scan))
5143 last_float = 0;
5144 if (last_float
5145 && TARGET_SHCOMPACT
5146 && GET_MODE_SIZE (mode) != 4
5147 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5148 last_float = 0;
5149 lab = add_constant (src, mode, last_float);
5150 if (lab)
5151 emit_insn_before (gen_mova (lab), scan);
5152 else
5154 /* There will be a REG_UNUSED note for r0 on
5155 LAST_FLOAT_MOVE; we have to change it to REG_INC,
5156 otherwise reorg:mark_target_live_regs will not
5157 consider r0 to be used, and we end up with a delay
5158 slot insn in front of SCAN that clobbers r0. */
5159 rtx note
5160 = find_regno_note (last_float_move, REG_UNUSED, 0);
5162 /* If we are not optimizing, then there may not be
5163 a note. */
5164 if (note)
5165 PUT_MODE (note, REG_INC);
5167 *last_float_addr = r0_inc_rtx;
5169 last_float_move = scan;
5170 last_float = src;
5171 newsrc = gen_const_mem (mode,
5172 (((TARGET_SH4 && ! TARGET_FMOVD)
5173 || REGNO (dst) == FPUL_REG)
5174 ? r0_inc_rtx
5175 : r0_rtx));
5176 last_float_addr = &XEXP (newsrc, 0);
5178 /* Remove the clobber of r0. */
5179 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5180 gen_rtx_SCRATCH (Pmode));
5182 /* This is a mova needing a label. Create it. */
5183 else if (GET_CODE (src) == UNSPEC
5184 && XINT (src, 1) == UNSPEC_MOVA
5185 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5187 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5188 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5189 newsrc = gen_rtx_UNSPEC (SImode,
5190 gen_rtvec (1, newsrc),
5191 UNSPEC_MOVA);
5193 else
5195 lab = add_constant (src, mode, 0);
5196 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5197 newsrc = gen_const_mem (mode, newsrc);
5199 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5200 INSN_CODE (scan) = -1;
5203 dump_table (need_aligned_label ? insn : 0, barrier);
5204 insn = barrier;
5207 free_alloc_pool (label_ref_list_pool);
5208 for (insn = first; insn; insn = NEXT_INSN (insn))
5209 PUT_MODE (insn, VOIDmode);
5211 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5212 INSN_ADDRESSES_FREE ();
5213 split_branches (first);
5215 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5216 also has an effect on the register that holds the address of the sfunc.
5217 Insert an extra dummy insn in front of each sfunc that pretends to
5218 use this register. */
5219 if (flag_delayed_branch)
5221 for (insn = first; insn; insn = NEXT_INSN (insn))
5223 rtx reg = sfunc_uses_reg (insn);
5225 if (! reg)
5226 continue;
5227 emit_insn_before (gen_use_sfunc_addr (reg), insn);
5230 #if 0
5231 /* fpscr is not actually a user variable, but we pretend it is for the
5232 sake of the previous optimization passes, since we want it handled like
5233 one. However, we don't have any debugging information for it, so turn
5234 it into a non-user variable now. */
5235 if (TARGET_SH4)
5236 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5237 #endif
5238 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5242 get_dest_uid (rtx label, int max_uid)
5244 rtx dest = next_real_insn (label);
5245 int dest_uid;
5246 if (! dest)
5247 /* This can happen for an undefined label. */
5248 return 0;
5249 dest_uid = INSN_UID (dest);
5250 /* If this is a newly created branch redirection blocking instruction,
5251 we cannot index the branch_uid or insn_addresses arrays with its
5252 uid. But then, we won't need to, because the actual destination is
5253 the following branch. */
5254 while (dest_uid >= max_uid)
5256 dest = NEXT_INSN (dest);
5257 dest_uid = INSN_UID (dest);
5259 if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
5260 return 0;
5261 return dest_uid;
5264 /* Split condbranches that are out of range. Also add clobbers for
5265 scratch registers that are needed in far jumps.
5266 We do this before delay slot scheduling, so that it can take our
5267 newly created instructions into account. It also allows us to
5268 find branches with common targets more easily. */
5270 static void
5271 split_branches (rtx first)
5273 rtx insn;
5274 struct far_branch **uid_branch, *far_branch_list = 0;
5275 int max_uid = get_max_uid ();
5276 int ok;
5278 /* Find out which branches are out of range. */
5279 shorten_branches (first);
5281 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5282 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5284 for (insn = first; insn; insn = NEXT_INSN (insn))
5285 if (! INSN_P (insn))
5286 continue;
5287 else if (INSN_DELETED_P (insn))
5289 /* Shorten_branches would split this instruction again,
5290 so transform it into a note. */
5291 SET_INSN_DELETED (insn);
5293 else if (GET_CODE (insn) == JUMP_INSN
5294 /* Don't mess with ADDR_DIFF_VEC */
5295 && (GET_CODE (PATTERN (insn)) == SET
5296 || GET_CODE (PATTERN (insn)) == RETURN))
5298 enum attr_type type = get_attr_type (insn);
5299 if (type == TYPE_CBRANCH)
5301 rtx next, beyond;
5303 if (get_attr_length (insn) > 4)
5305 rtx src = SET_SRC (PATTERN (insn));
5306 rtx olabel = XEXP (XEXP (src, 1), 0);
5307 int addr = INSN_ADDRESSES (INSN_UID (insn));
5308 rtx label = 0;
5309 int dest_uid = get_dest_uid (olabel, max_uid);
5310 struct far_branch *bp = uid_branch[dest_uid];
5312 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5313 the label if the LABEL_NUSES count drops to zero. There is
5314 always a jump_optimize pass that sets these values, but it
5315 proceeds to delete unreferenced code, and then if not
5316 optimizing, to un-delete the deleted instructions, thus
5317 leaving labels with too low uses counts. */
5318 if (! optimize)
5320 JUMP_LABEL (insn) = olabel;
5321 LABEL_NUSES (olabel)++;
5323 if (! bp)
5325 bp = (struct far_branch *) alloca (sizeof *bp);
5326 uid_branch[dest_uid] = bp;
5327 bp->prev = far_branch_list;
5328 far_branch_list = bp;
5329 bp->far_label
5330 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5331 LABEL_NUSES (bp->far_label)++;
5333 else
5335 label = bp->near_label;
5336 if (! label && bp->address - addr >= CONDJUMP_MIN)
5338 rtx block = bp->insert_place;
5340 if (GET_CODE (PATTERN (block)) == RETURN)
5341 block = PREV_INSN (block);
5342 else
5343 block = gen_block_redirect (block,
5344 bp->address, 2);
5345 label = emit_label_after (gen_label_rtx (),
5346 PREV_INSN (block));
5347 bp->near_label = label;
5349 else if (label && ! NEXT_INSN (label))
5351 if (addr + 2 - bp->address <= CONDJUMP_MAX)
5352 bp->insert_place = insn;
5353 else
5354 gen_far_branch (bp);
5357 if (! label
5358 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5360 bp->near_label = label = gen_label_rtx ();
5361 bp->insert_place = insn;
5362 bp->address = addr;
5364 ok = redirect_jump (insn, label, 0);
5365 gcc_assert (ok);
5367 else
5369 /* get_attr_length (insn) == 2 */
5370 /* Check if we have a pattern where reorg wants to redirect
5371 the branch to a label from an unconditional branch that
5372 is too far away. */
5373 /* We can't use JUMP_LABEL here because it might be undefined
5374 when not optimizing. */
5375 /* A syntax error might cause beyond to be NULL_RTX. */
5376 beyond
5377 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5378 0));
5380 if (beyond
5381 && (GET_CODE (beyond) == JUMP_INSN
5382 || ((beyond = next_active_insn (beyond))
5383 && GET_CODE (beyond) == JUMP_INSN))
5384 && GET_CODE (PATTERN (beyond)) == SET
5385 && recog_memoized (beyond) == CODE_FOR_jump_compact
5386 && ((INSN_ADDRESSES
5387 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5388 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5389 > 252 + 258 + 2))
5390 gen_block_redirect (beyond,
5391 INSN_ADDRESSES (INSN_UID (beyond)), 1);
5394 next = next_active_insn (insn);
5396 if ((GET_CODE (next) == JUMP_INSN
5397 || ((next = next_active_insn (next))
5398 && GET_CODE (next) == JUMP_INSN))
5399 && GET_CODE (PATTERN (next)) == SET
5400 && recog_memoized (next) == CODE_FOR_jump_compact
5401 && ((INSN_ADDRESSES
5402 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5403 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5404 > 252 + 258 + 2))
5405 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5407 else if (type == TYPE_JUMP || type == TYPE_RETURN)
5409 int addr = INSN_ADDRESSES (INSN_UID (insn));
5410 rtx far_label = 0;
5411 int dest_uid = 0;
5412 struct far_branch *bp;
5414 if (type == TYPE_JUMP)
5416 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5417 dest_uid = get_dest_uid (far_label, max_uid);
5418 if (! dest_uid)
5420 /* Parse errors can lead to labels outside
5421 the insn stream. */
5422 if (! NEXT_INSN (far_label))
5423 continue;
5425 if (! optimize)
5427 JUMP_LABEL (insn) = far_label;
5428 LABEL_NUSES (far_label)++;
5430 redirect_jump (insn, NULL_RTX, 1);
5431 far_label = 0;
5434 bp = uid_branch[dest_uid];
5435 if (! bp)
5437 bp = (struct far_branch *) alloca (sizeof *bp);
5438 uid_branch[dest_uid] = bp;
5439 bp->prev = far_branch_list;
5440 far_branch_list = bp;
5441 bp->near_label = 0;
5442 bp->far_label = far_label;
5443 if (far_label)
5444 LABEL_NUSES (far_label)++;
5446 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5447 if (addr - bp->address <= CONDJUMP_MAX)
5448 emit_label_after (bp->near_label, PREV_INSN (insn));
5449 else
5451 gen_far_branch (bp);
5452 bp->near_label = 0;
5454 else
5455 bp->near_label = 0;
5456 bp->address = addr;
5457 bp->insert_place = insn;
5458 if (! far_label)
5459 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5460 else
5461 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5464 /* Generate all pending far branches,
5465 and free our references to the far labels. */
5466 while (far_branch_list)
5468 if (far_branch_list->near_label
5469 && ! NEXT_INSN (far_branch_list->near_label))
5470 gen_far_branch (far_branch_list);
5471 if (optimize
5472 && far_branch_list->far_label
5473 && ! --LABEL_NUSES (far_branch_list->far_label))
5474 delete_insn (far_branch_list->far_label);
5475 far_branch_list = far_branch_list->prev;
5478 /* Instruction length information is no longer valid due to the new
5479 instructions that have been generated. */
5480 init_insn_lengths ();
5483 /* Dump out instruction addresses, which is useful for debugging the
5484 constant pool table stuff.
5486 If relaxing, output the label and pseudo-ops used to link together
5487 calls and the instruction which set the registers. */
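/* Sketch of the intent under -mrelax (an assumed illustration, not taken
   from this file): the insn that loads the call target gets an internal
   label, e.g. ".L5:", and the call insn is preceded by "\t.uses .L5", so
   the linker can pair the two, relax the jsr to a bsr, and drop the
   register load.  */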
5489 /* ??? The addresses printed by this routine for insns are nonsense for
5490 insns which are inside of a sequence where none of the inner insns have
5491 variable length. This is because the second pass of shorten_branches
5492 does not bother to update them. */
5494 void
5495 final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5496 int noperands ATTRIBUTE_UNUSED)
5498 if (TARGET_DUMPISIZE)
5499 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5501 if (TARGET_RELAX)
5503 rtx note;
5505 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
5506 if (note)
5508 rtx pattern;
5510 pattern = PATTERN (insn);
5511 if (GET_CODE (pattern) == PARALLEL)
5512 pattern = XVECEXP (pattern, 0, 0);
5513 switch (GET_CODE (pattern))
5515 case SET:
5516 if (GET_CODE (SET_SRC (pattern)) != CALL
5517 && get_attr_type (insn) != TYPE_SFUNC)
5519 targetm.asm_out.internal_label
5520 (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5521 break;
5523 /* else FALLTHROUGH */
5524 case CALL:
5525 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5526 CODE_LABEL_NUMBER (XEXP (note, 0)));
5527 break;
5529 default:
5530 gcc_unreachable ();
5536 /* Dump out any constants accumulated in the final pass. These will
5537 only be labels. */
5539 const char *
5540 output_jump_label_table (void)
5542 int i;
5544 if (pool_size)
5546 fprintf (asm_out_file, "\t.align 2\n");
5547 for (i = 0; i < pool_size; i++)
5549 pool_node *p = &pool_vector[i];
5551 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5552 CODE_LABEL_NUMBER (p->label));
5553 output_asm_insn (".long %O0", &p->value);
5555 pool_size = 0;
5558 return "";
5561 /* A full frame looks like:
5563 arg-5
5564 arg-4
5565 [ if current_function_anonymous_args
5566 arg-3
5567 arg-2
5568 arg-1
5569 arg-0 ]
5570 saved-fp
5571 saved-r10
5572 saved-r11
5573 saved-r12
5574 saved-pr
5575 local-n
5577 local-1
5578 local-0 <- fp points here. */
5580 /* Number of bytes pushed for anonymous args, used to pass information
5581 between expand_prologue and expand_epilogue. */
5583 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
5584 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
5585 for an epilogue and a negative value means that it's for a sibcall
5586 epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
5587 all the registers that are about to be restored, and hence dead. */
5589 static void
5590 output_stack_adjust (int size, rtx reg, int epilogue_p,
5591 HARD_REG_SET *live_regs_mask)
5593 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
5594 if (size)
5596 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5598 /* This test is bogus, as output_stack_adjust is used to re-align the
5599 stack. */
5600 #if 0
5601 gcc_assert (!(size % align));
5602 #endif
5604 if (CONST_OK_FOR_ADD (size))
5605 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
5606 /* Try to do it with two partial adjustments; however, we must make
5607 sure that the stack is properly aligned at all times, in case
5608 an interrupt occurs between the two partial adjustments. */
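/* Illustration (added; assumes a 4-byte STACK_BOUNDARY and the 8-bit
   signed add-immediate range of non-SHMEDIA targets): size == 200 does
   not fit a single add, but 200 / 2 & -4 == 100 and 200 - 100 == 100
   both do, so two add-immediate instructions are emitted and the stack
   stays aligned between them.  */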
5609 else if (CONST_OK_FOR_ADD (size / 2 & -align)
5610 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
5612 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
5613 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
5615 else
5617 rtx const_reg;
5618 rtx insn;
5619 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
5620 int i;
5622 /* If TEMP is invalid, we could temporarily save a general
5623 register to MACL. However, there is currently no need
5624 to handle this case, so just die when we see it. */
5625 if (epilogue_p < 0
5626 || current_function_interrupt
5627 || ! call_really_used_regs[temp] || fixed_regs[temp])
5628 temp = -1;
5629 if (temp < 0 && ! current_function_interrupt
5630 && (TARGET_SHMEDIA || epilogue_p >= 0))
5632 HARD_REG_SET temps;
5633 COPY_HARD_REG_SET (temps, call_used_reg_set);
5634 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
5635 if (epilogue_p > 0)
5637 int nreg = 0;
5638 if (crtl->return_rtx)
5640 enum machine_mode mode;
5641 mode = GET_MODE (crtl->return_rtx);
5642 if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
5643 nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
5645 for (i = 0; i < nreg; i++)
5646 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
5647 if (crtl->calls_eh_return)
5649 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
5650 for (i = 0; i <= 3; i++)
5651 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
5654 if (TARGET_SHMEDIA && epilogue_p < 0)
5655 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
5656 CLEAR_HARD_REG_BIT (temps, i);
5657 if (epilogue_p <= 0)
5659 for (i = FIRST_PARM_REG;
5660 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
5661 CLEAR_HARD_REG_BIT (temps, i);
5662 if (cfun->static_chain_decl != NULL)
5663 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
5665 temp = scavenge_reg (&temps);
5667 if (temp < 0 && live_regs_mask)
5669 HARD_REG_SET temps;
5671 COPY_HARD_REG_SET (temps, *live_regs_mask);
5672 CLEAR_HARD_REG_BIT (temps, REGNO (reg));
5673 temp = scavenge_reg (&temps);
5675 if (temp < 0)
5677 rtx adj_reg, tmp_reg, mem;
5679 /* If we reached here, the most likely case is the (sibcall)
5680 epilogue for non-SHmedia. Put a special push/pop sequence
5681 for such a case as a last resort. This looks lengthy but
5682 would not be a problem because it seems to be very
5683 rare. */
5685 gcc_assert (!TARGET_SHMEDIA && epilogue_p);
5688 /* ??? There is still the slight possibility that r4 or
5689 r5 have been reserved as fixed registers or assigned
5690 as global registers, and they change during an
5691 interrupt. There are possible ways to handle this:
5693 - If we are adjusting the frame pointer (r14), we can do
5694 with a single temp register and an ordinary push / pop
5695 on the stack.
5696 - Grab any call-used or call-saved registers (i.e. not
5697 fixed or globals) for the temps we need. We might
5698 also grab r14 if we are adjusting the stack pointer.
5699 If we can't find enough available registers, issue
5700 a diagnostic and die - the user must have reserved
5701 way too many registers.
5702 But since all this is rather unlikely to happen and
5703 would require extra testing, we just die if r4 / r5
5704 are not available. */
5705 gcc_assert (!fixed_regs[4] && !fixed_regs[5]
5706 && !global_regs[4] && !global_regs[5]);
5708 adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
5709 tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
5710 emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
5711 emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
5712 emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
5713 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5714 emit_move_insn (mem, tmp_reg);
5715 emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
5716 mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5717 emit_move_insn (mem, tmp_reg);
5718 emit_move_insn (reg, adj_reg);
5719 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5720 emit_move_insn (adj_reg, mem);
5721 mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5722 emit_move_insn (tmp_reg, mem);
5723 /* Tell flow the insns that pop r4/r5 aren't dead. */
5724 emit_use (tmp_reg);
5725 emit_use (adj_reg);
5726 return;
5728 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
5730 /* If SIZE is negative, subtract the positive value.
5731 This sometimes allows a constant pool entry to be shared
5732 between prologue and epilogue code. */
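/* For illustration, with a hypothetical adjustment of 1024 bytes (too large
   for an immediate add): the prologue call gets SIZE == -1024, loads 1024
   into CONST_REG and subtracts it from REG, while the epilogue call gets
   SIZE == 1024, loads 1024 and adds it, so both directions can reuse one
   constant pool entry holding 1024.  */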
5733 if (size < 0)
5735 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
5736 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
5738 else
5740 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
5741 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
5743 if (! epilogue_p)
5744 REG_NOTES (insn)
5745 = (gen_rtx_EXPR_LIST
5746 (REG_FRAME_RELATED_EXPR,
5747 gen_rtx_SET (VOIDmode, reg,
5748 gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
5749 REG_NOTES (insn)));
5754 static rtx
5755 frame_insn (rtx x)
5757 x = emit_insn (x);
5758 RTX_FRAME_RELATED_P (x) = 1;
5759 return x;
5762 /* Output RTL to push register RN onto the stack. */
5764 static rtx
5765 push (int rn)
5767 rtx x;
5768 if (rn == FPUL_REG)
5769 x = gen_push_fpul ();
5770 else if (rn == FPSCR_REG)
5771 x = gen_push_fpscr ();
5772 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5773 && FP_OR_XD_REGISTER_P (rn))
5775 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5776 return NULL_RTX;
5777 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
5779 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5780 x = gen_push_e (gen_rtx_REG (SFmode, rn));
5781 else
5782 x = gen_push (gen_rtx_REG (SImode, rn));
5784 x = frame_insn (x);
5785 REG_NOTES (x)
5786 = gen_rtx_EXPR_LIST (REG_INC,
5787 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5788 return x;
5791 /* Output RTL to pop register RN from the stack. */
5793 static void
5794 pop (int rn)
5796 rtx x;
5797 if (rn == FPUL_REG)
5798 x = gen_pop_fpul ();
5799 else if (rn == FPSCR_REG)
5800 x = gen_pop_fpscr ();
5801 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5802 && FP_OR_XD_REGISTER_P (rn))
5804 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5805 return;
5806 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
5808 else if (TARGET_SH2E && FP_REGISTER_P (rn))
5809 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
5810 else
5811 x = gen_pop (gen_rtx_REG (SImode, rn));
5813 x = emit_insn (x);
5814 REG_NOTES (x)
5815 = gen_rtx_EXPR_LIST (REG_INC,
5816 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5819 /* Generate code to push the regs specified in the mask. */
5821 static void
5822 push_regs (HARD_REG_SET *mask, int interrupt_handler)
5824 int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
5825 int skip_fpscr = 0;
5827 /* Push PR last; this gives better latencies after the prologue, and
5828 candidates for the return delay slot when there are no general
5829 registers pushed. */
5830 for (; i < FIRST_PSEUDO_REGISTER; i++)
5832 /* If this is an interrupt handler, and the SZ bit varies,
5833 and we have to push any floating point register, we need
5834 to switch to the correct precision first. */
5835 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
5836 && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
5838 HARD_REG_SET unsaved;
5840 push (FPSCR_REG);
5841 COMPL_HARD_REG_SET (unsaved, *mask);
5842 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
5843 skip_fpscr = 1;
5845 if (i != PR_REG
5846 && (i != FPSCR_REG || ! skip_fpscr)
5847 && TEST_HARD_REG_BIT (*mask, i))
5849 /* If the ISR has the RESBANK attribute assigned, don't push any of
5850 the following registers: R0-R14, MACH, MACL and GBR. */
5851 if (! (sh_cfun_resbank_handler_p ()
5852 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
5853 || i == MACH_REG
5854 || i == MACL_REG
5855 || i == GBR_REG)))
5856 push (i);
5860 /* Push banked registers last to improve delay slot opportunities. */
5861 if (interrupt_handler)
5862 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
5863 if (TEST_HARD_REG_BIT (*mask, i))
5864 push (i);
5866 /* Don't push PR register for an ISR with RESBANK attribute assigned. */
5867 if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
5868 push (PR_REG);
5871 /* Calculate how much extra space is needed to save all callee-saved
5872 target registers.
5873 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5875 static int
5876 shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
5878 int reg;
5879 int stack_space = 0;
5880 int interrupt_handler = sh_cfun_interrupt_handler_p ();
5882 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5883 if ((! call_really_used_regs[reg] || interrupt_handler)
5884 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5885 /* Leave space to save this target register on the stack,
5886 in case target register allocation wants to use it. */
5887 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5888 return stack_space;
5891 /* Decide whether we should reserve space for callee-save target registers,
5892 in case target register allocation wants to use them. REGS_SAVED is
5893 the space, in bytes, that is already required for register saves.
5894 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5896 static int
5897 shmedia_reserve_space_for_target_registers_p (int regs_saved,
5898 HARD_REG_SET *live_regs_mask)
5900 if (optimize_size)
5901 return 0;
5902 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
5905 /* Decide how much space to reserve for callee-save target registers
5906 in case target register allocation wants to use them.
5907 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
5909 static int
5910 shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
5912 if (shmedia_space_reserved_for_target_registers)
5913 return shmedia_target_regs_stack_space (live_regs_mask);
5914 else
5915 return 0;
5918 /* Work out the registers which need to be saved, both as a mask and a
5919 count of saved words. Return the count.
5921 If doing a pragma interrupt function, then push all regs used by the
5922 function, and if we call another function (we can tell by looking at PR),
5923 make sure that all the regs it clobbers are safe too. */
5925 static int
5926 calc_live_regs (HARD_REG_SET *live_regs_mask)
5928 unsigned int reg;
5929 int count;
5930 tree attrs;
5931 bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
5932 bool nosave_low_regs;
5933 int pr_live, has_call;
5935 attrs = DECL_ATTRIBUTES (current_function_decl);
5936 interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
5937 trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
5938 interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
5939 nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
5941 CLEAR_HARD_REG_SET (*live_regs_mask);
5942 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
5943 && df_regs_ever_live_p (FPSCR_REG))
5944 target_flags &= ~MASK_FPU_SINGLE;
5945 /* If switching to double mode would save a lot of register saves, do that. */
5946 else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
5947 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
5948 if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
5949 && (! call_really_used_regs[reg]
5950 || interrupt_handler)
5951 && ++count > 2)
5953 target_flags &= ~MASK_FPU_SINGLE;
5954 break;
5956 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
5957 knows how to use it. That means the pseudo originally allocated for
5958 the initial value can become the PR_MEDIA_REG hard register, as seen for
5959 execute/20010122-1.c:test9. */
5960 if (TARGET_SHMEDIA)
5961 /* ??? This function is called from initial_elimination_offset, hence we
5962 can't use the result of sh_media_register_for_return here. */
5963 pr_live = sh_pr_n_sets ();
5964 else
5966 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
5967 pr_live = (pr_initial
5968 ? (GET_CODE (pr_initial) != REG
5969 || REGNO (pr_initial) != (PR_REG))
5970 : df_regs_ever_live_p (PR_REG));
5971 /* For SHcompact, if not optimizing, we end up with a memory reference
5972 using the return address pointer for __builtin_return_address even
5973 though there is no actual need to put the PR register on the stack. */
5974 pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
5976 /* Force PR to be live if the prologue has to call the SHmedia
5977 argument decoder or register saver. */
5978 if (TARGET_SHCOMPACT
5979 && ((crtl->args.info.call_cookie
5980 & ~ CALL_COOKIE_RET_TRAMP (1))
5981 || crtl->saves_all_registers))
5982 pr_live = 1;
5983 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
5984 for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
5986 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
5987 ? pr_live
5988 : interrupt_handler
5989 ? (/* Need to save all the regs ever live. */
5990 (df_regs_ever_live_p (reg)
5991 || (call_really_used_regs[reg]
5992 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
5993 || reg == PIC_OFFSET_TABLE_REGNUM)
5994 && has_call)
5995 || (TARGET_SHMEDIA && has_call
5996 && REGISTER_NATURAL_MODE (reg) == SImode
5997 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
5998 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
5999 && reg != RETURN_ADDRESS_POINTER_REGNUM
6000 && reg != T_REG && reg != GBR_REG
6001 /* Push fpscr only on targets which have an FPU. */
6002 && (reg != FPSCR_REG || TARGET_FPU_ANY))
6003 : (/* Only push those regs which are used and need to be saved. */
6004 (TARGET_SHCOMPACT
6005 && flag_pic
6006 && crtl->args.info.call_cookie
6007 && reg == PIC_OFFSET_TABLE_REGNUM)
6008 || (df_regs_ever_live_p (reg)
6009 && (!call_really_used_regs[reg]
6010 || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
6011 || (crtl->calls_eh_return
6012 && (reg == EH_RETURN_DATA_REGNO (0)
6013 || reg == EH_RETURN_DATA_REGNO (1)
6014 || reg == EH_RETURN_DATA_REGNO (2)
6015 || reg == EH_RETURN_DATA_REGNO (3)))
6016 || ((reg == MACL_REG || reg == MACH_REG)
6017 && df_regs_ever_live_p (reg)
6018 && sh_cfun_attr_renesas_p ())
6021 SET_HARD_REG_BIT (*live_regs_mask, reg);
6022 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6024 if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
6025 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
6027 if (FP_REGISTER_P (reg))
6029 if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
6031 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
6032 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
6035 else if (XD_REGISTER_P (reg))
6037 /* Must switch to double mode to access these registers. */
6038 target_flags &= ~MASK_FPU_SINGLE;
6042 if (nosave_low_regs && reg == R8_REG)
6043 break;
6045 /* If we have a target register optimization pass after prologue / epilogue
6046 threading, we need to assume all target registers will be live even if
6047 they aren't now. */
6048 if (flag_branch_target_load_optimize2
6049 && TARGET_SAVE_ALL_TARGET_REGS
6050 && shmedia_space_reserved_for_target_registers)
6051 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6052 if ((! call_really_used_regs[reg] || interrupt_handler)
6053 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6055 SET_HARD_REG_BIT (*live_regs_mask, reg);
6056 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6058 /* If this is an interrupt handler, we don't have any call-clobbered
6059 registers we can conveniently use for target register save/restore.
6060 Make sure we save at least one general purpose register when we need
6061 to save target registers. */
6062 if (interrupt_handler
6063 && hard_reg_set_intersect_p (*live_regs_mask,
6064 reg_class_contents[TARGET_REGS])
6065 && ! hard_reg_set_intersect_p (*live_regs_mask,
6066 reg_class_contents[GENERAL_REGS]))
6068 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
6069 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
6072 return count;
6075 /* Code to generate prologue and epilogue sequences */
6077 /* PUSHED is the number of bytes that are being pushed on the
6078 stack for register saves. Return the frame size, padded
6079 appropriately so that the stack stays properly aligned. */
6080 static HOST_WIDE_INT
6081 rounded_frame_size (int pushed)
6083 HOST_WIDE_INT size = get_frame_size ();
6084 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6086 return ((size + pushed + align - 1) & -align) - pushed;
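/* For illustration, with hypothetical values ALIGN == 8, PUSHED == 12 and
   get_frame_size () == 18: ((18 + 12 + 7) & -8) - 12 == 32 - 12 == 20, so
   the 18-byte frame is padded to 20 bytes and the 32-byte total of saves
   plus frame keeps the stack 8-byte aligned.  */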
6089 /* Choose a call-clobbered target-branch register that remains
6090 unchanged across the whole function. We set it up as the return
6091 value in the prologue. */
6092 int
6093 sh_media_register_for_return (void)
6095 int regno;
6096 int tr0_used;
6098 if (! current_function_is_leaf)
6099 return -1;
6100 if (lookup_attribute ("interrupt_handler",
6101 DECL_ATTRIBUTES (current_function_decl)))
6102 return -1;
6103 if (sh_cfun_interrupt_handler_p ())
6104 return -1;
6106 tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6108 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6109 if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6110 return regno;
6112 return -1;
6115 /* The maximum number of registers we need to save is:
6116 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6117 - 32 floating point registers (for each pair, we save none,
6118 one single precision value, or a double precision value).
6119 - 8 target registers
6120 - add 1 entry for a delimiter. */
6121 #define MAX_SAVED_REGS (62+32+8)
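/* I.e. MAX_SAVED_REGS is 62 + 32 + 8 == 102 slots; the save_schedule
   structure below then adds two more entries for the start and end
   delimiters.  */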
6123 typedef struct save_entry_s
6125 unsigned char reg;
6126 unsigned char mode;
6127 short offset;
6128 } save_entry;
6130 #define MAX_TEMPS 4
6132 /* There will be a delimiter entry with VOIDmode both at the start and the
6133 end of a filled-in schedule. The end delimiter has the offset of the
6134 save with the smallest (i.e. most negative) offset. */
6135 typedef struct save_schedule_s
6137 save_entry entries[MAX_SAVED_REGS + 2];
6138 int temps[MAX_TEMPS+1];
6139 } save_schedule;
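/* For illustration, a hypothetical schedule built with OFFSET_BASE == 0
   for two call-saved 8-byte registers (say r28 and r29) would be:
     entries[0] = { -1, VOIDmode,   0 }   start delimiter
     entries[1] = { 28, DImode,    -8 }
     entries[2] = { 29, DImode,   -16 }
     entries[3] = { -1, VOIDmode, -16 }   end delimiter (smallest offset)
   and temps[] holds the usable scratch registers, terminated by -1.  */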
6141 /* Fill in SCHEDULE according to LIVE_REGS_MASK (callers walk the result
6142 backwards when restoring). Returns the last entry written to (not
6143 counting the end delimiter). OFFSET_BASE is a number to be added to
6144 all offset entries. */
6146 static save_entry *
6147 sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6148 int offset_base)
6150 int align, i;
6151 save_entry *entry = schedule->entries;
6152 int tmpx = 0;
6153 int offset;
6155 if (! current_function_interrupt)
6156 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6157 if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6158 && ! FUNCTION_ARG_REGNO_P (i)
6159 && i != FIRST_RET_REG
6160 && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6161 && ! (crtl->calls_eh_return
6162 && (i == EH_RETURN_STACKADJ_REGNO
6163 || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6164 && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6165 schedule->temps[tmpx++] = i;
6166 entry->reg = -1;
6167 entry->mode = VOIDmode;
6168 entry->offset = offset_base;
6169 entry++;
6170 /* We loop twice: first, we save 8-byte aligned registers at the
6171 higher addresses, which are known to be aligned. Then, we
6172 proceed to saving 32-bit registers that don't need 8-byte
6173 alignment.
6174 If this is an interrupt function, all registers that need saving
6175 need to be saved in full. Moreover, we need to postpone saving
6176 target registers until we have saved some general purpose registers
6177 that we can then use as scratch registers. */
6178 offset = offset_base;
6179 for (align = 1; align >= 0; align--)
6181 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6182 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6184 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6185 int reg = i;
6187 if (current_function_interrupt)
6189 if (TARGET_REGISTER_P (i))
6190 continue;
6191 if (GENERAL_REGISTER_P (i))
6192 mode = DImode;
6194 if (mode == SFmode && (i % 2) == 1
6195 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6196 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6198 mode = DFmode;
6199 i--;
6200 reg--;
6203 /* If we're doing the aligned pass and this is not aligned,
6204 or we're doing the unaligned pass and this is aligned,
6205 skip it. */
6206 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6207 != align)
6208 continue;
6210 if (current_function_interrupt
6211 && GENERAL_REGISTER_P (i)
6212 && tmpx < MAX_TEMPS)
6213 schedule->temps[tmpx++] = i;
6215 offset -= GET_MODE_SIZE (mode);
6216 entry->reg = i;
6217 entry->mode = mode;
6218 entry->offset = offset;
6219 entry++;
6221 if (align && current_function_interrupt)
6222 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6223 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6225 offset -= GET_MODE_SIZE (DImode);
6226 entry->reg = i;
6227 entry->mode = DImode;
6228 entry->offset = offset;
6229 entry++;
6232 entry->reg = -1;
6233 entry->mode = VOIDmode;
6234 entry->offset = offset;
6235 schedule->temps[tmpx] = -1;
6236 return entry - 1;
6239 void
6240 sh_expand_prologue (void)
6242 HARD_REG_SET live_regs_mask;
6243 int d, i;
6244 int d_rounding = 0;
6245 int save_flags = target_flags;
6246 int pretend_args;
6247 tree sp_switch_attr
6248 = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6250 current_function_interrupt = sh_cfun_interrupt_handler_p ();
6252 /* We have pretend args if we had an object sent partially in registers
6253 and partially on the stack, e.g. a large structure. */
6254 pretend_args = crtl->args.pretend_args_size;
6255 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6256 && (NPARM_REGS(SImode)
6257 > crtl->args.info.arg_count[(int) SH_ARG_INT]))
6258 pretend_args = 0;
6259 output_stack_adjust (-pretend_args
6260 - crtl->args.info.stack_regs * 8,
6261 stack_pointer_rtx, 0, NULL);
6263 if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
6264 /* We're going to use the PIC register to load the address of the
6265 incoming-argument decoder and/or of the return trampoline from
6266 the GOT, so make sure the PIC register is preserved and
6267 initialized. */
6268 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6270 if (TARGET_SHCOMPACT
6271 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6273 int reg;
6275 /* First, make all registers with incoming arguments that will
6276 be pushed onto the stack live, so that register renaming
6277 doesn't overwrite them. */
6278 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6279 if (CALL_COOKIE_STACKSEQ_GET (crtl->args.info.call_cookie)
6280 >= NPARM_REGS (SImode) - reg)
6281 for (; reg < NPARM_REGS (SImode); reg++)
6282 emit_insn (gen_shcompact_preserve_incoming_args
6283 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6284 else if (CALL_COOKIE_INT_REG_GET
6285 (crtl->args.info.call_cookie, reg) == 1)
6286 emit_insn (gen_shcompact_preserve_incoming_args
6287 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6289 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6290 stack_pointer_rtx);
6291 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6292 GEN_INT (crtl->args.info.call_cookie));
6293 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6294 gen_rtx_REG (SImode, R0_REG));
6296 else if (TARGET_SHMEDIA)
6298 int tr = sh_media_register_for_return ();
6300 if (tr >= 0)
6301 emit_move_insn (gen_rtx_REG (DImode, tr),
6302 gen_rtx_REG (DImode, PR_MEDIA_REG));
6305 /* Emit the code for SETUP_VARARGS. */
6306 if (cfun->stdarg)
6308 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
6310 /* Push arg regs as if they'd been provided by the caller on the stack. */
6311 for (i = 0; i < NPARM_REGS(SImode); i++)
6313 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6314 rtx insn;
6316 if (i >= (NPARM_REGS(SImode)
6317 - crtl->args.info.arg_count[(int) SH_ARG_INT]
6319 break;
6320 insn = push (rn);
6325 /* If we're supposed to switch stacks at function entry, do so now. */
6326 if (sp_switch_attr)
6328 /* The argument specifies a variable holding the address of the
6329 stack the interrupt function should switch to/from at entry/exit. */
6330 const char *s
6331 = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
6332 rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6334 emit_insn (gen_sp_switch_1 (sp_switch));
6337 d = calc_live_regs (&live_regs_mask);
6338 /* ??? Maybe we could save some switching if we can move a mode switch
6339 that already happens to be at the function start into the prologue. */
6340 if (target_flags != save_flags && ! current_function_interrupt)
6341 emit_insn (gen_toggle_sz ());
6343 if (TARGET_SH5)
6345 int offset_base, offset;
6346 rtx r0 = NULL_RTX;
6347 int offset_in_r0 = -1;
6348 int sp_in_r0 = 0;
6349 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6350 int total_size, save_size;
6351 save_schedule schedule;
6352 save_entry *entry;
6353 int *tmp_pnt;
6355 if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6356 && ! current_function_interrupt)
6357 r0 = gen_rtx_REG (Pmode, R0_REG);
6359 /* D is the actual number of bytes that we need for saving registers;
6360 however, in initial_elimination_offset we have committed to using
6361 an additional TREGS_SPACE bytes. In order to keep both the
6362 addresses of arguments supplied by the caller and of local variables
6363 valid, we must keep this gap. Place it between the incoming
6364 arguments and the actually saved registers in a bid to optimize
6365 locality of reference. */
6366 total_size = d + tregs_space;
6367 total_size += rounded_frame_size (total_size);
6368 save_size = total_size - rounded_frame_size (d);
6369 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6370 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6371 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
6373 /* If adjusting the stack in a single step costs nothing extra, do so.
6374 I.e. either if a single addi is enough, or we need a movi anyway,
6375 and we don't exceed the maximum offset range (the test for the
6376 latter is conservative for simplicity). */
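/* For illustration, with hypothetical sizes save_size == 48 and
   total_size == 400: CONST_OK_FOR_I10 (-400) holds, so d_rounding becomes
   400 - 48 == 352, the adjustment below subtracts the full 400 bytes at
   once, and the later frame-size adjustment degenerates to zero.  */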
6377 if (TARGET_SHMEDIA
6378 && (CONST_OK_FOR_I10 (-total_size)
6379 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6380 && total_size <= 2044)))
6381 d_rounding = total_size - save_size;
6383 offset_base = d + d_rounding;
6385 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6386 0, NULL);
6388 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6389 tmp_pnt = schedule.temps;
6390 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6392 enum machine_mode mode = entry->mode;
6393 unsigned int reg = entry->reg;
6394 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6395 rtx orig_reg_rtx;
6397 offset = entry->offset;
6399 reg_rtx = gen_rtx_REG (mode, reg);
6401 mem_rtx = gen_frame_mem (mode,
6402 gen_rtx_PLUS (Pmode,
6403 stack_pointer_rtx,
6404 GEN_INT (offset)));
6406 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
6408 gcc_assert (r0);
6409 mem_rtx = NULL_RTX;
6411 try_pre_dec:
6413 if (HAVE_PRE_DECREMENT
6414 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6415 || mem_rtx == NULL_RTX
6416 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6418 pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6420 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
6421 pre_dec_ok);
6423 pre_dec = NULL_RTX;
6425 break;
6427 pre_dec_ok:
6428 mem_rtx = NULL_RTX;
6429 offset += GET_MODE_SIZE (mode);
6431 while (0);
6433 if (mem_rtx != NULL_RTX)
6434 goto addr_ok;
6436 if (offset_in_r0 == -1)
6438 emit_move_insn (r0, GEN_INT (offset));
6439 offset_in_r0 = offset;
6441 else if (offset != offset_in_r0)
6443 emit_move_insn (r0,
6444 gen_rtx_PLUS
6445 (Pmode, r0,
6446 GEN_INT (offset - offset_in_r0)));
6447 offset_in_r0 += offset - offset_in_r0;
6450 if (pre_dec != NULL_RTX)
6452 if (! sp_in_r0)
6454 emit_move_insn (r0,
6455 gen_rtx_PLUS
6456 (Pmode, r0, stack_pointer_rtx));
6457 sp_in_r0 = 1;
6460 offset -= GET_MODE_SIZE (mode);
6461 offset_in_r0 -= GET_MODE_SIZE (mode);
6463 mem_rtx = pre_dec;
6465 else if (sp_in_r0)
6466 mem_rtx = gen_frame_mem (mode, r0);
6467 else
6468 mem_rtx = gen_frame_mem (mode,
6469 gen_rtx_PLUS (Pmode,
6470 stack_pointer_rtx,
6471 r0));
6473 /* We must not use an r0-based address for target-branch
6474 registers or for special registers without pre-dec
6475 memory addresses, since we store their values in r0
6476 first. */
6477 gcc_assert (!TARGET_REGISTER_P (reg)
6478 && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6479 || mem_rtx == pre_dec));
6481 addr_ok:
6482 orig_reg_rtx = reg_rtx;
6483 if (TARGET_REGISTER_P (reg)
6484 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6485 && mem_rtx != pre_dec))
6487 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6489 emit_move_insn (tmp_reg, reg_rtx);
6491 if (REGNO (tmp_reg) == R0_REG)
6493 offset_in_r0 = -1;
6494 sp_in_r0 = 0;
6495 gcc_assert (!refers_to_regno_p
6496 (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6499 if (*++tmp_pnt <= 0)
6500 tmp_pnt = schedule.temps;
6502 reg_rtx = tmp_reg;
6505 rtx insn;
6507 /* Mark as interesting for dwarf cfi generator */
6508 insn = emit_move_insn (mem_rtx, reg_rtx);
6509 RTX_FRAME_RELATED_P (insn) = 1;
6510 /* If we use an intermediate register for the save, we can't
6511 describe this exactly in cfi as a copy of the to-be-saved
6512 register into the temporary register and then the temporary
6513 register on the stack, because the temporary register can
6514 have a different natural size than the to-be-saved register.
6515 Thus, we gloss over the intermediate copy and pretend we do
6516 a direct save from the to-be-saved register. */
6517 if (REGNO (reg_rtx) != reg)
6519 rtx set, note_rtx;
6521 set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6522 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6523 REG_NOTES (insn));
6524 REG_NOTES (insn) = note_rtx;
6527 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6529 rtx reg_rtx = gen_rtx_REG (mode, reg);
6530 rtx set, note_rtx;
6531 rtx mem_rtx = gen_frame_mem (mode,
6532 gen_rtx_PLUS (Pmode,
6533 stack_pointer_rtx,
6534 GEN_INT (offset)));
6536 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6537 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6538 REG_NOTES (insn));
6539 REG_NOTES (insn) = note_rtx;
6544 gcc_assert (entry->offset == d_rounding);
6546 else
6547 push_regs (&live_regs_mask, current_function_interrupt);
6549 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
6550 emit_insn (gen_GOTaddr2picreg ());
6552 if (SHMEDIA_REGS_STACK_ADJUST ())
6554 /* This must NOT go through the PLT, otherwise mach and macl
6555 may be clobbered. */
6556 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6557 (TARGET_FPU_ANY
6558 ? "__GCC_push_shmedia_regs"
6559 : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6560 emit_insn (gen_shmedia_save_restore_regs_compact
6561 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6564 if (target_flags != save_flags && ! current_function_interrupt)
6565 emit_insn (gen_toggle_sz ());
6567 target_flags = save_flags;
6569 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6570 stack_pointer_rtx, 0, NULL);
6572 if (frame_pointer_needed)
6573 frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6575 if (TARGET_SHCOMPACT
6576 && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6578 /* This must NOT go through the PLT, otherwise mach and macl
6579 may be clobbered. */
6580 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6581 "__GCC_shcompact_incoming_args", SFUNC_GOT);
6582 emit_insn (gen_shcompact_incoming_args ());
6586 void
6587 sh_expand_epilogue (bool sibcall_p)
6589 HARD_REG_SET live_regs_mask;
6590 int d, i;
6591 int d_rounding = 0;
6593 int save_flags = target_flags;
6594 int frame_size, save_size;
6595 int fpscr_deferred = 0;
6596 int e = sibcall_p ? -1 : 1;
6598 d = calc_live_regs (&live_regs_mask);
6600 save_size = d;
6601 frame_size = rounded_frame_size (d);
6603 if (TARGET_SH5)
6605 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6606 int total_size;
6607 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
6608 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6609 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
6611 total_size = d + tregs_space;
6612 total_size += rounded_frame_size (total_size);
6613 save_size = total_size - frame_size;
6615 /* If adjusting the stack in a single step costs nothing extra, do so.
6616 I.e. either if a single addi is enough, or we need a movi anyway,
6617 and we don't exceed the maximum offset range (the test for the
6618 latter is conservative for simplicity). */
6619 if (TARGET_SHMEDIA
6620 && ! frame_pointer_needed
6621 && (CONST_OK_FOR_I10 (total_size)
6622 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
6623 && total_size <= 2044)))
6624 d_rounding = frame_size;
6626 frame_size -= d_rounding;
6629 if (frame_pointer_needed)
6631 /* We must avoid scheduling the epilogue with previous basic blocks
6632 when exception handling is enabled. See PR/18032. */
6633 if (flag_exceptions)
6634 emit_insn (gen_blockage ());
6635 output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
6636 &live_regs_mask);
6638 /* We must avoid moving the stack pointer adjustment past code
6639 which reads from the local frame, else an interrupt could
6640 occur after the SP adjustment and clobber data in the local
6641 frame. */
6642 emit_insn (gen_blockage ());
6643 emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
6645 else if (frame_size)
6647 /* We must avoid moving the stack pointer adjustment past code
6648 which reads from the local frame, else an interrupt could
6649 occur after the SP adjustment and clobber data in the local
6650 frame. */
6651 emit_insn (gen_blockage ());
6652 output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
6655 if (SHMEDIA_REGS_STACK_ADJUST ())
6657 function_symbol (gen_rtx_REG (Pmode, R0_REG),
6658 (TARGET_FPU_ANY
6659 ? "__GCC_pop_shmedia_regs"
6660 : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
6661 /* This must NOT go through the PLT, otherwise mach and macl
6662 may be clobbered. */
6663 emit_insn (gen_shmedia_save_restore_regs_compact
6664 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
6667 /* Pop all the registers. */
6669 if (target_flags != save_flags && ! current_function_interrupt)
6670 emit_insn (gen_toggle_sz ());
6671 if (TARGET_SH5)
6673 int offset_base, offset;
6674 int offset_in_r0 = -1;
6675 int sp_in_r0 = 0;
6676 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
6677 save_schedule schedule;
6678 save_entry *entry;
6679 int *tmp_pnt;
6681 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
6682 offset_base = -entry[1].offset + d_rounding;
6683 tmp_pnt = schedule.temps;
6684 for (; entry->mode != VOIDmode; entry--)
6686 enum machine_mode mode = entry->mode;
6687 int reg = entry->reg;
6688 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
6690 offset = offset_base + entry->offset;
6691 reg_rtx = gen_rtx_REG (mode, reg);
6693 mem_rtx = gen_frame_mem (mode,
6694 gen_rtx_PLUS (Pmode,
6695 stack_pointer_rtx,
6696 GEN_INT (offset)));
6698 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
6700 mem_rtx = NULL_RTX;
6702 try_post_inc:
6704 if (HAVE_POST_INCREMENT
6705 && (offset == offset_in_r0
6706 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
6707 && mem_rtx == NULL_RTX)
6708 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6710 post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
6712 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
6713 post_inc_ok);
6715 post_inc = NULL_RTX;
6717 break;
6719 post_inc_ok:
6720 mem_rtx = NULL_RTX;
6722 while (0);
6724 if (mem_rtx != NULL_RTX)
6725 goto addr_ok;
6727 if (offset_in_r0 == -1)
6729 emit_move_insn (r0, GEN_INT (offset));
6730 offset_in_r0 = offset;
6732 else if (offset != offset_in_r0)
6734 emit_move_insn (r0,
6735 gen_rtx_PLUS
6736 (Pmode, r0,
6737 GEN_INT (offset - offset_in_r0)));
6738 offset_in_r0 += offset - offset_in_r0;
6741 if (post_inc != NULL_RTX)
6743 if (! sp_in_r0)
6745 emit_move_insn (r0,
6746 gen_rtx_PLUS
6747 (Pmode, r0, stack_pointer_rtx));
6748 sp_in_r0 = 1;
6751 mem_rtx = post_inc;
6753 offset_in_r0 += GET_MODE_SIZE (mode);
6755 else if (sp_in_r0)
6756 mem_rtx = gen_frame_mem (mode, r0);
6757 else
6758 mem_rtx = gen_frame_mem (mode,
6759 gen_rtx_PLUS (Pmode,
6760 stack_pointer_rtx,
6761 r0));
6763 gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6764 || mem_rtx == post_inc);
6766 addr_ok:
6767 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6768 && mem_rtx != post_inc)
6770 insn = emit_move_insn (r0, mem_rtx);
6771 mem_rtx = r0;
6773 else if (TARGET_REGISTER_P (reg))
6775 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
6777 /* Give the scheduler a bit of freedom by using up to
6778 MAX_TEMPS registers in a round-robin fashion. */
6779 insn = emit_move_insn (tmp_reg, mem_rtx);
6780 mem_rtx = tmp_reg;
6781 if (*++tmp_pnt < 0)
6782 tmp_pnt = schedule.temps;
6785 insn = emit_move_insn (reg_rtx, mem_rtx);
6788 gcc_assert (entry->offset + offset_base == d + d_rounding);
6790 else /* ! TARGET_SH5 */
6792 int last_reg;
6794 save_size = 0;
6795 /* For an ISR with the RESBANK attribute assigned, don't pop the PR
6796 register. */
6797 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
6798 && !sh_cfun_resbank_handler_p ())
6800 if (!frame_pointer_needed)
6801 emit_insn (gen_blockage ());
6802 pop (PR_REG);
6805 /* Banked registers are popped first to avoid being scheduled in the
6806 delay slot. RTE switches banks before the delay-slot instruction. */
6807 if (current_function_interrupt)
6809 for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6810 if (TEST_HARD_REG_BIT (live_regs_mask, i))
6811 pop (LAST_BANKED_REG - i);
6813 last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
6815 else
6816 last_reg = FIRST_PSEUDO_REGISTER;
6818 for (i = 0; i < last_reg; i++)
6820 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
6822 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
6823 && hard_reg_set_intersect_p (live_regs_mask,
6824 reg_class_contents[DF_REGS]))
6825 fpscr_deferred = 1;
6826 /* For an ISR with the RESBANK attribute assigned, don't pop the
6827 following registers: R0-R14, MACH, MACL and GBR. */
6828 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
6829 && ! (sh_cfun_resbank_handler_p ()
6830 && ((j >= FIRST_GENERAL_REG
6831 && j < LAST_GENERAL_REG)
6832 || j == MACH_REG
6833 || j == MACL_REG
6834 || j == GBR_REG)))
6835 pop (j);
6837 if (j == FIRST_FP_REG && fpscr_deferred)
6838 pop (FPSCR_REG);
6841 if (target_flags != save_flags && ! current_function_interrupt)
6842 emit_insn (gen_toggle_sz ());
6843 target_flags = save_flags;
6845 output_stack_adjust (crtl->args.pretend_args_size
6846 + save_size + d_rounding
6847 + crtl->args.info.stack_regs * 8,
6848 stack_pointer_rtx, e, NULL);
6850 if (crtl->calls_eh_return)
6851 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
6852 EH_RETURN_STACKADJ_RTX));
6854 /* Switch back to the normal stack if necessary. */
6855 if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
6856 emit_insn (gen_sp_switch_2 ());
6858 /* Tell flow the insn that pops PR isn't dead. */
6859 /* PR_REG will never be live in SHmedia mode, and we don't need to
6860 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
6861 by the return pattern. */
6862 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6863 emit_use (gen_rtx_REG (SImode, PR_REG));
6866 static int sh_need_epilogue_known = 0;
6868 int
6869 sh_need_epilogue (void)
6871 if (! sh_need_epilogue_known)
6873 rtx epilogue;
6875 start_sequence ();
6876 sh_expand_epilogue (0);
6877 epilogue = get_insns ();
6878 end_sequence ();
6879 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
6881 return sh_need_epilogue_known > 0;
6884 /* Emit code to change the current function's return address to RA.
6885 TMP is available as a scratch register, if needed. */
6887 void
6888 sh_set_return_address (rtx ra, rtx tmp)
6890 HARD_REG_SET live_regs_mask;
6891 int d;
6892 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6893 int pr_offset;
6895 d = calc_live_regs (&live_regs_mask);
6897 /* If pr_reg isn't live, we can set it (or the register given by
6898 sh_media_register_for_return) directly. */
6899 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6901 rtx rr;
6903 if (TARGET_SHMEDIA)
6905 int rr_regno = sh_media_register_for_return ();
6907 if (rr_regno < 0)
6908 rr_regno = pr_reg;
6910 rr = gen_rtx_REG (DImode, rr_regno);
6912 else
6913 rr = gen_rtx_REG (SImode, pr_reg);
6915 emit_insn (GEN_MOV (rr, ra));
6916 /* Tell flow the register for return isn't dead. */
6917 emit_use (rr);
6918 return;
6921 if (TARGET_SH5)
6923 int offset;
6924 save_schedule schedule;
6925 save_entry *entry;
6927 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
6928 offset = entry[1].offset;
6929 for (; entry->mode != VOIDmode; entry--)
6930 if (entry->reg == pr_reg)
6931 goto found;
6933 /* We can't find the pr register. */
6934 gcc_unreachable ();
6936 found:
6937 offset = entry->offset - offset;
6938 pr_offset = (rounded_frame_size (d) + offset
6939 + SHMEDIA_REGS_STACK_ADJUST ());
6941 else
6942 pr_offset = rounded_frame_size (d);
6944 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
6945 emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
6947 tmp = gen_frame_mem (Pmode, tmp);
6948 emit_insn (GEN_MOV (tmp, ra));
6951 /* Clear variables at function end. */
6953 static void
6954 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6955 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6957 sh_need_epilogue_known = 0;
6960 static rtx
6961 sh_builtin_saveregs (void)
6963 /* First unnamed integer register. */
6964 int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
6965 /* Number of integer registers we need to save. */
6966 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
6967 /* First unnamed SFmode float reg */
6968 int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
6969 /* Number of SFmode float regs to save. */
6970 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6971 rtx regbuf, fpregs;
6972 int bufsize, regno;
6973 alias_set_type alias_set;
6975 if (TARGET_SH5)
6977 if (n_intregs)
6979 int pushregs = n_intregs;
6981 while (pushregs < NPARM_REGS (SImode) - 1
6982 && (CALL_COOKIE_INT_REG_GET
6983 (crtl->args.info.call_cookie,
6984 NPARM_REGS (SImode) - pushregs)
6985 == 1))
6987 crtl->args.info.call_cookie
6988 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6989 - pushregs, 1);
6990 pushregs++;
6993 if (pushregs == NPARM_REGS (SImode))
6994 crtl->args.info.call_cookie
6995 |= (CALL_COOKIE_INT_REG (0, 1)
6996 | CALL_COOKIE_STACKSEQ (pushregs - 1));
6997 else
6998 crtl->args.info.call_cookie
6999 |= CALL_COOKIE_STACKSEQ (pushregs);
7001 crtl->args.pretend_args_size += 8 * n_intregs;
7003 if (TARGET_SHCOMPACT)
7004 return const0_rtx;
7007 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
7009 error ("__builtin_saveregs not supported by this subtarget");
7010 return const0_rtx;
7013 if (TARGET_SHMEDIA)
7014 n_floatregs = 0;
7016 /* Allocate block of memory for the regs. */
7017 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
7018 Or can assign_stack_local accept a 0 SIZE argument? */
7019 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
7021 if (TARGET_SHMEDIA)
7022 regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
7023 else if (n_floatregs & 1)
7025 rtx addr;
7027 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7028 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
7029 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
7030 regbuf = change_address (regbuf, BLKmode, addr);
7032 else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
7034 rtx addr, mask;
7036 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7037 addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
7038 mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
7039 emit_insn (gen_andsi3 (addr, addr, mask));
7040 regbuf = change_address (regbuf, BLKmode, addr);
7042 else
7043 regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
7044 alias_set = get_varargs_alias_set ();
7045 set_mem_alias_set (regbuf, alias_set);
7047 /* Save int args.
7048 This is optimized to only save the regs that are necessary. Explicitly
7049 named args need not be saved. */
7050 if (n_intregs > 0)
7051 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
7052 adjust_address (regbuf, BLKmode,
7053 n_floatregs * UNITS_PER_WORD),
7054 n_intregs);
7056 if (TARGET_SHMEDIA)
7057 /* Return the address of the regbuf. */
7058 return XEXP (regbuf, 0);
7060 /* Save float args.
7061 This is optimized to only save the regs that are necessary. Explicitly
7062 named args need not be saved.
7063 We explicitly build a pointer to the buffer because it halves the insn
7064 count when not optimizing (otherwise the pointer is built for each reg
7065 saved).
7066 We emit the moves in reverse order so that we can use predecrement. */
7068 fpregs = copy_to_mode_reg (Pmode,
7069 plus_constant (XEXP (regbuf, 0),
7070 n_floatregs * UNITS_PER_WORD));
7071 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7073 rtx mem;
7074 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
7076 emit_insn (gen_addsi3 (fpregs, fpregs,
7077 GEN_INT (-2 * UNITS_PER_WORD)));
7078 mem = change_address (regbuf, DFmode, fpregs);
7079 emit_move_insn (mem,
7080 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
7082 regno = first_floatreg;
7083 if (regno & 1)
7085 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7086 mem = change_address (regbuf, SFmode, fpregs);
7087 emit_move_insn (mem,
7088 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
7089 - (TARGET_LITTLE_ENDIAN != 0)));
7092 else
7093 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
7095 rtx mem;
7097 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7098 mem = change_address (regbuf, SFmode, fpregs);
7099 emit_move_insn (mem,
7100 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
7103 /* Return the address of the regbuf. */
7104 return XEXP (regbuf, 0);
7107 /* Define the `__builtin_va_list' type for the ABI. */
7109 static tree
7110 sh_build_builtin_va_list (void)
7112 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7113 tree record;
7115 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
7116 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7117 return ptr_type_node;
7119 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7121 f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
7122 ptr_type_node);
7123 f_next_o_limit = build_decl (FIELD_DECL,
7124 get_identifier ("__va_next_o_limit"),
7125 ptr_type_node);
7126 f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
7127 ptr_type_node);
7128 f_next_fp_limit = build_decl (FIELD_DECL,
7129 get_identifier ("__va_next_fp_limit"),
7130 ptr_type_node);
7131 f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
7132 ptr_type_node);
7134 DECL_FIELD_CONTEXT (f_next_o) = record;
7135 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7136 DECL_FIELD_CONTEXT (f_next_fp) = record;
7137 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7138 DECL_FIELD_CONTEXT (f_next_stack) = record;
7140 TYPE_FIELDS (record) = f_next_o;
7141 TREE_CHAIN (f_next_o) = f_next_o_limit;
7142 TREE_CHAIN (f_next_o_limit) = f_next_fp;
7143 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7144 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7146 layout_type (record);
7148 return record;
7151 /* Always return va_list_type_node. */
7153 static tree
7154 sh_canonical_va_list_type (tree type ATTRIBUTE_UNUSED)
7156 return va_list_type_node;
7159 /* Implement `va_start' for varargs and stdarg. */
7161 static void
7162 sh_va_start (tree valist, rtx nextarg)
7164 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7165 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7166 tree t, u;
7167 int nfp, nint;
7169 if (TARGET_SH5)
7171 expand_builtin_saveregs ();
7172 std_expand_builtin_va_start (valist, nextarg);
7173 return;
7176 if ((! TARGET_SH2E && ! TARGET_SH4)
7177 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7179 std_expand_builtin_va_start (valist, nextarg);
7180 return;
7183 f_next_o = TYPE_FIELDS (va_list_type_node);
7184 f_next_o_limit = TREE_CHAIN (f_next_o);
7185 f_next_fp = TREE_CHAIN (f_next_o_limit);
7186 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7187 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7189 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7190 NULL_TREE);
7191 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7192 valist, f_next_o_limit, NULL_TREE);
7193 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7194 NULL_TREE);
7195 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7196 valist, f_next_fp_limit, NULL_TREE);
7197 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7198 valist, f_next_stack, NULL_TREE);
7200 /* Call __builtin_saveregs. */
7201 u = make_tree (sizetype, expand_builtin_saveregs ());
7202 u = fold_convert (ptr_type_node, u);
7203 t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
7204 TREE_SIDE_EFFECTS (t) = 1;
7205 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7207 nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
7208 if (nfp < 8)
7209 nfp = 8 - nfp;
7210 else
7211 nfp = 0;
7212 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7213 size_int (UNITS_PER_WORD * nfp));
7214 t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
7215 TREE_SIDE_EFFECTS (t) = 1;
7216 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7218 t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
7219 TREE_SIDE_EFFECTS (t) = 1;
7220 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7222 nint = crtl->args.info.arg_count[SH_ARG_INT];
7223 if (nint < 4)
7224 nint = 4 - nint;
7225 else
7226 nint = 0;
7227 u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7228 size_int (UNITS_PER_WORD * nint));
7229 t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
7230 TREE_SIDE_EFFECTS (t) = 1;
7231 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
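/* For illustration, in a hypothetical callee with 3 named float and 2 named
   int arguments (UNITS_PER_WORD == 4): nfp == 8 - 3 == 5 and
   nint == 4 - 2 == 2, so next_fp points at the start of the register save
   area returned by __builtin_saveregs, next_fp_limit == next_o ==
   next_fp + 20, and next_o_limit == next_o + 8.  */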
7233 u = make_tree (ptr_type_node, nextarg);
7234 t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
7235 TREE_SIDE_EFFECTS (t) = 1;
7236 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7239 /* TYPE is a RECORD_TYPE. If there is only a single nonzero-sized
7240 member, return it. */
7241 static tree
7242 find_sole_member (tree type)
7244 tree field, member = NULL_TREE;
7246 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7248 if (TREE_CODE (field) != FIELD_DECL)
7249 continue;
7250 if (!DECL_SIZE (field))
7251 return NULL_TREE;
7252 if (integer_zerop (DECL_SIZE (field)))
7253 continue;
7254 if (member)
7255 return NULL_TREE;
7256 member = field;
7258 return member;
7260 /* Implement `va_arg'. */
7262 static tree
7263 sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
7264 gimple_seq *post_p ATTRIBUTE_UNUSED)
7266 HOST_WIDE_INT size, rsize;
7267 tree tmp, pptr_type_node;
7268 tree addr, lab_over = NULL, result = NULL;
7269 int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7270 tree eff_type;
7272 if (pass_by_ref)
7273 type = build_pointer_type (type);
7275 size = int_size_in_bytes (type);
7276 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
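/* For illustration: with UNITS_PER_WORD == 4, a hypothetical 6-byte type
   gives rsize == (6 + 3) & -4 == 8, i.e. the size rounded up to whole
   argument words.  */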
7277 pptr_type_node = build_pointer_type (ptr_type_node);
7279 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7280 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7282 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7283 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7284 int pass_as_float;
7285 tree lab_false;
7286 tree member;
7288 f_next_o = TYPE_FIELDS (va_list_type_node);
7289 f_next_o_limit = TREE_CHAIN (f_next_o);
7290 f_next_fp = TREE_CHAIN (f_next_o_limit);
7291 f_next_fp_limit = TREE_CHAIN (f_next_fp);
7292 f_next_stack = TREE_CHAIN (f_next_fp_limit);
7294 next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7295 NULL_TREE);
7296 next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7297 valist, f_next_o_limit, NULL_TREE);
7298 next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7299 valist, f_next_fp, NULL_TREE);
7300 next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7301 valist, f_next_fp_limit, NULL_TREE);
7302 next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7303 valist, f_next_stack, NULL_TREE);
7305 /* Structures with a single member with a distinct mode are passed
7306 like their member. This is relevant if the latter has a REAL_TYPE
7307 or COMPLEX_TYPE type. */
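/* For example, a hypothetical "struct { double d; }" typically gets DFmode,
   the mode of its sole member, so the loop below lets it be fetched like a
   plain double.  */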
7308 eff_type = type;
7309 while (TREE_CODE (eff_type) == RECORD_TYPE
7310 && (member = find_sole_member (eff_type))
7311 && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7312 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7313 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7315 tree field_type = TREE_TYPE (member);
7317 if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7318 eff_type = field_type;
7319 else
7321 gcc_assert ((TYPE_ALIGN (eff_type)
7322 < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7323 || (TYPE_ALIGN (eff_type)
7324 > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7325 break;
7329 if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7331 pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7332 || (TREE_CODE (eff_type) == COMPLEX_TYPE
7333 && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7334 && size <= 16));
7336 else
7338 pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
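/* Thus, for illustration: on SH4 a float, a double, or a float/double
   _Complex argument is fetched through the next_fp/next_fp_limit fields
   below, while on targets without double-precision argument passing
   (e.g. SH2E) only a 4-byte float is; everything else takes the
   next_o/next_stack path.  */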
7341 addr = create_tmp_var (pptr_type_node, NULL);
7342 lab_false = create_artificial_label ();
7343 lab_over = create_artificial_label ();
7345 valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7347 if (pass_as_float)
7349 tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7350 tree cmp;
7351 bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7353 tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
7354 gimplify_assign (addr, tmp, pre_p);
7356 gimplify_assign (next_fp_tmp, valist, pre_p);
7357 tmp = next_fp_limit;
7358 if (size > 4 && !is_double)
7359 tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp), tmp,
7360 size_int (4 - size));
7361 tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
7362 cmp = build3 (COND_EXPR, void_type_node, tmp,
7363 build1 (GOTO_EXPR, void_type_node, lab_false),
7364 NULL_TREE);
7365 if (!is_double)
7366 gimplify_and_add (cmp, pre_p);
7368 if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7369 || (is_double || size == 16))
7371 tmp = fold_convert (sizetype, next_fp_tmp);
7372 tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7373 size_int (UNITS_PER_WORD));
7374 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7375 next_fp_tmp, tmp);
7376 gimplify_assign (next_fp_tmp, tmp, pre_p);
7378 if (is_double)
7379 gimplify_and_add (cmp, pre_p);
7381 #ifdef FUNCTION_ARG_SCmode_WART
7382 if (TYPE_MODE (eff_type) == SCmode
7383 && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7385 tree subtype = TREE_TYPE (eff_type);
7386 tree real, imag;
7388 imag
7389 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7390 imag = get_initialized_tmp_var (imag, pre_p, NULL);
7392 real
7393 = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7394 real = get_initialized_tmp_var (real, pre_p, NULL);
7396 result = build2 (COMPLEX_EXPR, type, real, imag);
7397 result = get_initialized_tmp_var (result, pre_p, NULL);
7399 #endif /* FUNCTION_ARG_SCmode_WART */
7401 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7402 gimplify_and_add (tmp, pre_p);
7404 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7405 gimplify_and_add (tmp, pre_p);
7407 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7408 gimplify_assign (addr, tmp, pre_p);
7409 gimplify_assign (next_fp_tmp, valist, pre_p);
7411 gimplify_assign (valist, next_fp_tmp, post_p);
7412 valist = next_fp_tmp;
7414 else
7416 tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node, next_o,
7417 size_int (rsize));
7418 tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
7419 tmp = build3 (COND_EXPR, void_type_node, tmp,
7420 build1 (GOTO_EXPR, void_type_node, lab_false),
7421 NULL_TREE);
7422 gimplify_and_add (tmp, pre_p);
7424 tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
7425 gimplify_assign (addr, tmp, pre_p);
7427 tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7428 gimplify_and_add (tmp, pre_p);
7430 tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7431 gimplify_and_add (tmp, pre_p);
7433 if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
7434 gimplify_assign (next_o, next_o_limit, pre_p);
7436 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7437 gimplify_assign (addr, tmp, pre_p);
7440 if (!result)
7442 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7443 gimplify_and_add (tmp, pre_p);
7447 /* ??? In va-sh.h, there had been code to make values larger than
7448 size 8 indirect. This does not match the FUNCTION_ARG macros. */
7450 tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7451 if (result)
7453 gimplify_assign (result, tmp, pre_p);
7455 tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7456 gimplify_and_add (tmp, pre_p);
7458 else
7459 result = tmp;
7461 if (pass_by_ref)
7462 result = build_va_arg_indirect_ref (result);
7464 return result;
7467 bool
7468 sh_promote_prototypes (const_tree type)
7470 if (TARGET_HITACHI)
7471 return 0;
7472 if (! type)
7473 return 1;
7474 return ! sh_attr_renesas_p (type);
7477 /* Whether an argument must be passed by reference. On SHcompact, we
7478 pretend that arguments wider than 32 bits that would have been passed in
7479 registers are passed by reference, so that an SHmedia trampoline
7480 loads them into the full 64-bit registers. */
7482 static int
7483 shcompact_byref (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
7484 const_tree type, bool named)
7486 unsigned HOST_WIDE_INT size;
7488 if (type)
7489 size = int_size_in_bytes (type);
7490 else
7491 size = GET_MODE_SIZE (mode);
7493 if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7494 && (!named
7495 || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7496 || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7497 && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7498 && size > 4
7499 && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7500 && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7501 return size;
7502 else
7503 return 0;
7506 static bool
7507 sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7508 const_tree type, bool named)
7510 if (targetm.calls.must_pass_in_stack (mode, type))
7511 return true;
7513 /* ??? std_gimplify_va_arg_expr passes NULL for cum. That function
7514 wants to know about pass-by-reference semantics for incoming
7515 arguments. */
7516 if (! cum)
7517 return false;
7519 if (TARGET_SHCOMPACT)
7521 cum->byref = shcompact_byref (cum, mode, type, named);
7522 return cum->byref != 0;
7525 return false;
7528 static bool
7529 sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7530 const_tree type, bool named ATTRIBUTE_UNUSED)
7532 /* ??? How can it possibly be correct to return true only on the
7533 caller side of the equation? Is there someplace else in the
7534 sh backend that's magically producing the copies? */
7535 return (cum->outgoing
7536 && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7537 % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7540 static int
7541 sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7542 tree type, bool named ATTRIBUTE_UNUSED)
7544 int words = 0;
7546 if (!TARGET_SH5
7547 && PASS_IN_REG_P (*cum, mode, type)
7548 && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7549 && (ROUND_REG (*cum, mode)
7550 + (mode != BLKmode
7551 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7552 : ROUND_ADVANCE (int_size_in_bytes (type)))
7553 > NPARM_REGS (mode)))
7554 words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
7556 else if (!TARGET_SHCOMPACT
7557 && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7558 words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
7560 return words * UNITS_PER_WORD;
7564 /* Define where to put the arguments to a function.
7565 Value is zero to push the argument on the stack,
7566 or a hard register in which to store the argument.
7568 MODE is the argument's machine mode.
7569 TYPE is the data type of the argument (as a tree).
7570 This is null for libcalls where that information may
7571 not be available.
7572 CUM is a variable of type CUMULATIVE_ARGS which gives info about
7573 the preceding args and about the function being called.
7574 NAMED is nonzero if this argument is a named parameter
7575 (otherwise it is an extra parameter matching an ellipsis).
7577 On SH the first args are normally in registers
7578 and the rest are pushed. Any arg that starts within the first
7579 NPARM_REGS words is at least partially passed in a register unless
7580 its data type forbids. */
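/* A sketch of the usual case, assuming NPARM_REGS (SImode) == 4 on SH1..SH4:
   for
     void f (int a, int b, int c, int d, int e);
   a..d are passed in r4..r7 and e is pushed on the stack.  */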
7584 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7585 tree type, int named)
7587 if (! TARGET_SH5 && mode == VOIDmode)
7588 return GEN_INT (ca->renesas_abi ? 1 : 0);
7590 if (! TARGET_SH5
7591 && PASS_IN_REG_P (*ca, mode, type)
7592 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
7594 int regno;
7596 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
7597 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
7599 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
7600 gen_rtx_REG (SFmode,
7601 BASE_ARG_REG (mode)
7602 + (ROUND_REG (*ca, mode) ^ 1)),
7603 const0_rtx);
7604 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
7605 gen_rtx_REG (SFmode,
7606 BASE_ARG_REG (mode)
7607 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
7608 GEN_INT (4));
7609 return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
7612 /* If the alignment of a DF value causes an SF register to be
7613 skipped, we will use that skipped register for the next SF
7614 value. */
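/* Illustrative example, assuming the Renesas ABI on SH4 with fr4..fr11 as
   FP argument registers:
     void f (float a, double b, float c);
   'a' goes in fr4, 'b' needs an even-aligned register pair and goes in
   dr6 (fr6/fr7), so fr5 is skipped; 'c' then uses the skipped fr5.  */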
7615 if ((TARGET_HITACHI || ca->renesas_abi)
7616 && ca->free_single_fp_reg
7617 && mode == SFmode)
7618 return gen_rtx_REG (mode, ca->free_single_fp_reg);
7620 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
7621 ^ (mode == SFmode && TARGET_SH4
7622 && TARGET_LITTLE_ENDIAN != 0
7623 && ! TARGET_HITACHI && ! ca->renesas_abi);
7624 return gen_rtx_REG (mode, regno);
7628 if (TARGET_SH5)
7630 if (mode == VOIDmode && TARGET_SHCOMPACT)
7631 return GEN_INT (ca->call_cookie);
7633 /* The following test assumes unnamed arguments are promoted to
7634 DFmode. */
7635 if (mode == SFmode && ca->free_single_fp_reg)
7636 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
7638 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
7639 && (named || ! ca->prototype_p)
7640 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
7642 if (! ca->prototype_p && TARGET_SHMEDIA)
7643 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
7645 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
7646 FIRST_FP_PARM_REG
7647 + ca->arg_count[(int) SH_ARG_FLOAT]);
7650 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
7651 && (! TARGET_SHCOMPACT
7652 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
7653 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
7654 type, named))))
7656 return gen_rtx_REG (mode, (FIRST_PARM_REG
7657 + ca->arg_count[(int) SH_ARG_INT]));
7660 return 0;
7663 return 0;
7666 /* Update the data in CUM to advance over an argument
7667 of mode MODE and data type TYPE.
7668 (TYPE is null for libcalls where that information may not be
7669 available.) */
7671 void
7672 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7673 tree type, int named)
7675 if (ca->force_mem)
7676 ca->force_mem = 0;
7677 else if (TARGET_SH5)
7679 tree type2 = (ca->byref && type
7680 ? TREE_TYPE (type)
7681 : type);
7682 enum machine_mode mode2 = (ca->byref && type
7683 ? TYPE_MODE (type2)
7684 : mode);
7685 int dwords = ((ca->byref
7686 ? ca->byref
7687 : mode2 == BLKmode
7688 ? int_size_in_bytes (type2)
7689 : GET_MODE_SIZE (mode2)) + 7) / 8;
7690 int numregs = MIN (dwords, NPARM_REGS (SImode)
7691 - ca->arg_count[(int) SH_ARG_INT]);
7693 if (numregs)
7695 ca->arg_count[(int) SH_ARG_INT] += numregs;
7696 if (TARGET_SHCOMPACT
7697 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
7699 ca->call_cookie
7700 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7701 - numregs, 1);
7702 /* N.B. We want this also for outgoing. */
7703 ca->stack_regs += numregs;
7705 else if (ca->byref)
7707 if (! ca->outgoing)
7708 ca->stack_regs += numregs;
7709 ca->byref_regs += numregs;
7710 ca->byref = 0;
7712 ca->call_cookie
7713 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7714 - numregs, 2);
7715 while (--numregs);
7716 ca->call_cookie
7717 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7718 - 1, 1);
7720 else if (dwords > numregs)
7722 int pushregs = numregs;
7724 if (TARGET_SHCOMPACT)
7725 ca->stack_regs += numregs;
7726 while (pushregs < NPARM_REGS (SImode) - 1
7727 && (CALL_COOKIE_INT_REG_GET
7728 (ca->call_cookie,
7729 NPARM_REGS (SImode) - pushregs)
7730 == 1))
7732 ca->call_cookie
7733 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7734 - pushregs, 1);
7735 pushregs++;
7737 if (numregs == NPARM_REGS (SImode))
7738 ca->call_cookie
7739 |= CALL_COOKIE_INT_REG (0, 1)
7740 | CALL_COOKIE_STACKSEQ (numregs - 1);
7741 else
7742 ca->call_cookie
7743 |= CALL_COOKIE_STACKSEQ (numregs);
7746 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
7747 && (named || ! ca->prototype_p))
7749 if (mode2 == SFmode && ca->free_single_fp_reg)
7750 ca->free_single_fp_reg = 0;
7751 else if (ca->arg_count[(int) SH_ARG_FLOAT]
7752 < NPARM_REGS (SFmode))
7754 int numfpregs
7755 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
7756 NPARM_REGS (SFmode)
7757 - ca->arg_count[(int) SH_ARG_FLOAT]);
7759 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
7761 if (TARGET_SHCOMPACT && ! ca->prototype_p)
7763 if (ca->outgoing && numregs > 0)
7766 ca->call_cookie
7767 |= (CALL_COOKIE_INT_REG
7768 (ca->arg_count[(int) SH_ARG_INT]
7769 - numregs + ((numfpregs - 2) / 2),
7770 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
7771 - numfpregs) / 2));
7773 while (numfpregs -= 2);
7775 else if (mode2 == SFmode && (named)
7776 && (ca->arg_count[(int) SH_ARG_FLOAT]
7777 < NPARM_REGS (SFmode)))
7778 ca->free_single_fp_reg
7779 = FIRST_FP_PARM_REG - numfpregs
7780 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
7783 return;
7786 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
7788 /* Note that we've used the skipped register. */
7789 if (mode == SFmode && ca->free_single_fp_reg)
7791 ca->free_single_fp_reg = 0;
7792 return;
7794 /* When we have a DF after an SF, there's an SF register that gets
7795 skipped in order to align the DF value. We note this skipped
7796 register, because the next SF value will use it, and not the
7797 SF that follows the DF. */
7798 if (mode == DFmode
7799 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
7801 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
7802 + BASE_ARG_REG (mode));
7806 if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
7807 || PASS_IN_REG_P (*ca, mode, type))
7808 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
7809 = (ROUND_REG (*ca, mode)
7810 + (mode == BLKmode
7811 ? ROUND_ADVANCE (int_size_in_bytes (type))
7812 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
7815 /* The Renesas calling convention doesn't quite fit into this scheme since
7816 the address is passed like an invisible argument, but one that is always
7817 passed in memory. */
7818 static rtx
7819 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
7821 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7822 return 0;
7823 return gen_rtx_REG (Pmode, 2);
7826 /* Worker function for TARGET_RETURN_IN_MEMORY. */
7828 static bool
7829 sh_return_in_memory (const_tree type, const_tree fndecl)
7831 if (TARGET_SH5)
7833 if (TYPE_MODE (type) == BLKmode)
7834 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
7835 else
7836 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
7838 else
7840 return (TYPE_MODE (type) == BLKmode
7841 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7842 && TREE_CODE (type) == RECORD_TYPE));
7846 /* We actually emit the code in sh_expand_prologue. We used to use
7847 a static variable to flag that we need to emit this code, but that
7848 doesn't work when inlining, when functions are deferred and then emitted
7849 later. Fortunately, we already have two flags that are part of struct
7850 function that tell if a function uses varargs or stdarg. */
7851 static void
7852 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
7853 enum machine_mode mode,
7854 tree type,
7855 int *pretend_arg_size,
7856 int second_time ATTRIBUTE_UNUSED)
7858 gcc_assert (cfun->stdarg);
7859 if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
7861 int named_parm_regs, anon_parm_regs;
7863 named_parm_regs = (ROUND_REG (*ca, mode)
7864 + (mode == BLKmode
7865 ? ROUND_ADVANCE (int_size_in_bytes (type))
7866 : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
7867 anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
7868 if (anon_parm_regs > 0)
7869 *pretend_arg_size = anon_parm_regs * 4;
7873 static bool
7874 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
7876 return TARGET_SH5;
7879 static bool
7880 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
7882 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
7886 /* Define the offset between two registers, one to be eliminated, and
7887 the other its replacement, at the start of a routine. */
7890 initial_elimination_offset (int from, int to)
7892 int regs_saved;
7893 int regs_saved_rounding = 0;
7894 int total_saved_regs_space;
7895 int total_auto_space;
7896 int save_flags = target_flags;
7897 int copy_flags;
7898 HARD_REG_SET live_regs_mask;
7900 shmedia_space_reserved_for_target_registers = false;
7901 regs_saved = calc_live_regs (&live_regs_mask);
7902 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
7904 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
7906 shmedia_space_reserved_for_target_registers = true;
7907 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
7910 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
7911 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7912 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
7914 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
7915 copy_flags = target_flags;
7916 target_flags = save_flags;
7918 total_saved_regs_space = regs_saved + regs_saved_rounding;
7920 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7921 return total_saved_regs_space + total_auto_space
7922 + crtl->args.info.byref_regs * 8;
7924 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7925 return total_saved_regs_space + total_auto_space
7926 + crtl->args.info.byref_regs * 8;
7928 /* Initial gap between fp and sp is 0. */
7929 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7930 return 0;
7932 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7933 return rounded_frame_size (0);
7935 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7936 return rounded_frame_size (0);
7938 gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
7939 && (to == HARD_FRAME_POINTER_REGNUM
7940 || to == STACK_POINTER_REGNUM));
7941 if (TARGET_SH5)
7943 int n = total_saved_regs_space;
7944 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7945 save_schedule schedule;
7946 save_entry *entry;
7948 n += total_auto_space;
7950 /* If it wasn't saved, there's not much we can do. */
7951 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7952 return n;
7954 target_flags = copy_flags;
7956 sh5_schedule_saves (&live_regs_mask, &schedule, n);
7957 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
7958 if (entry->reg == pr_reg)
7960 target_flags = save_flags;
7961 return entry->offset;
7963 gcc_unreachable ();
7965 else
7966 return total_auto_space;
7969 /* Parse the -mfixed-range= option string. */
7970 void
7971 sh_fix_range (const char *const_str)
7973 int i, first, last;
7974 char *str, *dash, *comma;
7976 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
7977 REG2 are either register names or register numbers. The effect
7978 of this option is to mark the registers in the range from REG1 to
7979 REG2 as ``fixed'' so they won't be used by the compiler. */
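/* For example (illustrative, following the parsing below):
   -mfixed-range=r10-r12 marks r10, r11 and r12 as fixed and call-used,
   so the register allocator will not use them.  */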
7981 i = strlen (const_str);
7982 str = (char *) alloca (i + 1);
7983 memcpy (str, const_str, i + 1);
7985 while (1)
7987 dash = strchr (str, '-');
7988 if (!dash)
7990 warning (0, "value of -mfixed-range must have form REG1-REG2");
7991 return;
7993 *dash = '\0';
7994 comma = strchr (dash + 1, ',');
7995 if (comma)
7996 *comma = '\0';
7998 first = decode_reg_name (str);
7999 if (first < 0)
8001 warning (0, "unknown register name: %s", str);
8002 return;
8005 last = decode_reg_name (dash + 1);
8006 if (last < 0)
8008 warning (0, "unknown register name: %s", dash + 1);
8009 return;
8012 *dash = '-';
8014 if (first > last)
8016 warning (0, "%s-%s is an empty range", str, dash + 1);
8017 return;
8020 for (i = first; i <= last; ++i)
8021 fixed_regs[i] = call_used_regs[i] = 1;
8023 if (!comma)
8024 break;
8026 *comma = ',';
8027 str = comma + 1;
8031 /* Insert any deferred function attributes from earlier pragmas. */
8032 static void
8033 sh_insert_attributes (tree node, tree *attributes)
8035 tree attrs;
8037 if (TREE_CODE (node) != FUNCTION_DECL)
8038 return;
8040 /* We are only interested in fields. */
8041 if (!DECL_P (node))
8042 return;
8044 /* Append the attributes to the deferred attributes. */
8045 *sh_deferred_function_attributes_tail = *attributes;
8046 attrs = sh_deferred_function_attributes;
8047 if (!attrs)
8048 return;
8050 /* Some attributes imply or require the interrupt attribute. */
8051 if (!lookup_attribute ("interrupt_handler", attrs)
8052 && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
8054 /* If we have a trapa_handler, but no interrupt_handler attribute,
8055 insert an interrupt_handler attribute. */
8056 if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
8057 /* We can't use sh_pr_interrupt here because that's not in the
8058 java frontend. */
8059 attrs
8060 = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
8061 /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
8062 if the interrupt attribute is missing, we ignore the attribute
8063 and warn. */
8064 else if (lookup_attribute ("sp_switch", attrs)
8065 || lookup_attribute ("trap_exit", attrs)
8066 || lookup_attribute ("nosave_low_regs", attrs)
8067 || lookup_attribute ("resbank", attrs))
8069 tree *tail;
8071 for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
8073 if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
8074 || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
8075 || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
8076 || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
8077 warning (OPT_Wattributes,
8078 "%qs attribute only applies to interrupt functions",
8079 IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
8080 else
8082 *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
8083 NULL_TREE);
8084 tail = &TREE_CHAIN (*tail);
8087 attrs = *attributes;
8091 /* Install the processed list. */
8092 *attributes = attrs;
8094 /* Clear deferred attributes. */
8095 sh_deferred_function_attributes = NULL_TREE;
8096 sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
8098 return;
8101 /* Supported attributes:
8103 interrupt_handler -- specifies this function is an interrupt handler.
8105 trapa_handler -- like above, but don't save all registers.
8107 sp_switch -- specifies an alternate stack for an interrupt handler
8108 to run on.
8110 trap_exit -- use a trapa to exit an interrupt function instead of
8111 an rte instruction.
8113 nosave_low_regs -- don't save r0..r7 in an interrupt handler.
8114 This is useful on the SH3 and upwards,
8115 which have a separate set of low regs for User and Supervisor modes.
8116 This should only be used for the lowest level of interrupts. Higher levels
8117 of interrupts must save the registers in case they themselves are
8118 interrupted.
8120 renesas -- use Renesas calling/layout conventions (functions and
8121 structures).
8123 resbank -- In case of an ISR, use a register bank to save registers
8124 R0-R14, MACH, MACL, GBR and PR. This is useful only on SH2A targets.
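/* Illustrative usage of these attributes (the argument forms follow the
   handlers below; the identifiers are made up):

     void handler (void)
       __attribute__ ((interrupt_handler, sp_switch ("alt_stack"),
                       trap_exit (11)));

   where 'alt_stack' names the variable holding the alternate stack address
   and 11 is the trapa number used at function exit.  */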
8127 const struct attribute_spec sh_attribute_table[] =
8129 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
8130 { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8131 { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
8132 { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
8133 { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
8134 { "trapa_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8135 { "nosave_low_regs", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
8136 { "resbank", 0, 0, true, false, false, sh_handle_resbank_handler_attribute },
8137 { "function_vector", 1, 1, true, false, false, sh2a_handle_function_vector_handler_attribute },
8138 #ifdef SYMBIAN
8139 /* Symbian support adds two new attributes:
8140 dllexport - for exporting a function/variable that will live in a dll
8141 dllimport - for importing a function/variable from a dll
8143 Microsoft allows multiple declspecs in one __declspec, separating
8144 them with spaces. We do NOT support this. Instead, use __declspec
8145 multiple times. */
8146 { "dllimport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8147 { "dllexport", 0, 0, true, false, false, sh_symbian_handle_dll_attribute },
8148 #endif
8149 { NULL, 0, 0, false, false, false, NULL }
8152 /* Handle a 'resbank' attribute. */
8153 static tree
8154 sh_handle_resbank_handler_attribute (tree * node, tree name,
8155 tree args ATTRIBUTE_UNUSED,
8156 int flags ATTRIBUTE_UNUSED,
8157 bool * no_add_attrs)
8159 if (!TARGET_SH2A)
8161 warning (OPT_Wattributes, "%qs attribute is supported only for SH2A",
8162 IDENTIFIER_POINTER (name));
8163 *no_add_attrs = true;
8165 if (TREE_CODE (*node) != FUNCTION_DECL)
8167 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8168 IDENTIFIER_POINTER (name));
8169 *no_add_attrs = true;
8172 return NULL_TREE;
8175 /* Handle an "interrupt_handler" attribute; arguments as in
8176 struct attribute_spec.handler. */
8177 static tree
8178 sh_handle_interrupt_handler_attribute (tree *node, tree name,
8179 tree args ATTRIBUTE_UNUSED,
8180 int flags ATTRIBUTE_UNUSED,
8181 bool *no_add_attrs)
8183 if (TREE_CODE (*node) != FUNCTION_DECL)
8185 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8186 IDENTIFIER_POINTER (name));
8187 *no_add_attrs = true;
8189 else if (TARGET_SHCOMPACT)
8191 error ("attribute interrupt_handler is not compatible with -m5-compact");
8192 *no_add_attrs = true;
8195 return NULL_TREE;
8198 /* Handle a 'function_vector' attribute; arguments as in
8199 struct attribute_spec.handler. */
8200 static tree
8201 sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
8202 tree args ATTRIBUTE_UNUSED,
8203 int flags ATTRIBUTE_UNUSED,
8204 bool * no_add_attrs)
8206 if (!TARGET_SH2A)
8208 warning (OPT_Wattributes, "%qs attribute only applies to SH2A",
8209 IDENTIFIER_POINTER (name));
8210 *no_add_attrs = true;
8212 else if (TREE_CODE (*node) != FUNCTION_DECL)
8214 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8215 IDENTIFIER_POINTER (name));
8216 *no_add_attrs = true;
8218 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8220 /* The argument must be a constant integer. */
8221 warning (OPT_Wattributes,
8222 "`%s' attribute argument not an integer constant",
8223 IDENTIFIER_POINTER (name));
8224 *no_add_attrs = true;
8226 else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
8228 /* The argument value must be between 0 and 255. */
8229 warning (OPT_Wattributes,
8230 "`%s' attribute argument should be between 0 and 255",
8231 IDENTIFIER_POINTER (name));
8232 *no_add_attrs = true;
8234 return NULL_TREE;
8237 /* Returns 1 if the rtx X refers to a function that has been assigned
8238 the attribute 'function_vector'. */
8240 sh2a_is_function_vector_call (rtx x)
8242 if (GET_CODE (x) == SYMBOL_REF
8243 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8245 tree tr = SYMBOL_REF_DECL (x);
8247 if (sh2a_function_vector_p (tr))
8248 return 1;
8251 return 0;
8254 /* Returns the function vector number, if the attribute
8255 'function_vector' is assigned, otherwise returns zero. */
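/* For illustration (assumed SH2A usage; the function name is made up):
     void isr (void) __attribute__ ((function_vector (18)));
   makes this return 18 for references to 'isr'.  */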
8257 sh2a_get_function_vector_number (rtx x)
8259 int num;
8260 tree list, t;
8262 if ((GET_CODE (x) == SYMBOL_REF)
8263 && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8265 t = SYMBOL_REF_DECL (x);
8267 if (TREE_CODE (t) != FUNCTION_DECL)
8268 return 0;
8270 list = SH_ATTRIBUTES (t);
8271 while (list)
8273 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8275 num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
8276 return num;
8279 list = TREE_CHAIN (list);
8282 return 0;
8284 else
8285 return 0;
8288 /* Handle an "sp_switch" attribute; arguments as in
8289 struct attribute_spec.handler. */
8290 static tree
8291 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
8292 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8294 if (TREE_CODE (*node) != FUNCTION_DECL)
8296 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8297 IDENTIFIER_POINTER (name));
8298 *no_add_attrs = true;
8300 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8302 /* The argument must be a constant string. */
8303 warning (OPT_Wattributes, "%qs attribute argument not a string constant",
8304 IDENTIFIER_POINTER (name));
8305 *no_add_attrs = true;
8308 return NULL_TREE;
8311 /* Handle a "trap_exit" attribute; arguments as in
8312 struct attribute_spec.handler. */
8313 static tree
8314 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8315 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8317 if (TREE_CODE (*node) != FUNCTION_DECL)
8319 warning (OPT_Wattributes, "%qs attribute only applies to functions",
8320 IDENTIFIER_POINTER (name));
8321 *no_add_attrs = true;
8323 /* The argument specifies a trap number to be used in a trapa instruction
8324 at function exit (instead of an rte instruction). */
8325 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8327 /* The argument must be a constant integer. */
8328 warning (OPT_Wattributes, "%qs attribute argument not an "
8329 "integer constant", IDENTIFIER_POINTER (name));
8330 *no_add_attrs = true;
8333 return NULL_TREE;
8336 static tree
8337 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8338 tree name ATTRIBUTE_UNUSED,
8339 tree args ATTRIBUTE_UNUSED,
8340 int flags ATTRIBUTE_UNUSED,
8341 bool *no_add_attrs ATTRIBUTE_UNUSED)
8343 return NULL_TREE;
8346 /* True if __attribute__((renesas)) or -mrenesas. */
8348 sh_attr_renesas_p (const_tree td)
8350 if (TARGET_HITACHI)
8351 return 1;
8352 if (td == 0)
8353 return 0;
8354 if (DECL_P (td))
8355 td = TREE_TYPE (td);
8356 if (td == error_mark_node)
8357 return 0;
8358 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8359 != NULL_TREE);
8362 /* True if __attribute__((renesas)) or -mrenesas, for the current
8363 function. */
8365 sh_cfun_attr_renesas_p (void)
8367 return sh_attr_renesas_p (current_function_decl);
8371 sh_cfun_interrupt_handler_p (void)
8373 return (lookup_attribute ("interrupt_handler",
8374 DECL_ATTRIBUTES (current_function_decl))
8375 != NULL_TREE);
8378 /* Returns 1 if FUNC has been assigned the attribute
8379 "function_vector". */
8381 sh2a_function_vector_p (tree func)
8383 tree list;
8384 if (TREE_CODE (func) != FUNCTION_DECL)
8385 return 0;
8387 list = SH_ATTRIBUTES (func);
8388 while (list)
8390 if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8391 return 1;
8393 list = TREE_CHAIN (list);
8395 return 0;
8398 /* Returns TRUE if the current function has the "resbank" attribute. */
8401 sh_cfun_resbank_handler_p (void)
8403 return ((lookup_attribute ("resbank",
8404 DECL_ATTRIBUTES (current_function_decl))
8405 != NULL_TREE)
8406 && (lookup_attribute ("interrupt_handler",
8407 DECL_ATTRIBUTES (current_function_decl))
8408 != NULL_TREE) && TARGET_SH2A);
8411 /* Implement TARGET_CHECK_PCH_TARGET_FLAGS. */
8413 static const char *
8414 sh_check_pch_target_flags (int old_flags)
8416 if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8417 | MASK_SH_E | MASK_HARD_SH4
8418 | MASK_FPU_SINGLE | MASK_SH4))
8419 return _("created and used with different architectures / ABIs");
8420 if ((old_flags ^ target_flags) & MASK_HITACHI)
8421 return _("created and used with different ABIs");
8422 if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8423 return _("created and used with different endianness");
8424 return NULL;
8427 /* Predicates used by the templates. */
8429 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
8430 Used only in general_movsrc_operand. */
8433 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8435 switch (REGNO (op))
8437 case PR_REG:
8438 case MACL_REG:
8439 case MACH_REG:
8440 return 1;
8442 return 0;
8445 /* Nonzero if OP is a floating point value with value 0.0. */
8448 fp_zero_operand (rtx op)
8450 REAL_VALUE_TYPE r;
8452 if (GET_MODE (op) != SFmode)
8453 return 0;
8455 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8456 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
8459 /* Nonzero if OP is a floating point value with value 1.0. */
8462 fp_one_operand (rtx op)
8464 REAL_VALUE_TYPE r;
8466 if (GET_MODE (op) != SFmode)
8467 return 0;
8469 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
8470 return REAL_VALUES_EQUAL (r, dconst1);
8473 /* For -m4 and -m4-single-only, mode switching is used. If we are
8474 compiling without -mfmovd, movsf_ie isn't taken into account for
8475 mode switching. We could check in machine_dependent_reorg for
8476 cases where we know we are in single precision mode, but there is
8477 no interface to find that out during reload, so we must avoid
8478 choosing an fldi alternative during reload and thus failing to
8479 allocate a scratch register for the constant loading. */
8481 fldi_ok (void)
8483 return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
8487 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8489 enum rtx_code code = GET_CODE (op);
8490 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
8493 /* Return the TLS type for TLS symbols, 0 for otherwise. */
8495 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8497 if (GET_CODE (op) != SYMBOL_REF)
8498 return 0;
8499 return SYMBOL_REF_TLS_MODEL (op);
8502 /* Return the destination address of a branch. */
8504 static int
8505 branch_dest (rtx branch)
8507 rtx dest = SET_SRC (PATTERN (branch));
8508 int dest_uid;
8510 if (GET_CODE (dest) == IF_THEN_ELSE)
8511 dest = XEXP (dest, 1);
8512 dest = XEXP (dest, 0);
8513 dest_uid = INSN_UID (dest);
8514 return INSN_ADDRESSES (dest_uid);
8517 /* Return nonzero if REG is not used after INSN.
8518 We assume REG is a reload reg, and therefore does
8519 not live past labels. It may live past calls or jumps though. */
8521 reg_unused_after (rtx reg, rtx insn)
8523 enum rtx_code code;
8524 rtx set;
8526 /* If the reg is set by this instruction, then it is safe for our
8527 case. Disregard the case where this is a store to memory, since
8528 we are checking a register used in the store address. */
8529 set = single_set (insn);
8530 if (set && GET_CODE (SET_DEST (set)) != MEM
8531 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8532 return 1;
8534 while ((insn = NEXT_INSN (insn)))
8536 rtx set;
8537 if (!INSN_P (insn))
8538 continue;
8540 code = GET_CODE (insn);
8542 #if 0
8543 /* If this is a label that existed before reload, then the register
8544 is dead here. However, if this is a label added by reorg, then
8545 the register may still be live here. We can't tell the difference,
8546 so we just ignore labels completely. */
8547 if (code == CODE_LABEL)
8548 return 1;
8549 /* else */
8550 #endif
8552 if (code == JUMP_INSN)
8553 return 0;
8555 /* If this is a sequence, we must handle them all at once.
8556 We could have for instance a call that sets the target register,
8557 and an insn in a delay slot that uses the register. In this case,
8558 we must return 0. */
8559 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
8561 int i;
8562 int retval = 0;
8564 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8566 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
8567 rtx set = single_set (this_insn);
8569 if (GET_CODE (this_insn) == CALL_INSN)
8570 code = CALL_INSN;
8571 else if (GET_CODE (this_insn) == JUMP_INSN)
8573 if (INSN_ANNULLED_BRANCH_P (this_insn))
8574 return 0;
8575 code = JUMP_INSN;
8578 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8579 return 0;
8580 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8582 if (GET_CODE (SET_DEST (set)) != MEM)
8583 retval = 1;
8584 else
8585 return 0;
8587 if (set == 0
8588 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
8589 return 0;
8591 if (retval == 1)
8592 return 1;
8593 else if (code == JUMP_INSN)
8594 return 0;
8597 set = single_set (insn);
8598 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8599 return 0;
8600 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8601 return GET_CODE (SET_DEST (set)) != MEM;
8602 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
8603 return 0;
8605 if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
8606 return 1;
8608 return 1;
8611 #include "ggc.h"
8613 static GTY(()) rtx fpscr_rtx;
8615 get_fpscr_rtx (void)
8617 if (! fpscr_rtx)
8619 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
8620 REG_USERVAR_P (fpscr_rtx) = 1;
8621 mark_user_reg (fpscr_rtx);
8623 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
8624 mark_user_reg (fpscr_rtx);
8625 return fpscr_rtx;
8628 static GTY(()) tree fpscr_values;
8630 static void
8631 emit_fpu_switch (rtx scratch, int index)
8633 rtx dst, src;
8635 if (fpscr_values == NULL)
8637 tree t;
8639 t = build_index_type (integer_one_node);
8640 t = build_array_type (integer_type_node, t);
8641 t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
8642 DECL_ARTIFICIAL (t) = 1;
8643 DECL_IGNORED_P (t) = 1;
8644 DECL_EXTERNAL (t) = 1;
8645 TREE_STATIC (t) = 1;
8646 TREE_PUBLIC (t) = 1;
8647 TREE_USED (t) = 1;
8649 fpscr_values = t;
8652 src = DECL_RTL (fpscr_values);
8653 if (!can_create_pseudo_p ())
8655 emit_move_insn (scratch, XEXP (src, 0));
8656 if (index != 0)
8657 emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
8658 src = adjust_automodify_address (src, PSImode, scratch, index * 4);
8660 else
8661 src = adjust_address (src, PSImode, index * 4);
8663 dst = get_fpscr_rtx ();
8664 emit_move_insn (dst, src);
8667 void
8668 emit_sf_insn (rtx pat)
8670 emit_insn (pat);
8673 void
8674 emit_df_insn (rtx pat)
8676 emit_insn (pat);
8679 void
8680 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8682 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8685 void
8686 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8688 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
8689 get_fpscr_rtx ()));
8692 void
8693 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8695 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8698 void
8699 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8701 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
8702 get_fpscr_rtx ()));
8705 static rtx get_free_reg (HARD_REG_SET);
8707 /* This function returns a register to use to load the address from which
8708 to load the fpscr. Currently it always returns r1 or r7, but when we are
8709 able to use pseudo registers after combine, or have a better mechanism
8710 for choosing a register, it should be done here. */
8711 /* REGS_LIVE is the liveness information for the point for which we
8712 need this allocation. In some bare-bones exit blocks, r1 is live at the
8713 start. We can even have all of r0..r3 being live:
8714 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
8715 The insn before which the new insns are placed will clobber the register
8716 we return. If a basic block consists only of setting the return value
8717 register to a pseudo and using that register, the return value is not
8718 live before or after this block, yet we'll insert our insns right in
8719 the middle. */
8721 static rtx
8722 get_free_reg (HARD_REG_SET regs_live)
8724 if (! TEST_HARD_REG_BIT (regs_live, 1))
8725 return gen_rtx_REG (Pmode, 1);
8727 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
8728 there shouldn't be anything but a jump before the function end. */
8729 gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
8730 return gen_rtx_REG (Pmode, 7);
8733 /* This function will set the fpscr from memory.
8734 MODE is the mode we are setting it to. */
8735 void
8736 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
8738 enum attr_fp_mode fp_mode = mode;
8739 enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
8740 rtx addr_reg;
8742 addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
8743 emit_fpu_switch (addr_reg, fp_mode == norm_mode);
8746 /* Is the given character a logical line separator for the assembler? */
8747 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
8748 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
8749 #endif
8752 sh_insn_length_adjustment (rtx insn)
8754 /* Instructions with unfilled delay slots take up an extra two bytes for
8755 the nop in the delay slot. */
8756 if (((GET_CODE (insn) == INSN
8757 && GET_CODE (PATTERN (insn)) != USE
8758 && GET_CODE (PATTERN (insn)) != CLOBBER)
8759 || GET_CODE (insn) == CALL_INSN
8760 || (GET_CODE (insn) == JUMP_INSN
8761 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8762 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
8763 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
8764 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
8765 return 2;
8767 /* SH2e has a bug that prevents the use of annulled branches, so if
8768 the delay slot is not filled, we'll have to put a NOP in it. */
8769 if (sh_cpu == CPU_SH2E
8770 && GET_CODE (insn) == JUMP_INSN
8771 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8772 && GET_CODE (PATTERN (insn)) != ADDR_VEC
8773 && get_attr_type (insn) == TYPE_CBRANCH
8774 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
8775 return 2;
8777 /* sh-dsp parallel processing insns take four bytes instead of two. */
8779 if (GET_CODE (insn) == INSN)
8781 int sum = 0;
8782 rtx body = PATTERN (insn);
8783 const char *template;
8784 char c;
8785 int maybe_label = 1;
8787 if (GET_CODE (body) == ASM_INPUT)
8788 template = XSTR (body, 0);
8789 else if (asm_noperands (body) >= 0)
8790 template
8791 = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
8792 else
8793 return 0;
8796 int ppi_adjust = 0;
8799 c = *template++;
8800 while (c == ' ' || c == '\t');
8801 /* all sh-dsp parallel-processing insns start with p.
8802 The only non-ppi sh insn starting with p is pref.
8803 The only ppi starting with pr is prnd. */
8804 if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
8805 ppi_adjust = 2;
8806 /* The repeat pseudo-insn expands to three insns, a total of
8807 six bytes in size. */
8808 else if ((c == 'r' || c == 'R')
8809 && ! strncasecmp ("epeat", template, 5))
8810 ppi_adjust = 4;
8811 while (c && c != '\n'
8812 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, template))
8814 /* If this is a label, it is obviously not a ppi insn. */
8815 if (c == ':' && maybe_label)
8817 ppi_adjust = 0;
8818 break;
8820 else if (c == '\'' || c == '"')
8821 maybe_label = 0;
8822 c = *template++;
8824 sum += ppi_adjust;
8825 maybe_label = c != ':';
8827 while (c);
8828 return sum;
8830 return 0;
8833 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
8834 isn't protected by a PIC unspec. */
8836 nonpic_symbol_mentioned_p (rtx x)
8838 register const char *fmt;
8839 register int i;
8841 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
8842 || GET_CODE (x) == PC)
8843 return 1;
8845 /* We don't want to look into the possible MEM location of a
8846 CONST_DOUBLE, since we're not going to use it, in general. */
8847 if (GET_CODE (x) == CONST_DOUBLE)
8848 return 0;
8850 if (GET_CODE (x) == UNSPEC
8851 && (XINT (x, 1) == UNSPEC_PIC
8852 || XINT (x, 1) == UNSPEC_GOT
8853 || XINT (x, 1) == UNSPEC_GOTOFF
8854 || XINT (x, 1) == UNSPEC_GOTPLT
8855 || XINT (x, 1) == UNSPEC_GOTTPOFF
8856 || XINT (x, 1) == UNSPEC_DTPOFF
8857 || XINT (x, 1) == UNSPEC_PLT))
8858 return 0;
8860 fmt = GET_RTX_FORMAT (GET_CODE (x));
8861 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8863 if (fmt[i] == 'E')
8865 register int j;
8867 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8868 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
8869 return 1;
8871 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
8872 return 1;
8875 return 0;
8878 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8879 @GOTOFF in `reg'. */
8881 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
8882 rtx reg)
8884 if (tls_symbolic_operand (orig, Pmode))
8885 return orig;
8887 if (GET_CODE (orig) == LABEL_REF
8888 || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
8890 if (reg == 0)
8891 reg = gen_reg_rtx (Pmode);
8893 emit_insn (gen_symGOTOFF2reg (reg, orig));
8894 return reg;
8896 else if (GET_CODE (orig) == SYMBOL_REF)
8898 if (reg == 0)
8899 reg = gen_reg_rtx (Pmode);
8901 emit_insn (gen_symGOT2reg (reg, orig));
8902 return reg;
8904 return orig;
8907 /* Mark the use of a constant in the literal table. If the constant
8908 has multiple labels, make it unique. */
8909 static rtx
8910 mark_constant_pool_use (rtx x)
8912 rtx insn, lab, pattern;
8914 if (x == NULL)
8915 return x;
8917 switch (GET_CODE (x))
8919 case LABEL_REF:
8920 x = XEXP (x, 0);
8921 case CODE_LABEL:
8922 break;
8923 default:
8924 return x;
8927 /* Get the first label in the list of labels for the same constant
8928 and delete the other labels in the list. */
8929 lab = x;
8930 for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
8932 if (GET_CODE (insn) != CODE_LABEL
8933 || LABEL_REFS (insn) != NEXT_INSN (insn))
8934 break;
8935 lab = insn;
8938 for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
8939 INSN_DELETED_P (insn) = 1;
8941 /* Mark constants in a window. */
8942 for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
8944 if (GET_CODE (insn) != INSN)
8945 continue;
8947 pattern = PATTERN (insn);
8948 if (GET_CODE (pattern) != UNSPEC_VOLATILE)
8949 continue;
8951 switch (XINT (pattern, 1))
8953 case UNSPECV_CONST2:
8954 case UNSPECV_CONST4:
8955 case UNSPECV_CONST8:
8956 XVECEXP (pattern, 0, 1) = const1_rtx;
8957 break;
8958 case UNSPECV_WINDOW_END:
8959 if (XVECEXP (pattern, 0, 0) == x)
8960 return lab;
8961 break;
8962 case UNSPECV_CONST_END:
8963 return lab;
8964 default:
8965 break;
8969 return lab;
8972 /* Return true if it's possible to redirect BRANCH1 to the destination
8973 of an unconditional jump BRANCH2. We only want to do this if the
8974 resulting branch will have a short displacement. */
8976 sh_can_redirect_branch (rtx branch1, rtx branch2)
8978 if (flag_expensive_optimizations && simplejump_p (branch2))
8980 rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
8981 rtx insn;
8982 int distance;
8984 for (distance = 0, insn = NEXT_INSN (branch1);
8985 insn && distance < 256;
8986 insn = PREV_INSN (insn))
8988 if (insn == dest)
8989 return 1;
8990 else
8991 distance += get_attr_length (insn);
8993 for (distance = 0, insn = NEXT_INSN (branch1);
8994 insn && distance < 256;
8995 insn = NEXT_INSN (insn))
8997 if (insn == dest)
8998 return 1;
8999 else
9000 distance += get_attr_length (insn);
9003 return 0;
9006 /* Return nonzero if register old_reg can be renamed to register new_reg. */
9008 sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
9009 unsigned int new_reg)
9011 /* Interrupt functions can only use registers that have already been
9012 saved by the prologue, even if they would normally be
9013 call-clobbered. */
9015 if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
9016 return 0;
9018 return 1;
9021 /* Function to update the integer COST
9022 based on the relationship between INSN that is dependent on
9023 DEP_INSN through the dependence LINK. The default is to make no
9024 adjustment to COST. This can be used for example to specify to
9025 the scheduler that an output- or anti-dependence does not incur
9026 the same cost as a data-dependence. The return value should be
9027 the new value for COST. */
9028 static int
9029 sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
9031 rtx reg, use_pat;
9033 if (TARGET_SHMEDIA)
9035 /* On SHmedia, if the dependence is an anti-dependence or
9036 output-dependence, there is no cost. */
9037 if (REG_NOTE_KIND (link) != 0)
9039 /* However, dependencies between target register loads and
9040 uses of the register in a subsequent block that are separated
9041 by a conditional branch are not modelled - we have to make do with
9042 the anti-dependency between the target register load and the
9043 conditional branch that ends the current block. */
9044 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
9045 && GET_CODE (PATTERN (dep_insn)) == SET
9046 && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
9047 || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
9048 && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
9050 int orig_cost = cost;
9051 rtx note = find_reg_note (insn, REG_BR_PROB, 0);
9052 rtx target = ((! note
9053 || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
9054 ? insn : JUMP_LABEL (insn));
9055 /* On the likely path, the branch costs 1, on the unlikely path,
9056 it costs 3. */
9057 cost--;
9059 target = next_active_insn (target);
9060 while (target && ! flow_dependent_p (target, dep_insn)
9061 && --cost > 0);
9062 /* If two branches are executed in immediate succession, with the
9063 first branch properly predicted, this causes a stall at the
9064 second branch, hence we won't need the target for the
9065 second branch for two cycles after the launch of the first
9066 branch. */
9067 if (cost > orig_cost - 2)
9068 cost = orig_cost - 2;
9070 else
9071 cost = 0;
9074 else if (get_attr_is_mac_media (insn)
9075 && get_attr_is_mac_media (dep_insn))
9076 cost = 1;
9078 else if (! reload_completed
9079 && GET_CODE (PATTERN (insn)) == SET
9080 && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
9081 && GET_CODE (PATTERN (dep_insn)) == SET
9082 && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
9083 && cost < 4)
9084 cost = 4;
9085 /* Schedule the ptabs for a casesi_jump_media in preference to stuff
9086 that is needed at the target. */
9087 else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
9088 && ! flow_dependent_p (insn, dep_insn))
9089 cost--;
9091 else if (REG_NOTE_KIND (link) == 0)
9093 enum attr_type type;
9094 rtx dep_set;
9096 if (recog_memoized (insn) < 0
9097 || recog_memoized (dep_insn) < 0)
9098 return cost;
9100 dep_set = single_set (dep_insn);
9102 /* The latency that we specify in the scheduling description refers
9103 to the actual output, not to an auto-increment register; for that,
9104 the latency is one. */
9105 if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
9107 rtx set = single_set (insn);
9109 if (set
9110 && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
9111 && (!MEM_P (SET_DEST (set))
9112 || !reg_mentioned_p (SET_DEST (dep_set),
9113 XEXP (SET_DEST (set), 0))))
9114 cost = 1;
9116 /* The only input for a call that is timing-critical is the
9117 function's address. */
9118 if (GET_CODE (insn) == CALL_INSN)
9120 rtx call = PATTERN (insn);
9122 if (GET_CODE (call) == PARALLEL)
9123 call = XVECEXP (call, 0 ,0);
9124 if (GET_CODE (call) == SET)
9125 call = SET_SRC (call);
9126 if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
9127 /* sibcalli_thunk uses a symbol_ref in an unspec. */
9128 && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
9129 || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
9130 cost -= TARGET_SH4_300 ? 3 : 6;
9132 /* Likewise, the most timing-critical input for an sfunc call
9133 is the function address. However, sfuncs typically start
9134 using their arguments pretty quickly.
9135 Assume a four cycle delay for SH4 before they are needed.
9136 Cached ST40-300 calls are quicker, so assume only a one
9137 cycle delay there.
9138 ??? Maybe we should encode the delays till input registers
9139 are needed by sfuncs into the sfunc call insn. */
9140 /* All sfunc calls are parallels with at least four components.
9141 Exploit this to avoid unnecessary calls to sfunc_uses_reg. */
9142 else if (GET_CODE (PATTERN (insn)) == PARALLEL
9143 && XVECLEN (PATTERN (insn), 0) >= 4
9144 && (reg = sfunc_uses_reg (insn)))
9146 if (! reg_set_p (reg, dep_insn))
9147 cost -= TARGET_SH4_300 ? 1 : 4;
9149 if (TARGET_HARD_SH4 && !TARGET_SH4_300)
9151 enum attr_type dep_type = get_attr_type (dep_insn);
9153 if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
9154 cost--;
9155 else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
9156 && (type = get_attr_type (insn)) != TYPE_CALL
9157 && type != TYPE_SFUNC)
9158 cost--;
9159 /* When the preceding instruction loads the shift amount of
9160 the following SHAD/SHLD, the latency of the load is increased
9161 by 1 cycle. */
9162 if (get_attr_type (insn) == TYPE_DYN_SHIFT
9163 && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
9164 && reg_overlap_mentioned_p (SET_DEST (dep_set),
9165 XEXP (SET_SRC (single_set (insn)),
9166 1)))
9167 cost++;
9168 /* When an LS group instruction with a latency of less than
9169 3 cycles is followed by a double-precision floating-point
9170 instruction, FIPR, or FTRV, the latency of the first
9171 instruction is increased to 3 cycles. */
9172 else if (cost < 3
9173 && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
9174 && get_attr_dfp_comp (insn) == DFP_COMP_YES)
9175 cost = 3;
9176 /* The lsw register of a double-precision computation is ready one
9177 cycle earlier. */
9178 else if (reload_completed
9179 && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
9180 && (use_pat = single_set (insn))
9181 && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
9182 SET_SRC (use_pat)))
9183 cost -= 1;
9185 if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
9186 && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
9187 cost -= 1;
9189 else if (TARGET_SH4_300)
9191 /* Stores need their input register two cycles later. */
9192 if (dep_set && cost >= 1
9193 && ((type = get_attr_type (insn)) == TYPE_STORE
9194 || type == TYPE_PSTORE
9195 || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
9197 rtx set = single_set (insn);
9199 if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
9200 && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
9202 cost -= 2;
9203 /* But don't reduce the cost below 1 if the address depends
9204 on a side effect of dep_insn. */
9205 if (cost < 1
9206 && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
9207 cost = 1;
9212 /* An anti-dependence penalty of two applies if the first insn is a double
9213 precision fadd / fsub / fmul. */
9214 else if (!TARGET_SH4_300
9215 && REG_NOTE_KIND (link) == REG_DEP_ANTI
9216 && recog_memoized (dep_insn) >= 0
9217 && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
9218 || get_attr_type (dep_insn) == TYPE_DFP_MUL)
9219 /* A lot of alleged anti-flow dependences are fake,
9220 so check this one is real. */
9221 && flow_dependent_p (dep_insn, insn))
9222 cost = 2;
9224 return cost;
9227 /* Check if INSN is flow-dependent on DEP_INSN. Can also be used to check
9228 if DEP_INSN is anti-flow dependent on INSN. */
9229 static int
9230 flow_dependent_p (rtx insn, rtx dep_insn)
9232 rtx tmp = PATTERN (insn);
9234 note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
9235 return tmp == NULL_RTX;
9238 /* A helper function for flow_dependent_p called through note_stores. */
9239 static void
9240 flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9242 rtx * pinsn = (rtx *) data;
9244 if (*pinsn && reg_referenced_p (x, *pinsn))
9245 *pinsn = NULL_RTX;
9248 /* For use by sh_allocate_initial_value. Note that sh.md contains some
9249 'special function' patterns (type sfunc) that clobber pr, but that
9250 do not look like function calls to leaf_function_p. Hence we must
9251 do this extra check. */
9252 static int
9253 sh_pr_n_sets (void)
9255 return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
9258 /* Return where to allocate pseudo for a given hard register initial
9259 value. */
9260 static rtx
9261 sh_allocate_initial_value (rtx hard_reg)
9263 rtx x;
9265 if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
9267 if (current_function_is_leaf
9268 && ! sh_pr_n_sets ()
9269 && ! (TARGET_SHCOMPACT
9270 && ((crtl->args.info.call_cookie
9271 & ~ CALL_COOKIE_RET_TRAMP (1))
9272 || crtl->saves_all_registers)))
9273 x = hard_reg;
9274 else
9275 x = gen_frame_mem (Pmode, return_address_pointer_rtx);
9277 else
9278 x = NULL_RTX;
9280 return x;
9283 /* This function returns "2" to indicate dual issue for the SH4
9284 processor. To be used by the DFA pipeline description. */
9285 static int
9286 sh_issue_rate (void)
9288 if (TARGET_SUPERSCALAR)
9289 return 2;
9290 else
9291 return 1;
9294 /* Functions for ready queue reordering for sched1. */
9296 /* Get weight for mode for a set x. */
9297 static short
9298 find_set_regmode_weight (rtx x, enum machine_mode mode)
9300 if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
9301 return 1;
9302 if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
9304 if (GET_CODE (SET_DEST (x)) == REG)
9306 if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
9307 return 1;
9308 else
9309 return 0;
9311 return 1;
9313 return 0;
9316 /* Get regmode weight for insn. */
9317 static short
9318 find_insn_regmode_weight (rtx insn, enum machine_mode mode)
9320 short reg_weight = 0;
9321 rtx x;
9323 /* Increment weight for each register born here. */
9324 x = PATTERN (insn);
9325 reg_weight += find_set_regmode_weight (x, mode);
9326 if (GET_CODE (x) == PARALLEL)
9328 int j;
9329 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
9331 x = XVECEXP (PATTERN (insn), 0, j);
9332 reg_weight += find_set_regmode_weight (x, mode);
9335 /* Decrement weight for each register that dies here. */
9336 for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
9338 if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
9340 rtx note = XEXP (x, 0);
9341 if (GET_CODE (note) == REG && GET_MODE (note) == mode)
9342 reg_weight--;
9345 return reg_weight;
9348 /* Calculate regmode weights for all insns of a basic block. */
9349 static void
9350 find_regmode_weight (basic_block b, enum machine_mode mode)
9352 rtx insn, next_tail, head, tail;
9354 get_ebb_head_tail (b, b, &head, &tail);
9355 next_tail = NEXT_INSN (tail);
9357 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
9359 /* Handle register life information. */
9360 if (!INSN_P (insn))
9361 continue;
9363 if (mode == SFmode)
9364 INSN_REGMODE_WEIGHT (insn, mode) =
9365 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
9366 else if (mode == SImode)
9367 INSN_REGMODE_WEIGHT (insn, mode) =
9368 find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
9372 /* Comparison function for ready queue sorting. */
9373 static int
9374 rank_for_reorder (const void *x, const void *y)
9376 rtx tmp = *(const rtx *) y;
9377 rtx tmp2 = *(const rtx *) x;
9379 /* The insn in a schedule group should be issued first. */
9380 if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
9381 return SCHED_GROUP_P (tmp2) ? 1 : -1;
9383 /* If insns are equally good, sort by INSN_LUID (original insn order); this
9384 minimizes instruction movement, thus minimizing sched's effect on
9385 register pressure. */
9386 return INSN_LUID (tmp) - INSN_LUID (tmp2);
9389 /* Resort the array A in which only the element at index N may be out of order. */
9390 static void
9391 swap_reorder (rtx *a, int n)
9393 rtx insn = a[n - 1];
9394 int i = n - 2;
9396 while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
9398 a[i + 1] = a[i];
9399 i -= 1;
9401 a[i + 1] = insn;
9404 #define SCHED_REORDER(READY, N_READY) \
9405 do \
9407 if ((N_READY) == 2) \
9408 swap_reorder (READY, N_READY); \
9409 else if ((N_READY) > 2) \
9410 qsort (READY, N_READY, sizeof (rtx), rank_for_reorder); \
9412 while (0)
9414 /* Sort the ready list READY by ascending priority, using the SCHED_REORDER
9415 macro. */
9416 static void
9417 ready_reorder (rtx *ready, int nready)
9419 SCHED_REORDER (ready, nready);
9422 /* Count life regions of r0 for a block. */
9423 static int
9424 find_r0_life_regions (basic_block b)
9426 rtx end, insn;
9427 rtx pset;
9428 rtx r0_reg;
9429 int live;
9430 int set;
9431 int death = 0;
9433 if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
9435 set = 1;
9436 live = 1;
9438 else
9440 set = 0;
9441 live = 0;
9444 insn = BB_HEAD (b);
9445 end = BB_END (b);
9446 r0_reg = gen_rtx_REG (SImode, R0_REG);
9447 while (1)
9449 if (INSN_P (insn))
9451 if (find_regno_note (insn, REG_DEAD, R0_REG))
9453 death++;
9454 live = 0;
9456 if (!live
9457 && (pset = single_set (insn))
9458 && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
9459 && !find_regno_note (insn, REG_UNUSED, R0_REG))
9461 set++;
9462 live = 1;
9465 if (insn == end)
9466 break;
9467 insn = NEXT_INSN (insn);
9469 return set - death;
9472 /* Calculate regmode weights for all insns of all basic blocks. */
9473 static void
9474 sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
9475 int verbose ATTRIBUTE_UNUSED,
9476 int old_max_uid)
9478 basic_block b;
9480 regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
9481 regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
9482 r0_life_regions = 0;
9484 FOR_EACH_BB_REVERSE (b)
9486 find_regmode_weight (b, SImode);
9487 find_regmode_weight (b, SFmode);
9488 if (!reload_completed)
9489 r0_life_regions += find_r0_life_regions (b);
9492 CURR_REGMODE_PRESSURE (SImode) = 0;
9493 CURR_REGMODE_PRESSURE (SFmode) = 0;
9497 /* Cleanup. */
9498 static void
9499 sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
9500 int verbose ATTRIBUTE_UNUSED)
9502 if (regmode_weight[0])
9504 free (regmode_weight[0]);
9505 regmode_weight[0] = NULL;
9507 if (regmode_weight[1])
9509 free (regmode_weight[1]);
9510 regmode_weight[1] = NULL;
9514 /* The scalar modes supported differ from the default version in TImode
9515 for 32-bit SHMEDIA. */
9516 static bool
9517 sh_scalar_mode_supported_p (enum machine_mode mode)
9519 if (TARGET_SHMEDIA32 && mode == TImode)
9520 return false;
9522 return default_scalar_mode_supported_p (mode);
9525 /* Cache the can_issue_more so that we can return it from reorder2. Also,
9526 keep count of register pressures on SImode and SFmode. */
9527 static int
9528 sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
9529 int sched_verbose ATTRIBUTE_UNUSED,
9530 rtx insn,
9531 int can_issue_more)
9533 if (GET_CODE (PATTERN (insn)) != USE
9534 && GET_CODE (PATTERN (insn)) != CLOBBER)
9535 cached_can_issue_more = can_issue_more - 1;
9536 else
9537 cached_can_issue_more = can_issue_more;
9539 if (reload_completed)
9540 return cached_can_issue_more;
9542 CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
9543 CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
9545 return cached_can_issue_more;
9548 static void
9549 sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
9550 int verbose ATTRIBUTE_UNUSED,
9551 int veclen ATTRIBUTE_UNUSED)
9553 CURR_REGMODE_PRESSURE (SImode) = 0;
9554 CURR_REGMODE_PRESSURE (SFmode) = 0;
9557 /* Some magic numbers. */
9558 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9559 functions that already have high pressure on r0. */
9560 #define R0_MAX_LIFE_REGIONS 2
9561 /* Register Pressure thresholds for SImode and SFmode registers. */
9562 #define SIMODE_MAX_WEIGHT 5
9563 #define SFMODE_MAX_WEIGHT 10
9565 /* Return true if the pressure is high for MODE. */
9566 static short
9567 high_pressure (enum machine_mode mode)
9569 /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
9570 functions that already have high pressure on r0. */
9571 if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
9572 return 1;
9574 if (mode == SFmode)
9575 return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
9576 else
9577 return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
9580 /* Reorder ready queue if register pressure is high. */
9581 static int
9582 sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
9583 int sched_verbose ATTRIBUTE_UNUSED,
9584 rtx *ready,
9585 int *n_readyp,
9586 int clock_var ATTRIBUTE_UNUSED)
9588 if (reload_completed)
9589 return sh_issue_rate ();
9591 if (high_pressure (SFmode) || high_pressure (SImode))
9593 ready_reorder (ready, *n_readyp);
9596 return sh_issue_rate ();
9599 /* Skip cycles if the current register pressure is high. */
9600 static int
9601 sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
9602 int sched_verbose ATTRIBUTE_UNUSED,
9603 rtx *ready ATTRIBUTE_UNUSED,
9604 int *n_readyp ATTRIBUTE_UNUSED,
9605 int clock_var ATTRIBUTE_UNUSED)
9607 if (reload_completed)
9608 return cached_can_issue_more;
9610 if (high_pressure(SFmode) || high_pressure (SImode))
9611 skip_cycles = 1;
9613 return cached_can_issue_more;
9616 /* Skip cycles without sorting the ready queue. This will move insns from
9617 Q -> R. If this is the last cycle we are skipping, allow sorting of the
9618 ready queue by sh_reorder. */
9620 /* Generally, skipping this many cycles is sufficient for all insns to move
9621 from Q -> R. */
9622 #define MAX_SKIPS 8
9624 static int
9625 sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
9626 int sched_verbose ATTRIBUTE_UNUSED,
9627 rtx insn ATTRIBUTE_UNUSED,
9628 int last_clock_var,
9629 int clock_var,
9630 int *sort_p)
9632 if (reload_completed)
9633 return 0;
9635 if (skip_cycles)
9637 if ((clock_var - last_clock_var) < MAX_SKIPS)
9639 *sort_p = 0;
9640 return 1;
9642 /* If this is the last cycle we are skipping, allow reordering of R. */
9643 if ((clock_var - last_clock_var) == MAX_SKIPS)
9645 *sort_p = 1;
9646 return 1;
9650 skip_cycles = 0;
9652 return 0;
9655 /* SHmedia requires registers for branches, so we can't generate new
9656 branches past reload. */
9657 static bool
9658 sh_cannot_modify_jumps_p (void)
9660 return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
9663 static int
9664 sh_target_reg_class (void)
9666 return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
9669 static bool
9670 sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
9672 HARD_REG_SET dummy;
9673 #if 0
9674 rtx insn;
9675 #endif
9677 if (! shmedia_space_reserved_for_target_registers)
9678 return 0;
9679 if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
9680 return 0;
9681 if (calc_live_regs (&dummy) >= 6 * 8)
9682 return 1;
9683 return 0;
9686 static bool
9687 sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
9689 return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
9693 /* On the SH1..SH4, the trampoline looks like
9694 2 0002 D202 mov.l l2,r2
9695 1 0000 D301 mov.l l1,r3
9696 3 0004 422B jmp @r2
9697 4 0006 0009 nop
9698 5 0008 00000000 l1: .long area
9699 6 000c 00000000 l2: .long function
9701 SH5 (compact) uses r1 instead of r3 for the static chain. */
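/* In other words, on SH1..SH4 sh_initialize_trampoline below writes the
   four 16-bit insns as two SImode words (0xd301d202 and 0x0009422b on
   little-endian targets, with the two halves swapped on big-endian) and
   then stores the static chain and the function address into the two
   .long slots at offsets 8 and 12.  */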
9704 /* Emit RTL insns to initialize the variable parts of a trampoline.
9705 FNADDR is an RTX for the address of the function's pure code.
9706 CXT is an RTX for the static chain value for the function. */
9708 void
9709 sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
9711 rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
9713 if (TARGET_SHMEDIA64)
9715 rtx tramp_templ;
9716 int fixed_len;
9718 rtx movi1 = GEN_INT (0xcc000010);
9719 rtx shori1 = GEN_INT (0xc8000010);
9720 rtx src, dst;
9722 /* The following trampoline works within a +- 128 KB range for cxt:
9723 ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
9724 shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
9725 gettr tr1,r1; blink tr0,r63 */
9726 /* Address rounding makes it hard to compute the exact bounds of the
9727 offset for this trampoline, but we have a rather generous offset
9728 range, so frame_offset should do fine as an upper bound. */
9729 if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
9731 /* ??? could optimize this trampoline initialization
9732 by writing DImode words with two insns each. */
9733 rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
9734 rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
9735 insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
9736 insn = gen_rtx_AND (DImode, insn, mask);
9737 /* Or in ptb/u .,tr1 pattern */
9738 insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
9739 insn = force_operand (insn, NULL_RTX);
9740 insn = gen_lowpart (SImode, insn);
9741 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
9742 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
9743 insn = gen_rtx_AND (DImode, insn, mask);
9744 insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
9745 insn = gen_lowpart (SImode, insn);
9746 emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
9747 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
9748 insn = gen_rtx_AND (DImode, insn, mask);
9749 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9750 insn = gen_lowpart (SImode, insn);
9751 emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
9752 insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
9753 insn = gen_rtx_AND (DImode, insn, mask);
9754 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9755 insn = gen_lowpart (SImode, insn);
9756 emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
9757 insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
9758 insn = gen_rtx_AND (DImode, insn, mask);
9759 insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9760 insn = gen_lowpart (SImode, insn);
9761 emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
9762 emit_move_insn (adjust_address (tramp_mem, SImode, 20),
9763 GEN_INT (0x6bf10600));
9764 emit_move_insn (adjust_address (tramp_mem, SImode, 24),
9765 GEN_INT (0x4415fc10));
9766 emit_move_insn (adjust_address (tramp_mem, SImode, 28),
9767 GEN_INT (0x4401fff0));
9768 emit_insn (gen_ic_invalidate_line (tramp));
9769 return;
9771 tramp_templ = gen_rtx_SYMBOL_REF (Pmode,"__GCC_nested_trampoline");
9772 fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
9774 tramp_templ = gen_datalabel_ref (tramp_templ);
9775 dst = tramp_mem;
9776 src = gen_const_mem (BLKmode, tramp_templ);
9777 set_mem_align (dst, 256);
9778 set_mem_align (src, 64);
9779 emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
9781 emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
9782 emit_move_insn (adjust_address (tramp_mem, Pmode,
9783 fixed_len + GET_MODE_SIZE (Pmode)),
9784 cxt);
9785 emit_insn (gen_ic_invalidate_line (tramp));
9786 return;
9788 else if (TARGET_SHMEDIA)
9790 /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
9791 movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63 */
9792 rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
9793 rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
9794 /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010 concatenated,
9795 rotated 10 right, and the higher 16 bits of every 32 selected. */
9796 rtx movishori
9797 = force_reg (V2HImode, (simplify_gen_subreg
9798 (V2HImode, GEN_INT (0x4330432), SImode, 0)));
9799 rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
9800 rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
9802 tramp = force_reg (Pmode, tramp);
9803 fnaddr = force_reg (SImode, fnaddr);
9804 cxt = force_reg (SImode, cxt);
9805 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
9806 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
9807 movishori));
9808 emit_insn (gen_rotrdi3_mextr (quad0, quad0,
9809 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9810 emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
9811 emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
9812 emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
9813 gen_rtx_SUBREG (V2HImode, cxt, 0),
9814 movishori));
9815 emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
9816 GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9817 emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
9818 if (TARGET_LITTLE_ENDIAN)
9820 emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
9821 emit_insn (gen_mextr4 (quad2, cxtload, blink));
9823 else
9825 emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
9826 emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
9828 emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
9829 emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
9830 emit_insn (gen_ic_invalidate_line (tramp));
9831 return;
9833 else if (TARGET_SHCOMPACT)
9835 emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
9836 return;
9838 emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
9839 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
9840 SImode));
9841 emit_move_insn (adjust_address (tramp_mem, SImode, 4),
9842 gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
9843 SImode));
9844 emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
9845 emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
9846 if (TARGET_HARVARD)
9848 if (!TARGET_INLINE_IC_INVALIDATE
9849 || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
9850 emit_library_call (function_symbol (NULL, "__ic_invalidate",
9851 FUNCTION_ORDINARY),
9852 0, VOIDmode, 1, tramp, SImode);
9853 else
9854 emit_insn (gen_ic_invalidate_line (tramp));
9858 /* FIXME: This is overly conservative. A SHcompact function that
9859 receives arguments ``by reference'' will have them stored in its
9860 own stack frame, so it must not pass pointers or references to
9861 these arguments to other functions by means of sibling calls. */
9862 /* If PIC, we cannot make sibling calls to global functions
9863 because the PLT requires r12 to be live. */
9864 static bool
9865 sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9867 return (1
9868 && (! TARGET_SHCOMPACT
9869 || crtl->args.info.stack_regs == 0)
9870 && ! sh_cfun_interrupt_handler_p ()
9871 && (! flag_pic
9872 || (decl && ! TREE_PUBLIC (decl))
9873 || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
9876 /* Machine specific built-in functions. */
9878 struct builtin_description
9880 const enum insn_code icode;
9881 const char *const name;
9882 int signature;
9885 /* Describe number and signedness of arguments; arg[0] == result
9886 (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument). */
9887 /* 9: 64-bit pointer, 10: 32-bit pointer. */
9888 static const char signature_args[][4] =
9890 #define SH_BLTIN_V2SI2 0
9891 { 4, 4 },
9892 #define SH_BLTIN_V4HI2 1
9893 { 4, 4 },
9894 #define SH_BLTIN_V2SI3 2
9895 { 4, 4, 4 },
9896 #define SH_BLTIN_V4HI3 3
9897 { 4, 4, 4 },
9898 #define SH_BLTIN_V8QI3 4
9899 { 4, 4, 4 },
9900 #define SH_BLTIN_MAC_HISI 5
9901 { 1, 4, 4, 1 },
9902 #define SH_BLTIN_SH_HI 6
9903 { 4, 4, 1 },
9904 #define SH_BLTIN_SH_SI 7
9905 { 4, 4, 1 },
9906 #define SH_BLTIN_V4HI2V2SI 8
9907 { 4, 4, 4 },
9908 #define SH_BLTIN_V4HI2V8QI 9
9909 { 4, 4, 4 },
9910 #define SH_BLTIN_SISF 10
9911 { 4, 2 },
9912 #define SH_BLTIN_LDUA_L 11
9913 { 2, 10 },
9914 #define SH_BLTIN_LDUA_Q 12
9915 { 1, 10 },
9916 #define SH_BLTIN_STUA_L 13
9917 { 0, 10, 2 },
9918 #define SH_BLTIN_STUA_Q 14
9919 { 0, 10, 1 },
9920 #define SH_BLTIN_LDUA_L64 15
9921 { 2, 9 },
9922 #define SH_BLTIN_LDUA_Q64 16
9923 { 1, 9 },
9924 #define SH_BLTIN_STUA_L64 17
9925 { 0, 9, 2 },
9926 #define SH_BLTIN_STUA_Q64 18
9927 { 0, 9, 1 },
9928 #define SH_BLTIN_NUM_SHARED_SIGNATURES 19
9929 #define SH_BLTIN_2 19
9930 #define SH_BLTIN_SU 19
9931 { 1, 2 },
9932 #define SH_BLTIN_3 20
9933 #define SH_BLTIN_SUS 20
9934 { 2, 2, 1 },
9935 #define SH_BLTIN_PSSV 21
9936 { 0, 8, 2, 2 },
9937 #define SH_BLTIN_XXUU 22
9938 #define SH_BLTIN_UUUU 22
9939 { 1, 1, 1, 1 },
9940 #define SH_BLTIN_PV 23
9941 { 0, 8 },
9943 /* mcmv: operands considered unsigned. */
9944 /* mmulsum_wq, msad_ubq: result considered unsigned long long. */
9945 /* mperm: control value considered unsigned int. */
9946 /* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int. */
9947 /* mshards_q: returns signed short. */
9948 /* nsb: takes long long arg, returns unsigned char. */
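/* For example, SH_BLTIN_SH_HI == { 4, 4, 1 } means the result and the
   first operand take their (signed) types from the insn's operand modes,
   while the shift/control operand is an unsigned int; a 0 in the result
   slot gives a void result and a 0 in an argument slot means there is no
   such argument.  */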
9949 static const struct builtin_description bdesc[] =
9951 { CODE_FOR_absv2si2, "__builtin_absv2si2", SH_BLTIN_V2SI2 },
9952 { CODE_FOR_absv4hi2, "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
9953 { CODE_FOR_addv2si3, "__builtin_addv2si3", SH_BLTIN_V2SI3 },
9954 { CODE_FOR_addv4hi3, "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
9955 { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
9956 { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
9957 { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
9958 { CODE_FOR_alloco_i, "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
9959 { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
9960 { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
9961 { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
9962 { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
9963 { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
9964 { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
9965 { CODE_FOR_mcmv, "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
9966 { CODE_FOR_mcnvs_lw, "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
9967 { CODE_FOR_mcnvs_wb, "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
9968 { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
9969 { CODE_FOR_mextr1, "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
9970 { CODE_FOR_mextr2, "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
9971 { CODE_FOR_mextr3, "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
9972 { CODE_FOR_mextr4, "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
9973 { CODE_FOR_mextr5, "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
9974 { CODE_FOR_mextr6, "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
9975 { CODE_FOR_mextr7, "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
9976 { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
9977 { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
9978 { CODE_FOR_mulv2si3, "__builtin_mulv2si3", SH_BLTIN_V2SI3, },
9979 { CODE_FOR_mulv4hi3, "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
9980 { CODE_FOR_mmulfx_l, "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
9981 { CODE_FOR_mmulfx_w, "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
9982 { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
9983 { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
9984 { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
9985 { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
9986 { CODE_FOR_mperm_w, "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
9987 { CODE_FOR_msad_ubq, "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
9988 { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
9989 { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
9990 { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
9991 { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
9992 { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
9993 { CODE_FOR_mshfhi_b, "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
9994 { CODE_FOR_mshfhi_l, "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
9995 { CODE_FOR_mshfhi_w, "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
9996 { CODE_FOR_mshflo_b, "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
9997 { CODE_FOR_mshflo_l, "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
9998 { CODE_FOR_mshflo_w, "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
9999 { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
10000 { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
10001 { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
10002 { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
10003 { CODE_FOR_subv2si3, "__builtin_subv2si3", SH_BLTIN_V2SI3 },
10004 { CODE_FOR_subv4hi3, "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
10005 { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
10006 { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
10007 { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
10008 { CODE_FOR_fcosa_s, "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
10009 { CODE_FOR_fsina_s, "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
10010 { CODE_FOR_fipr, "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
10011 { CODE_FOR_ftrv, "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
10012 { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
10013 { CODE_FOR_sqrtdf2, "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
10014 { CODE_FOR_sqrtsf2, "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
10015 { CODE_FOR_fsrra_s, "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
10016 { CODE_FOR_ldhi_l, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
10017 { CODE_FOR_ldhi_q, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
10018 { CODE_FOR_ldlo_l, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
10019 { CODE_FOR_ldlo_q, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
10020 { CODE_FOR_sthi_l, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
10021 { CODE_FOR_sthi_q, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
10022 { CODE_FOR_stlo_l, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
10023 { CODE_FOR_stlo_q, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
10024 { CODE_FOR_ldhi_l64, "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
10025 { CODE_FOR_ldhi_q64, "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
10026 { CODE_FOR_ldlo_l64, "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
10027 { CODE_FOR_ldlo_q64, "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
10028 { CODE_FOR_sthi_l64, "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
10029 { CODE_FOR_sthi_q64, "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
10030 { CODE_FOR_stlo_l64, "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
10031 { CODE_FOR_stlo_q64, "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
10032 { CODE_FOR_nsb, "__builtin_sh_media_NSB", SH_BLTIN_SU },
10033 { CODE_FOR_byterev, "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
10034 { CODE_FOR_prefetch, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
10037 static void
10038 sh_media_init_builtins (void)
10040 tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
10041 const struct builtin_description *d;
10043 memset (shared, 0, sizeof shared);
10044 for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
10046 tree type, arg_type = 0;
10047 int signature = d->signature;
10048 int i;
10050 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
10051 type = shared[signature];
10052 else
10054 int has_result = signature_args[signature][0] != 0;
10056 if ((signature_args[signature][1] & 8)
10057 && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
10058 || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
10059 continue;
10060 if (! TARGET_FPU_ANY
10061 && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
10062 continue;
10063 type = void_list_node;
10064 for (i = 3; ; i--)
10066 int arg = signature_args[signature][i];
10067 int opno = i - 1 + has_result;
10069 if (arg & 8)
10070 arg_type = ptr_type_node;
10071 else if (arg)
10072 arg_type = (*lang_hooks.types.type_for_mode)
10073 (insn_data[d->icode].operand[opno].mode,
10074 (arg & 1));
10075 else if (i)
10076 continue;
10077 else
10078 arg_type = void_type_node;
10079 if (i == 0)
10080 break;
10081 type = tree_cons (NULL_TREE, arg_type, type);
10083 type = build_function_type (arg_type, type);
10084 if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
10085 shared[signature] = type;
10087 add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
10088 NULL, NULL_TREE);
10092 /* Implements target hook vector_mode_supported_p. */
10093 bool
10094 sh_vector_mode_supported_p (enum machine_mode mode)
10096 if (TARGET_FPU_ANY
10097 && ((mode == V2SFmode)
10098 || (mode == V4SFmode)
10099 || (mode == V16SFmode)))
10100 return true;
10102 else if (TARGET_SHMEDIA
10103 && ((mode == V8QImode)
10104 || (mode == V2HImode)
10105 || (mode == V4HImode)
10106 || (mode == V2SImode)))
10107 return true;
10109 return false;
10112 /* Implements target hook dwarf_calling_convention. Return an enum
10113 of dwarf_calling_convention. */
10115 sh_dwarf_calling_convention (const_tree func)
10117 if (sh_attr_renesas_p (func))
10118 return DW_CC_GNU_renesas_sh;
10120 return DW_CC_normal;
10123 static void
10124 sh_init_builtins (void)
10126 if (TARGET_SHMEDIA)
10127 sh_media_init_builtins ();
10130 /* Expand an expression EXP that calls a built-in function,
10131 with result going to TARGET if that's convenient
10132 (and in mode MODE if that's convenient).
10133 SUBTARGET may be used as the target for computing one of EXP's operands.
10134 IGNORE is nonzero if the value is to be ignored. */
10136 static rtx
10137 sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10138 enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
10140 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10141 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10142 const struct builtin_description *d = &bdesc[fcode];
10143 enum insn_code icode = d->icode;
10144 int signature = d->signature;
10145 enum machine_mode tmode = VOIDmode;
10146 int nop = 0, i;
10147 rtx op[4];
10148 rtx pat = 0;
10150 if (signature_args[signature][0])
10152 if (ignore)
10153 return 0;
10155 tmode = insn_data[icode].operand[0].mode;
10156 if (! target
10157 || GET_MODE (target) != tmode
10158 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10159 target = gen_reg_rtx (tmode);
10160 op[nop++] = target;
10162 else
10163 target = 0;
10165 for (i = 1; i <= 3; i++, nop++)
10167 tree arg;
10168 enum machine_mode opmode, argmode;
10169 tree optype;
10171 if (! signature_args[signature][i])
10172 break;
10173 arg = CALL_EXPR_ARG (exp, i - 1);
10174 if (arg == error_mark_node)
10175 return const0_rtx;
10176 if (signature_args[signature][i] & 8)
10178 opmode = ptr_mode;
10179 optype = ptr_type_node;
10181 else
10183 opmode = insn_data[icode].operand[nop].mode;
10184 optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
10186 argmode = TYPE_MODE (TREE_TYPE (arg));
10187 if (argmode != opmode)
10188 arg = build1 (NOP_EXPR, optype, arg);
10189 op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
10190 if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
10191 op[nop] = copy_to_mode_reg (opmode, op[nop]);
10194 switch (nop)
10196 case 1:
10197 pat = (*insn_data[d->icode].genfun) (op[0]);
10198 break;
10199 case 2:
10200 pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
10201 break;
10202 case 3:
10203 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
10204 break;
10205 case 4:
10206 pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
10207 break;
10208 default:
10209 gcc_unreachable ();
10211 if (! pat)
10212 return 0;
10213 emit_insn (pat);
10214 return target;
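/* Expand the elementwise unary SFmode operation CODE on the V2SF value
   OP1 into OP0, emitting one gen_unary_sf_op insn per vector element
   (selected by sel0 / sel1).  */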
10217 void
10218 sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
10220 rtx sel0 = const0_rtx;
10221 rtx sel1 = const1_rtx;
10222 rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
10223 rtx op = gen_rtx_fmt_e (code, SFmode, op1);
10225 emit_insn ((*fn) (op0, op1, op, sel0, sel0));
10226 emit_insn ((*fn) (op0, op1, op, sel1, sel1));
10229 void
10230 sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
10232 rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
10234 emit_insn (gen_binary_sf_op0 (op0, op1, op2, op));
10235 emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
10238 /* Return true if a mode change from FROM to TO is invalid for registers
10239 in CLASS. */
10240 bool
10241 sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
10242 enum reg_class class)
10244 /* We want to enable the use of SUBREGs as a means to
10245 VEC_SELECT a single element of a vector. */
10246 if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
10247 return (reg_classes_intersect_p (GENERAL_REGS, class));
10249 if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
10251 if (TARGET_LITTLE_ENDIAN)
10253 if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
10254 return reg_classes_intersect_p (DF_REGS, class);
10256 else
10258 if (GET_MODE_SIZE (from) < 8)
10259 return reg_classes_intersect_p (DF_HI_REGS, class);
10262 return 0;
10266 /* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
10267 that label is used. */
10269 void
10270 sh_mark_label (rtx address, int nuses)
10272 if (GOTOFF_P (address))
10274 /* Extract the label or symbol. */
10275 address = XEXP (address, 0);
10276 if (GET_CODE (address) == PLUS)
10277 address = XEXP (address, 0);
10278 address = XVECEXP (address, 0, 0);
10280 if (GET_CODE (address) == LABEL_REF
10281 && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
10282 LABEL_NUSES (XEXP (address, 0)) += nuses;
10285 /* Compute extra cost of moving data between one register class
10286 and another. */
10288 /* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
10289 uses this information. Hence, the general register <-> floating point
10290 register information here is not used for SFmode. */
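/* As a concrete example of the formula below: without -mfmovd, moving a
   DFmode value between the general and floating point register files
   costs 12 (one 8-byte chunk times 12), versus 8 with -mfmovd and 4 on
   SHmedia.  */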
10293 sh_register_move_cost (enum machine_mode mode,
10294 enum reg_class srcclass, enum reg_class dstclass)
10296 if (dstclass == T_REGS || dstclass == PR_REGS)
10297 return 10;
10299 if (dstclass == MAC_REGS && srcclass == MAC_REGS)
10300 return 4;
10302 if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
10303 && REGCLASS_HAS_FP_REG (srcclass)
10304 && REGCLASS_HAS_FP_REG (dstclass))
10305 return 4;
10307 if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
10308 return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
10310 if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
10311 || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
10312 return 9;
10314 if ((REGCLASS_HAS_FP_REG (dstclass)
10315 && REGCLASS_HAS_GENERAL_REG (srcclass))
10316 || (REGCLASS_HAS_GENERAL_REG (dstclass)
10317 && REGCLASS_HAS_FP_REG (srcclass)))
10318 return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
10319 * ((GET_MODE_SIZE (mode) + 7) / 8U));
10321 if ((dstclass == FPUL_REGS
10322 && REGCLASS_HAS_GENERAL_REG (srcclass))
10323 || (srcclass == FPUL_REGS
10324 && REGCLASS_HAS_GENERAL_REG (dstclass)))
10325 return 5;
10327 if ((dstclass == FPUL_REGS
10328 && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
10329 || (srcclass == FPUL_REGS
10330 && (dstclass == PR_REGS || dstclass == MAC_REGS)))
10331 return 7;
10333 if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10334 || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10335 return 20;
10337 /* ??? ptabs faults on (value & 0x3) == 0x3 */
10338 if (TARGET_SHMEDIA
10339 && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
10341 if (sh_gettrcost >= 0)
10342 return sh_gettrcost;
10343 else if (!TARGET_PT_FIXED)
10344 return 100;
10347 if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
10348 || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
10349 return 4;
10351 if (TARGET_SHMEDIA
10352 || (TARGET_FMOVD
10353 && ! REGCLASS_HAS_GENERAL_REG (srcclass)
10354 && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
10355 return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
10357 return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
10360 static rtx emit_load_ptr (rtx, rtx);
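/* Load a ptr_mode value from memory at ADDR into REG, sign-extending it
   to Pmode when Pmode is wider than ptr_mode.  */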
10362 static rtx
10363 emit_load_ptr (rtx reg, rtx addr)
10365 rtx mem = gen_const_mem (ptr_mode, addr);
10367 if (Pmode != ptr_mode)
10368 mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
10369 return emit_move_insn (reg, mem);
10372 static void
10373 sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
10374 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10375 tree function)
10377 CUMULATIVE_ARGS cum;
10378 int structure_value_byref = 0;
10379 rtx this, this_value, sibcall, insns, funexp;
10380 tree funtype = TREE_TYPE (function);
10381 int simple_add = CONST_OK_FOR_ADD (delta);
10382 int did_load = 0;
10383 rtx scratch0, scratch1, scratch2;
10384 unsigned i;
10386 reload_completed = 1;
10387 epilogue_completed = 1;
10388 current_function_uses_only_leaf_regs = 1;
10390 emit_note (NOTE_INSN_PROLOGUE_END);
10392 /* Find the "this" pointer. We have such a wide range of ABIs for the
10393 SH that it's best to do this completely machine independently.
10394 "this" is passed as first argument, unless a structure return pointer
10395 comes first, in which case "this" comes second. */
10396 INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
10397 #ifndef PCC_STATIC_STRUCT_RETURN
10398 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
10399 structure_value_byref = 1;
10400 #endif /* not PCC_STATIC_STRUCT_RETURN */
10401 if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
10403 tree ptype = build_pointer_type (TREE_TYPE (funtype));
10405 FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
10407 this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
10409 /* For SHcompact, we only have r0 for a scratch register: r1 is the
10410 static chain pointer (even if you can't have nested virtual functions
10411 right now, someone might implement them sometime), and the rest of the
10412 registers are used for argument passing, are callee-saved, or reserved. */
10413 /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
10414 -ffixed-reg has been used. */
10415 if (! call_used_regs[0] || fixed_regs[0])
10416 error ("r0 needs to be available as a call-clobbered register");
10417 scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
10418 if (! TARGET_SH5)
10420 if (call_used_regs[1] && ! fixed_regs[1])
10421 scratch1 = gen_rtx_REG (ptr_mode, 1);
10422 /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
10423 to the location where struct values are to be returned. */
10424 if (call_used_regs[3] && ! fixed_regs[3])
10425 scratch2 = gen_rtx_REG (Pmode, 3);
10427 else if (TARGET_SHMEDIA)
10429 for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
10430 if (i != REGNO (scratch0) &&
10431 call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
10433 scratch1 = gen_rtx_REG (ptr_mode, i);
10434 break;
10436 if (scratch1 == scratch0)
10437 error ("need a second call-clobbered general purpose register");
10438 for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
10439 if (call_used_regs[i] && ! fixed_regs[i])
10441 scratch2 = gen_rtx_REG (Pmode, i);
10442 break;
10444 if (scratch2 == scratch0)
10445 error ("need a call-clobbered target register");
10448 this_value = plus_constant (this, delta);
10449 if (vcall_offset
10450 && (simple_add || scratch0 != scratch1)
10451 && strict_memory_address_p (ptr_mode, this_value))
10453 emit_load_ptr (scratch0, this_value);
10454 did_load = 1;
10457 if (!delta)
10458 ; /* Do nothing. */
10459 else if (simple_add)
10460 emit_move_insn (this, this_value);
10461 else
10463 emit_move_insn (scratch1, GEN_INT (delta));
10464 emit_insn (gen_add2_insn (this, scratch1));
10467 if (vcall_offset)
10469 rtx offset_addr;
10471 if (!did_load)
10472 emit_load_ptr (scratch0, this);
10474 offset_addr = plus_constant (scratch0, vcall_offset);
10475 if (strict_memory_address_p (ptr_mode, offset_addr))
10476 ; /* Do nothing. */
10477 else if (! TARGET_SH5 && scratch0 != scratch1)
10479 /* scratch0 != scratch1, and we have indexed loads. Get better
10480 schedule by loading the offset into r1 and using an indexed
10481 load - then the load of r1 can issue before the load from
10482 (this + delta) finishes. */
10483 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10484 offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
10486 else if (CONST_OK_FOR_ADD (vcall_offset))
10488 emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
10489 offset_addr = scratch0;
10491 else if (scratch0 != scratch1)
10493 emit_move_insn (scratch1, GEN_INT (vcall_offset));
10494 emit_insn (gen_add2_insn (scratch0, scratch1));
10495 offset_addr = scratch0;
10497 else
10498 gcc_unreachable (); /* FIXME */
10499 emit_load_ptr (scratch0, offset_addr);
10501 if (Pmode != ptr_mode)
10502 scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
10503 emit_insn (gen_add2_insn (this, scratch0));
10506 /* Generate a tail call to the target function. */
10507 if (! TREE_USED (function))
10509 assemble_external (function);
10510 TREE_USED (function) = 1;
10512 funexp = XEXP (DECL_RTL (function), 0);
10513 /* If the function is overridden, so is the thunk, hence we don't
10514 need GOT addressing even if this is a public symbol. */
10515 #if 0
10516 if (TARGET_SH1 && ! flag_weak)
10517 sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
10518 else
10519 #endif
10520 if (TARGET_SH2 && flag_pic)
10522 sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
10523 XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
10525 else
10527 if (TARGET_SHMEDIA && flag_pic)
10529 funexp = gen_sym2PIC (funexp);
10530 PUT_MODE (funexp, Pmode);
10532 emit_move_insn (scratch2, funexp);
10533 funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
10534 sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
10536 sibcall = emit_call_insn (sibcall);
10537 SIBLING_CALL_P (sibcall) = 1;
10538 use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
10539 emit_barrier ();
10541 /* Run just enough of rest_of_compilation to do scheduling and get
10542 the insns emitted. Note that use_thunk calls
10543 assemble_start_function and assemble_end_function. */
10545 insn_locators_alloc ();
10546 insns = get_insns ();
10548 #if 0
10549 if (optimize > 0)
10551 /* Initialize the bitmap obstacks. */
10552 bitmap_obstack_initialize (NULL);
10553 bitmap_obstack_initialize (&reg_obstack);
10554 if (! cfun->cfg)
10555 init_flow ();
10556 rtl_register_cfg_hooks ();
10557 init_rtl_bb_info (ENTRY_BLOCK_PTR);
10558 init_rtl_bb_info (EXIT_BLOCK_PTR);
10559 ENTRY_BLOCK_PTR->flags |= BB_RTL;
10560 EXIT_BLOCK_PTR->flags |= BB_RTL;
10561 find_basic_blocks (insns);
10563 if (flag_schedule_insns_after_reload)
10565 life_analysis (PROP_FINAL);
10567 split_all_insns (1);
10569 schedule_insns ();
10571 /* We must split jmp insn in PIC case. */
10572 else if (flag_pic)
10573 split_all_insns_noflow ();
10575 #else
10576 if (optimize > 0)
10578 if (! cfun->cfg)
10579 init_flow (cfun);
10580 split_all_insns_noflow ();
10582 #endif
10584 sh_reorg ();
10586 if (optimize > 0 && flag_delayed_branch)
10587 dbr_schedule (insns);
10589 shorten_branches (insns);
10590 final_start_function (insns, file, 1);
10591 final (insns, file, 1);
10592 final_end_function ();
10593 free_after_compilation (cfun);
10595 reload_completed = 0;
10596 epilogue_completed = 0;
10600 function_symbol (rtx target, const char *name, enum sh_function_kind kind)
10602 rtx sym;
10604 /* If this is not an ordinary function, the name usually comes from a
10605 string literal or an sprintf buffer. Make sure we use the same
10606 string consistently, so that cse will be able to unify address loads. */
10607 if (kind != FUNCTION_ORDINARY)
10608 name = IDENTIFIER_POINTER (get_identifier (name));
10609 sym = gen_rtx_SYMBOL_REF (Pmode, name);
10610 SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
10611 if (flag_pic)
10612 switch (kind)
10614 case FUNCTION_ORDINARY:
10615 break;
10616 case SFUNC_GOT:
10618 rtx reg = target ? target : gen_reg_rtx (Pmode);
10620 emit_insn (gen_symGOT2reg (reg, sym));
10621 sym = reg;
10622 break;
10624 case SFUNC_STATIC:
10626 /* ??? To allow cse to work, we use GOTOFF relocations.
10627 We could add combiner patterns to transform this into
10628 straight pc-relative calls with sym2PIC / bsrf when
10629 label load and function call are still 1:1 and in the
10630 same basic block during combine. */
10631 rtx reg = target ? target : gen_reg_rtx (Pmode);
10633 emit_insn (gen_symGOTOFF2reg (reg, sym));
10634 sym = reg;
10635 break;
10638 if (target && sym != target)
10640 emit_move_insn (target, sym);
10641 return target;
10643 return sym;
10646 /* Find the number of a general purpose register in S. */
10647 static int
10648 scavenge_reg (HARD_REG_SET *s)
10650 int r;
10651 for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
10652 if (TEST_HARD_REG_BIT (*s, r))
10653 return r;
10654 return -1;
10658 sh_get_pr_initial_val (void)
10660 rtx val;
10662 /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
10663 PR register on SHcompact, because it might be clobbered by the prologue.
10664 We check first if that is known to be the case. */
10665 if (TARGET_SHCOMPACT
10666 && ((crtl->args.info.call_cookie
10667 & ~ CALL_COOKIE_RET_TRAMP (1))
10668 || crtl->saves_all_registers))
10669 return gen_frame_mem (SImode, return_address_pointer_rtx);
10671 /* If we haven't finished rtl generation, there might be a nonlocal label
10672 that we haven't seen yet.
10673 ??? get_hard_reg_initial_val fails if it is called after register
10674 allocation has started, unless it has been called before for the
10675 same register. And even then, we end in trouble if we didn't use
10676 the register in the same basic block before. So call
10677 get_hard_reg_initial_val now and wrap it in an unspec if we might
10678 need to replace it. */
10679 /* ??? We also must do this for TARGET_SH1 in general, because otherwise
10680 combine can put the pseudo returned by get_hard_reg_initial_val into
10681 instructions that need a general purpose registers, which will fail to
10682 be recognized when the pseudo becomes allocated to PR. */
10684 = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
10685 if (TARGET_SH1)
10686 return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
10687 return val;
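/* Try to expand an scc operation on the T register: CODE is the
   comparison of sh_compare_op0 against sh_compare_op1, and TARGET
   receives the 0/1 result.  Return nonzero if a sequence was emitted,
   zero if the caller must use the generic expansion instead.  */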
10691 sh_expand_t_scc (enum rtx_code code, rtx target)
10693 rtx result = target;
10694 HOST_WIDE_INT val;
10696 if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
10697 || GET_CODE (sh_compare_op1) != CONST_INT)
10698 return 0;
10699 if (GET_CODE (result) != REG)
10700 result = gen_reg_rtx (SImode);
10701 val = INTVAL (sh_compare_op1);
10702 if ((code == EQ && val == 1) || (code == NE && val == 0))
10703 emit_insn (gen_movt (result));
10704 else if (TARGET_SH2A && ((code == EQ && val == 0)
10705 || (code == NE && val == 1)))
10706 emit_insn (gen_movrt (result));
10707 else if ((code == EQ && val == 0) || (code == NE && val == 1))
10709 emit_clobber (result);
10710 emit_insn (gen_subc (result, result, result));
10711 emit_insn (gen_addsi3 (result, result, const1_rtx));
10713 else if (code == EQ || code == NE)
10714 emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
10715 else
10716 return 0;
10717 if (result != target)
10718 emit_move_insn (target, result);
10719 return 1;
10722 /* INSN is an sfunc; return the rtx that describes the address used. */
10723 static rtx
10724 extract_sfunc_addr (rtx insn)
10726 rtx pattern, part = NULL_RTX;
10727 int len, i;
10729 pattern = PATTERN (insn);
10730 len = XVECLEN (pattern, 0);
10731 for (i = 0; i < len; i++)
10733 part = XVECEXP (pattern, 0, i);
10734 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
10735 && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
10736 return XEXP (part, 0);
10738 gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
10739 return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
10742 /* Verify that the register in use_sfunc_addr still agrees with the address
10743 used in the sfunc. This prevents fill_slots_from_thread from changing
10744 use_sfunc_addr.
10745 INSN is the use_sfunc_addr instruction, and REG is the register it
10746 guards. */
10748 check_use_sfunc_addr (rtx insn, rtx reg)
10750 /* Search for the sfunc. It should really come right after INSN. */
10751 while ((insn = NEXT_INSN (insn)))
10753 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
10754 break;
10755 if (! INSN_P (insn))
10756 continue;
10758 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10759 insn = XVECEXP (PATTERN (insn), 0, 0);
10760 if (GET_CODE (PATTERN (insn)) != PARALLEL
10761 || get_attr_type (insn) != TYPE_SFUNC)
10762 continue;
10763 return rtx_equal_p (extract_sfunc_addr (insn), reg);
10765 gcc_unreachable ();
10768 /* This function returns a constant rtx that represents 2**15 / pi in
10769 SFmode. It's used to scale SFmode angles, in radians, to a
10770 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10771 maps to 0x10000). */
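/* As a worked example, an angle of pi/2 radians multiplied by 2**15 / pi
   (about 10430.378) gives 16384 == 0x4000, i.e. one quarter of the
   0x10000 full circle.  */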
10773 static GTY(()) rtx sh_fsca_sf2int_rtx;
10776 sh_fsca_sf2int (void)
10778 if (! sh_fsca_sf2int_rtx)
10780 REAL_VALUE_TYPE rv;
10782 real_from_string (&rv, "10430.378350470453");
10783 sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
10786 return sh_fsca_sf2int_rtx;
10789 /* This function returns a constant rtx that represents 2**15 / pi in
10790 DFmode. It's used to scale DFmode angles, in radians, to a
10791 fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
10792 maps to 0x10000). */
10794 static GTY(()) rtx sh_fsca_df2int_rtx;
10797 sh_fsca_df2int (void)
10799 if (! sh_fsca_df2int_rtx)
10801 REAL_VALUE_TYPE rv;
10803 real_from_string (&rv, "10430.378350470453");
10804 sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
10807 return sh_fsca_df2int_rtx;
10810 /* This function returns a constant rtx that represents pi / 2**15 in
10811 SFmode. It's used to scale a fixed-point signed 16.16-bit fraction
10812 of a full circle back to an SFmode value (i.e., 0x10000 maps to
10813 2*pi). */
10815 static GTY(()) rtx sh_fsca_int2sf_rtx;
10818 sh_fsca_int2sf (void)
10820 if (! sh_fsca_int2sf_rtx)
10822 REAL_VALUE_TYPE rv;
10824 real_from_string (&rv, "9.587379924285257e-5");
10825 sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
10828 return sh_fsca_int2sf_rtx;
10831 /* Initialize the CUMULATIVE_ARGS structure. */
10833 void
10834 sh_init_cumulative_args (CUMULATIVE_ARGS * pcum,
10835 tree fntype,
10836 rtx libname ATTRIBUTE_UNUSED,
10837 tree fndecl,
10838 signed int n_named_args,
10839 enum machine_mode mode)
10841 pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
10842 pcum->free_single_fp_reg = 0;
10843 pcum->stack_regs = 0;
10844 pcum->byref_regs = 0;
10845 pcum->byref = 0;
10846 pcum->outgoing = (n_named_args == -1) ? 0 : 1;
10848 /* XXX - Should we check TARGET_HITACHI here ??? */
10849 pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;
10851 if (fntype)
10853 pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
10854 && aggregate_value_p (TREE_TYPE (fntype), fndecl));
10855 pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
10856 pcum->arg_count [(int) SH_ARG_INT]
10857 = TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);
10859 pcum->call_cookie
10860 = CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
10861 && pcum->arg_count [(int) SH_ARG_INT] == 0
10862 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
10863 ? int_size_in_bytes (TREE_TYPE (fntype))
10864 : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
10865 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
10866 == FIRST_RET_REG));
10868 else
10870 pcum->arg_count [(int) SH_ARG_INT] = 0;
10871 pcum->prototype_p = FALSE;
10872 if (mode != VOIDmode)
10874 pcum->call_cookie =
10875 CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
10876 && GET_MODE_SIZE (mode) > 4
10877 && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);
10879 /* If the default ABI is the Renesas ABI then all library
10880 calls must assume that the library will be using the
10881 Renesas ABI. So if the function would return its result
10882 in memory then we must force the address of this memory
10883 block onto the stack. Ideally we would like to call
10884 targetm.calls.return_in_memory() here but we do not have
10885 the TYPE or the FNDECL available so we synthesize the
10886 contents of that function as best we can. */
10887 pcum->force_mem =
10888 (TARGET_DEFAULT & MASK_HITACHI)
10889 && (mode == BLKmode
10890 || (GET_MODE_SIZE (mode) > 4
10891 && !(mode == DFmode
10892 && TARGET_FPU_DOUBLE)));
10894 else
10896 pcum->call_cookie = 0;
10897 pcum->force_mem = FALSE;
10902 /* Replace any occurrence of FROM(n) in X with TO(n). The function does
10903 not enter into CONST_DOUBLE for the replace.
10905 Note that copying is not done so X must not be shared unless all copies
10906 are to be modified.
10908 This is like replace_rtx, except that we operate on N_REPLACEMENTS
10909 replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
10910 replacements[n*2+1] - and that we take mode changes into account.
10912 If a replacement is ambiguous, return NULL_RTX.
10914 If MODIFY is zero, don't modify any rtl in place,
10915 just return zero or nonzero for failure / success. */
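/* As a simple illustration: with N_REPLACEMENTS == 1, replacements[0] ==
   (reg:SI 4) and replacements[1] == (reg:SI 1), every occurrence of hard
   register 4 in X is rewritten in terms of register 1; partial overlaps
   that cannot be mapped unambiguously make the function return NULL_RTX.  */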
10918 replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
10920 int i, j;
10921 const char *fmt;
10923 /* The following prevents infinite loops when we change a MEM in a
10924 CONST_DOUBLE into the same CONST_DOUBLE. */
10925 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
10926 return x;
10928 for (i = n_replacements - 1; i >= 0 ; i--)
10929 if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
10930 return replacements[i*2+1];
10932 /* Allow this function to make replacements in EXPR_LISTs. */
10933 if (x == 0)
10934 return 0;
10936 if (GET_CODE (x) == SUBREG)
10938 rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
10939 n_replacements, modify);
10941 if (GET_CODE (new) == CONST_INT)
10943 x = simplify_subreg (GET_MODE (x), new,
10944 GET_MODE (SUBREG_REG (x)),
10945 SUBREG_BYTE (x));
10946 if (! x)
10947 abort ();
10949 else if (modify)
10950 SUBREG_REG (x) = new;
10952 return x;
10954 else if (GET_CODE (x) == REG)
10956 unsigned regno = REGNO (x);
10957 unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
10958 ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
10959 rtx result = NULL_RTX;
10961 for (i = n_replacements - 1; i >= 0; i--)
10963 rtx from = replacements[i*2];
10964 rtx to = replacements[i*2+1];
10965 unsigned from_regno, from_nregs, to_regno, new_regno;
10967 if (GET_CODE (from) != REG)
10968 continue;
10969 from_regno = REGNO (from);
10970 from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
10971 ? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
10972 if (regno < from_regno + from_nregs && regno + nregs > from_regno)
10974 if (regno < from_regno
10975 || regno + nregs > from_regno + nregs
10976 || GET_CODE (to) != REG
10977 || result)
10978 return NULL_RTX;
10979 to_regno = REGNO (to);
10980 if (to_regno < FIRST_PSEUDO_REGISTER)
10982 new_regno = regno + to_regno - from_regno;
10983 if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
10984 != nregs)
10985 return NULL_RTX;
10986 result = gen_rtx_REG (GET_MODE (x), new_regno);
10988 else if (GET_MODE (x) <= GET_MODE (to))
10989 result = gen_lowpart_common (GET_MODE (x), to);
10990 else
10991 result = gen_lowpart_SUBREG (GET_MODE (x), to);
10994 return result ? result : x;
10996 else if (GET_CODE (x) == ZERO_EXTEND)
10998 rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
10999 n_replacements, modify);
11001 if (GET_CODE (new) == CONST_INT)
11003 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
11004 new, GET_MODE (XEXP (x, 0)));
11005 if (! x)
11006 abort ();
11008 else if (modify)
11009 XEXP (x, 0) = new;
11011 return x;
11014 fmt = GET_RTX_FORMAT (GET_CODE (x));
11015 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
11017 rtx new;
11019 if (fmt[i] == 'e')
11021 new = replace_n_hard_rtx (XEXP (x, i), replacements,
11022 n_replacements, modify);
11023 if (!new)
11024 return NULL_RTX;
11025 if (modify)
11026 XEXP (x, i) = new;
11028 else if (fmt[i] == 'E')
11029 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
11031 new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
11032 n_replacements, modify);
11033 if (!new)
11034 return NULL_RTX;
11035 if (modify)
11036 XVECEXP (x, i, j) = new;
11040 return x;
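/* Generate an rtx that truncates X to MODE.  A ZERO_EXTEND or SIGN_EXTEND
   around X is looked through: if the extended value already has MODE it is
   returned unchanged, if it is wider it is truncated directly, and if it
   is narrower the extension is redone in MODE (unless a sign extension is
   required but X was a zero extension).  */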
11044 sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
11046 enum rtx_code code = TRUNCATE;
11048 if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
11050 rtx inner = XEXP (x, 0);
11051 enum machine_mode inner_mode = GET_MODE (inner);
11053 if (inner_mode == mode)
11054 return inner;
11055 else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
11056 x = inner;
11057 else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
11058 && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
11060 code = GET_CODE (x);
11061 x = inner;
11064 return gen_rtx_fmt_e (code, mode, x);
11067 /* Called via for_each_rtx after reload, to clean up truncates of
11068 registers that span multiple actual hard registers. */
11070 shmedia_cleanup_truncate (rtx *p, void *n_changes)
11072 rtx x = *p, reg;
11074 if (GET_CODE (x) != TRUNCATE)
11075 return 0;
11076 reg = XEXP (x, 0);
11077 if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
11079 enum machine_mode reg_mode = GET_MODE (reg);
11080 XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
11081 subreg_lowpart_offset (DImode, reg_mode));
11082 *(int*) n_changes += 1;
11083 return -1;
11085 return 0;
11088 /* Loads and stores depend on the highpart of the address. However,
11089 set_attr_alternative does not give well-defined results before reload,
11090 so we must look at the rtl ourselves to see if any of the feeding
11091 registers is used in a memref. */
11093 /* Called by sh_contains_memref_p via for_each_rtx. */
11094 static int
11095 sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
11097 return (GET_CODE (*loc) == MEM);
11100 /* Return nonzero iff INSN contains a MEM. */
11102 sh_contains_memref_p (rtx insn)
11104 return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
11107 /* Return nonzero iff INSN loads a banked register. */
11109 sh_loads_bankedreg_p (rtx insn)
11111 if (GET_CODE (PATTERN (insn)) == SET)
11113 rtx op = SET_DEST (PATTERN(insn));
11114 if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
11115 return 1;
11118 return 0;
11121 /* FNADDR is the MEM expression from a call expander. Return an address
11122 to use in an SHmedia insn pattern. */
11124 shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
11126 int is_sym;
11128 fnaddr = XEXP (fnaddr, 0);
11129 is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
11130 if (flag_pic && is_sym)
11132 if (! SYMBOL_REF_LOCAL_P (fnaddr))
11134 rtx reg = gen_reg_rtx (Pmode);
11136 /* We must not use GOTPLT for sibcalls, because PIC_REG
11137 must be restored before the PLT code gets to run. */
11138 if (is_sibcall)
11139 emit_insn (gen_symGOT2reg (reg, fnaddr));
11140 else
11141 emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
11142 fnaddr = reg;
11144 else
11146 fnaddr = gen_sym2PIC (fnaddr);
11147 PUT_MODE (fnaddr, Pmode);
11150 /* If ptabs might trap, make this visible to the rest of the compiler.
11151 We generally assume that symbols pertain to valid locations, but
11152 it is possible to generate invalid symbols with asm or linker tricks.
11153 In a list of functions where each returns its successor, an invalid
11154 symbol might denote an empty list. */
11155 if (!TARGET_PT_FIXED
11156 && (!is_sym || TARGET_INVALID_SYMBOLS)
11157 && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
11159 rtx tr = gen_reg_rtx (PDImode);
11161 emit_insn (gen_ptabs (tr, fnaddr));
11162 fnaddr = tr;
11164 else if (! target_reg_operand (fnaddr, Pmode))
11165 fnaddr = copy_to_mode_reg (Pmode, fnaddr);
11166 return fnaddr;
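/* Implement the TARGET_SECONDARY_RELOAD hook: return the register class
   needed as an intermediate when reloading X into (IN_P) or out of a
   register of CLASS in MODE, or NO_REGS, possibly recording a dedicated
   reload pattern in SRI->icode.  */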
11169 enum reg_class
11170 sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
11171 enum machine_mode mode, secondary_reload_info *sri)
11173 if (in_p)
11175 if (REGCLASS_HAS_FP_REG (class)
11176 && ! TARGET_SHMEDIA
11177 && immediate_operand ((x), mode)
11178 && ! ((fp_zero_operand (x) || fp_one_operand (x))
11179 && mode == SFmode && fldi_ok ()))
11180 switch (mode)
11182 case SFmode:
11183 sri->icode = CODE_FOR_reload_insf__frn;
11184 return NO_REGS;
11185 case DFmode:
11186 sri->icode = CODE_FOR_reload_indf__frn;
11187 return NO_REGS;
11188 case SImode:
11189 /* ??? If we knew that we are in the appropriate mode -
11190 single precision - we could use a reload pattern directly. */
11191 return FPUL_REGS;
11192 default:
11193 abort ();
11195 if (class == FPUL_REGS
11196 && ((GET_CODE (x) == REG
11197 && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
11198 || REGNO (x) == T_REG))
11199 || GET_CODE (x) == PLUS))
11200 return GENERAL_REGS;
11201 if (class == FPUL_REGS && immediate_operand (x, mode))
11203 if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
11204 return GENERAL_REGS;
11205 else if (mode == SFmode)
11206 return FP_REGS;
11207 sri->icode = CODE_FOR_reload_insi__i_fpul;
11208 return NO_REGS;
11210 if (class == FPSCR_REGS
11211 && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
11212 || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
11213 return GENERAL_REGS;
11214 if (REGCLASS_HAS_FP_REG (class)
11215 && TARGET_SHMEDIA
11216 && immediate_operand (x, mode)
11217 && x != CONST0_RTX (GET_MODE (x))
11218 && GET_MODE (x) != V4SFmode)
11219 return GENERAL_REGS;
11220 if ((mode == QImode || mode == HImode)
11221 && TARGET_SHMEDIA && inqhi_operand (x, mode))
11223 sri->icode = ((mode == QImode)
11224 ? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
11225 return NO_REGS;
11227 if (TARGET_SHMEDIA && class == GENERAL_REGS
11228 && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
11229 return TARGET_REGS;
11230 } /* end of input-only processing. */
11232 if (((REGCLASS_HAS_FP_REG (class)
11233 && (GET_CODE (x) == REG
11234 && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
11235 || (FP_REGISTER_P (REGNO (x)) && mode == SImode
11236 && TARGET_FMOVD))))
11237 || (REGCLASS_HAS_GENERAL_REG (class)
11238 && GET_CODE (x) == REG
11239 && FP_REGISTER_P (REGNO (x))))
11240 && ! TARGET_SHMEDIA
11241 && (mode == SFmode || mode == SImode))
11242 return FPUL_REGS;
11243 if ((class == FPUL_REGS
11244 || (REGCLASS_HAS_FP_REG (class)
11245 && ! TARGET_SHMEDIA && mode == SImode))
11246 && (GET_CODE (x) == MEM
11247 || (GET_CODE (x) == REG
11248 && (REGNO (x) >= FIRST_PSEUDO_REGISTER
11249 || REGNO (x) == T_REG
11250 || system_reg_operand (x, VOIDmode)))))
11252 if (class == FPUL_REGS)
11253 return GENERAL_REGS;
11254 return FPUL_REGS;
11256 if ((class == TARGET_REGS
11257 || (TARGET_SHMEDIA && class == SIBCALL_REGS))
11258 && !satisfies_constraint_Csy (x)
11259 && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
11260 return GENERAL_REGS;
11261 if ((class == MAC_REGS || class == PR_REGS)
11262 && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
11263 && class != REGNO_REG_CLASS (REGNO (x)))
11264 return GENERAL_REGS;
11265 if (class != GENERAL_REGS && GET_CODE (x) == REG
11266 && TARGET_REGISTER_P (REGNO (x)))
11267 return GENERAL_REGS;
11268 return NO_REGS;
11271 enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
11273 #include "gt-sh.h"