/* Output routines for GCC for Renesas / SuperH SH.
   Copyright (C) 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, 2002,
   2003, 2004 Free Software Foundation, Inc.
   Contributed by Steve Chamberlain (sac@cygnus.com).
   Improved by Jim Wilson (wilson@cygnus.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "insn-config.h"
#include "rtl.h"
#include "tree.h"
#include "flags.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "output.h"
#include "insn-attr.h"
#include "toplev.h"
#include "recog.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "real.h"
#include "langhooks.h"
#include "basic-block.h"
#include "ra.h"
#include "cfglayout.h"
#include "intl.h"
#include "ggc.h"
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;

#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
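/* Example: for a DImode value held in the register pair (r4,r5),
   REGNO + MSW selects r5 on a little-endian target (the high word lives
   in the higher-numbered register of the pair) and r4 on a big-endian
   one; LSW is the mirror image.  */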
/* These are some macros to abstract register modes.  */
#define CONST_OK_FOR_ADD(size) \
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
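/* A minimal usage sketch: these dispatch on TARGET_SHMEDIA64 so callers
   can emit pointer-width moves and arithmetic without checking the
   target themselves, e.g.

     emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
                          GEN_INT (-8)));

   expands through gen_adddi3 on SHmedia64 and gen_addsi3 elsewhere.  */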
/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
int current_function_interrupt;

/* ??? The pragma interrupt support will not work for SH3.  */
/* This is set by #pragma interrupt and #pragma trapa, and causes gcc to
   output code for the next function appropriate for an interrupt handler.  */
int pragma_interrupt;

/* This is set by the trap_exit attribute for functions.  It specifies
   a trap number to be used in a trapa instruction at function exit
   (instead of an rte instruction).  */
int trap_exit;

/* This is used by the sp_switch attribute for functions.  It specifies
   a variable holding the address of the stack the interrupt function
   should switch to/from at entry/exit.  */
rtx sp_switch;

/* This is set by #pragma trapa, and is similar to the above, except that
   the compiler doesn't emit code to preserve all registers.  */
static int pragma_trapa;

/* This is set by #pragma nosave_low_regs.  This is useful on the SH3,
   which has a separate set of low regs for User and Supervisor modes.
   This should only be used for the lowest level of interrupts.  Higher levels
   of interrupts must save the registers in case they themselves are
   interrupted.  */
int pragma_nosave_low_regs;

/* This is used for communication between TARGET_SETUP_INCOMING_VARARGS and
   sh_expand_prologue.  */
int current_function_anonymous_args;

/* Global variables for machine-dependent things.  */

/* Which cpu are we scheduling for.  */
enum processor_type sh_cpu;

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */

rtx sh_compare_op0;
rtx sh_compare_op1;

/* Provides the class number of the smallest class containing
   reg number.  */

enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
  FP0_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
  GENERAL_REGS,
};

char sh_register_names[FIRST_PSEUDO_REGISTER] \
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;

char sh_additional_register_names[ADDREGNAMES_SIZE] \
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;

/* Provide reg_class from a letter such as appears in the machine
   description.  *: target independently reserved letter.
   reg_class_from_letter['e' - 'a'] is set to NO_REGS for TARGET_FMOVD.  */

enum reg_class reg_class_from_letter[] =
{
  /* a */ ALL_REGS,  /* b */ TARGET_REGS, /* c */ FPSCR_REGS, /* d */ DF_REGS,
  /* e */ FP_REGS,   /* f */ FP_REGS,  /* g **/ NO_REGS,     /* h */ NO_REGS,
  /* i **/ NO_REGS,  /* j */ NO_REGS,  /* k */ SIBCALL_REGS, /* l */ PR_REGS,
  /* m **/ NO_REGS,  /* n **/ NO_REGS, /* o **/ NO_REGS,     /* p **/ NO_REGS,
  /* q */ NO_REGS,   /* r **/ NO_REGS, /* s **/ NO_REGS,     /* t */ T_REGS,
  /* u */ NO_REGS,   /* v */ NO_REGS,  /* w */ FP0_REGS,     /* x */ MAC_REGS,
  /* y */ FPUL_REGS, /* z */ R0_REGS
};

int assembler_dialect;
static bool shmedia_space_reserved_for_target_registers;

static void split_branches (rtx);
static int branch_dest (rtx);
static void force_into (rtx, rtx);
static void print_slot (rtx);
static rtx add_constant (rtx, enum machine_mode, rtx);
static void dump_table (rtx);
static int hi_const (rtx);
static int broken_move (rtx);
static int mova_p (rtx);
static rtx find_barrier (int, rtx, rtx);
static int noncall_uses_reg (rtx, rtx, rtx *);
static rtx gen_block_redirect (rtx, int, int);
static void sh_reorg (void);
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
static rtx frame_insn (rtx);
static rtx push (int);
static void pop (int);
static void push_regs (HARD_REG_SET *, int);
static int calc_live_regs (HARD_REG_SET *);
static void mark_use (rtx, rtx *);
static HOST_WIDE_INT rounded_frame_size (int);
static rtx mark_constant_pool_use (rtx);
const struct attribute_spec sh_attribute_table[];
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void sh_insert_attributes (tree, tree *);
static int sh_adjust_cost (rtx, rtx, rtx, int);
static int sh_use_dfa_interface (void);
static int sh_issue_rate (void);
static bool sh_function_ok_for_sibcall (tree, tree);

static bool sh_cannot_modify_jumps_p (void);
static int sh_target_reg_class (void);
static bool sh_optimize_target_register_callee_saved (bool);
static bool sh_ms_bitfield_layout_p (tree);

static void sh_init_builtins (void);
static void sh_media_init_builtins (void);
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void sh_file_start (void);
static int flow_dependent_p (rtx, rtx);
static void flow_dependent_p_1 (rtx, rtx, void *);
static int shiftcosts (rtx);
static int andcosts (rtx);
static int addsubcosts (rtx);
static int multcosts (rtx);
static bool unspec_caller_rtx_p (rtx);
static bool sh_cannot_copy_insn_p (rtx);
static bool sh_rtx_costs (rtx, int, int, int *);
static int sh_address_cost (rtx);
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
static int scavenge_reg (HARD_REG_SET *s);
struct save_schedule_s;
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
                                                struct save_schedule_s *, int);

static bool sh_promote_prototypes (tree);
static rtx sh_struct_value_rtx (tree, int);
static bool sh_return_in_memory (tree, tree);
static rtx sh_builtin_saveregs (void);
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
static tree sh_build_builtin_va_list (void);
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table

/* The next two are used for debug info when compiling with -gdwarf.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"

/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START sh_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost

#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
  sh_use_dfa_interface
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate

#undef TARGET_CANNOT_MODIFY_JUMPS_P
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
  sh_optimize_target_register_callee_saved

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sh_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sh_expand_builtin

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall

#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sh_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST sh_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list

#undef TARGET_PCH_VALID_P
#define TARGET_PCH_VALID_P sh_pch_valid_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Print the operand address in x to the stream.  */

void
print_operand_address (FILE *stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
    case SUBREG:
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
                     reg_names[true_regnum (base)]);
            break;

          case REG:
          case SUBREG:
            {
              int base_num = true_regnum (base);
              int index_num = true_regnum (index);

              fprintf (stream, "@(r0,%s)",
                       reg_names[MAX (base_num, index_num)]);
              break;
            }

          default:
            debug_rtx (x);
            abort ();
          }
      }
      break;

    case PRE_DEC:
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    case POST_INC:
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
      break;

    default:
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   '.'  print a .s if insn needs delay slot
   ','  print LOCAL_LABEL_PREFIX
   '@'  print trap, rte or rts depending upon pragma interruptness
   '#'  output a nop if there is nothing to put in the delay slot
   '''  print likelihood suffix (/u for unlikely).
   'O'  print a constant without the #
   'R'  print the LSW of a dp value - changes if in little endian
   'S'  print the MSW of a dp value - changes if in little endian
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
   'M'  print an `x' if `m' will print `base,index'.
   'N'  print 'r63' if the operand is (const_int 0).
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
   'o'  output an operator.  */

void
print_operand (FILE *stream, rtx x, int code)
{
  switch (code)
    {
    case '.':
      if (final_sequence
          && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
          && get_attr_length (XVECEXP (final_sequence, 0, 1)))
        fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
      break;
    case ',':
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
      break;
    case '@':
      if (trap_exit)
        fprintf (stream, "trapa #%d", trap_exit);
      else if (sh_cfun_interrupt_handler_p ())
        fprintf (stream, "rte");
      else
        fprintf (stream, "rts");
      break;
    case '#':
      /* Output a nop if there's nothing in the delay slot.  */
      if (dbr_sequence_length () == 0)
        fprintf (stream, "\n\tnop");
      break;
    case '\'':
      {
        rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);

        if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
          fputs ("/u", stream);
        break;
      }
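      /* The REG_BR_PROB note value is the predicted probability that the
         branch is taken, scaled by REG_BR_PROB_BASE; doubling it and
         comparing against the base asks "is the taken probability below
         one half?", in which case the /u (unlikely) suffix is emitted.  */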
    case 'O':
      x = mark_constant_pool_use (x);
      output_addr_const (stream, x);
      break;
    case 'R':
      fputs (reg_names[REGNO (x) + LSW], (stream));
      break;
    case 'S':
      fputs (reg_names[REGNO (x) + MSW], (stream));
      break;
    case 'T':
      /* Next word of a double.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          if (GET_CODE (XEXP (x, 0)) != PRE_DEC
              && GET_CODE (XEXP (x, 0)) != POST_INC)
            x = adjust_address (x, SImode, 4);
          print_operand_address (stream, XEXP (x, 0));
          break;
        default:
          break;
        }
      break;
    case 'o':
      switch (GET_CODE (x))
        {
        case PLUS:  fputs ("add", stream); break;
        case MINUS: fputs ("sub", stream); break;
        case MULT:  fputs ("mul", stream); break;
        case DIV:   fputs ("div", stream); break;
        case EQ:    fputs ("eq",  stream); break;
        case NE:    fputs ("ne",  stream); break;
        case GT:  case LT:  fputs ("gt",  stream); break;
        case GE:  case LE:  fputs ("ge",  stream); break;
        case GTU: case LTU: fputs ("gtu", stream); break;
        case GEU: case LEU: fputs ("geu", stream); break;
        default:
          break;
        }
      break;
    case 'M':
      if (GET_CODE (x) == MEM
          && GET_CODE (XEXP (x, 0)) == PLUS
          && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
              || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
        fputc ('x', stream);
      break;

    case 'm':
      if (GET_CODE (x) != MEM)
        abort ();
      x = XEXP (x, 0);
      switch (GET_CODE (x))
        {
        case REG:
        case SUBREG:
          print_operand (stream, x, 0);
          fputs (", 0", stream);
          break;

        case PLUS:
          print_operand (stream, XEXP (x, 0), 0);
          fputs (", ", stream);
          print_operand (stream, XEXP (x, 1), 0);
          break;

        default:
          abort ();
        }
      break;

    case 'N':
      if (x == CONST0_RTX (GET_MODE (x)))
        {
          fprintf ((stream), "r63");
          break;
        }
      goto default_output;
    case 'u':
      if (GET_CODE (x) == CONST_INT)
        {
          fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
          break;
        }
      /* Fall through.  */

    default_output:
    default:
      switch (GET_CODE (x))
        {
          /* FIXME: We need this on SHmedia32 because reload generates
             some sign-extended HI or QI loads into DImode registers
             but, because Pmode is SImode, the address ends up with a
             subreg:SI of the DImode register.  Maybe reload should be
             fixed so as to apply alter_subreg to such loads?  */
        case SUBREG:
          if (SUBREG_BYTE (x) != 0
              || GET_CODE (SUBREG_REG (x)) != REG)
            abort ();

          x = SUBREG_REG (x);
          /* Fall through.  */

        case REG:
          if (FP_REGISTER_P (REGNO (x))
              && GET_MODE (x) == V16SFmode)
            fprintf ((stream), "mtrx%s", reg_names[REGNO (x)] + 2);
          else if (FP_REGISTER_P (REGNO (x))
                   && GET_MODE (x) == V4SFmode)
            fprintf ((stream), "fv%s", reg_names[REGNO (x)] + 2);
          else if (GET_CODE (x) == REG
                   && GET_MODE (x) == V2SFmode)
            fprintf ((stream), "fp%s", reg_names[REGNO (x)] + 2);
          else if (FP_REGISTER_P (REGNO (x))
                   && GET_MODE_SIZE (GET_MODE (x)) > 4)
            fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
          else
            fputs (reg_names[REGNO (x)], (stream));
          break;

        case MEM:
          output_address (XEXP (x, 0));
          break;

        case CONST:
          if (TARGET_SHMEDIA
              && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
              && GET_MODE (XEXP (x, 0)) == DImode
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
            {
              rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);

              fputc ('(', stream);
              if (GET_CODE (val) == ASHIFTRT)
                {
                  fputc ('(', stream);
                  if (GET_CODE (XEXP (val, 0)) == CONST)
                    fputc ('(', stream);
                  output_addr_const (stream, XEXP (val, 0));
                  if (GET_CODE (XEXP (val, 0)) == CONST)
                    fputc (')', stream);
                  fputs (" >> ", stream);
                  output_addr_const (stream, XEXP (val, 1));
                  fputc (')', stream);
                }
              else
                {
                  if (GET_CODE (val) == CONST)
                    fputc ('(', stream);
                  output_addr_const (stream, val);
                  if (GET_CODE (val) == CONST)
                    fputc (')', stream);
                }
              fputs (" & 65535)", stream);
              break;
            }

          /* Fall through.  */
        default:
          if (TARGET_SH1)
            fputc ('#', stream);
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}
/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
static void
force_into (rtx value, rtx target)
{
  value = force_operand (value, target);
  if (! rtx_equal_p (value, target))
    emit_insn (gen_move_insn (target, value));
}
/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

int
expand_block_move (rtx *operands)
{
  int align = INTVAL (operands[3]);
  int constp = (GET_CODE (operands[2]) == CONST_INT);
  int bytes = (constp ? INTVAL (operands[2]) : 0);

  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
  if (! constp || align < 4 || (bytes % 4 != 0))
    return 0;

  if (TARGET_HARD_SH4)
    {
      if (bytes < 12)
        return 0;
      else if (bytes == 12)
        {
          tree entry_name;
          rtx sym;
          rtx func_addr_rtx;
          rtx r4 = gen_rtx_REG (SImode, 4);
          rtx r5 = gen_rtx_REG (SImode, 5);

          entry_name = get_identifier ("__movstrSI12_i4");

          sym = function_symbol (IDENTIFIER_POINTER (entry_name));
          func_addr_rtx = copy_to_mode_reg (Pmode, sym);
          force_into (XEXP (operands[0], 0), r4);
          force_into (XEXP (operands[1], 0), r5);
          emit_insn (gen_block_move_real_i4 (func_addr_rtx));
          return 1;
        }
      else if (! TARGET_SMALLCODE)
        {
          tree entry_name;
          rtx sym;
          rtx func_addr_rtx;
          int dwords;
          rtx r4 = gen_rtx_REG (SImode, 4);
          rtx r5 = gen_rtx_REG (SImode, 5);
          rtx r6 = gen_rtx_REG (SImode, 6);

          entry_name = get_identifier (bytes & 4
                                       ? "__movstr_i4_odd"
                                       : "__movstr_i4_even");
          sym = function_symbol (IDENTIFIER_POINTER (entry_name));
          func_addr_rtx = copy_to_mode_reg (Pmode, sym);
          force_into (XEXP (operands[0], 0), r4);
          force_into (XEXP (operands[1], 0), r5);

          dwords = bytes >> 3;
          emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
          emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
          return 1;
        }
      else
        return 0;
    }
  if (bytes < 64)
    {
      char entry[30];
      tree entry_name;
      rtx sym;
      rtx func_addr_rtx;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);

      sprintf (entry, "__movstrSI%d", bytes);
      entry_name = get_identifier (entry);
      sym = function_symbol (IDENTIFIER_POINTER (entry_name));
      func_addr_rtx = copy_to_mode_reg (Pmode, sym);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);
      emit_insn (gen_block_move_real (func_addr_rtx));
      return 1;
    }

  /* This is the same number of bytes as a memcpy call, but to a different
     less common function name, so this will occasionally use more space.  */
  if (! TARGET_SMALLCODE)
    {
      tree entry_name;
      rtx sym;
      rtx func_addr_rtx;
      int final_switch, while_loop;
      rtx r4 = gen_rtx_REG (SImode, 4);
      rtx r5 = gen_rtx_REG (SImode, 5);
      rtx r6 = gen_rtx_REG (SImode, 6);

      entry_name = get_identifier ("__movstr");
      sym = function_symbol (IDENTIFIER_POINTER (entry_name));
      func_addr_rtx = copy_to_mode_reg (Pmode, sym);
      force_into (XEXP (operands[0], 0), r4);
      force_into (XEXP (operands[1], 0), r5);

      /* r6 controls the size of the move.  16 is decremented from it
         for each 64 bytes moved.  Then the negative bit left over is used
         as an index into a list of move instructions.  e.g., a 72 byte move
         would be set up with size(r6) = 14, for one iteration through the
         big while loop, and a switch of -2 for the last part.  */

      final_switch = 16 - ((bytes / 4) % 16);
      while_loop = ((bytes / 4) / 16 - 1) * 16;
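      /* Sanity-checking the 72 byte example above: bytes / 4 = 18 words,
         so final_switch = 16 - (18 % 16) = 14 and
         while_loop = (18 / 16 - 1) * 16 = 0, giving r6 = 0 + 14 = 14,
         as the comment states.  */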
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
      emit_insn (gen_block_lump_real (func_addr_rtx));
      return 1;
    }

  return 0;
}
/* Prepare operands for a move define_expand; specifically, one of the
   operands must be in a register.  */

int
prepare_move_operands (rtx operands[], enum machine_mode mode)
{
  if ((mode == SImode || mode == DImode)
      && flag_pic
      && ! ((mode == Pmode || mode == ptr_mode)
            && tls_symbolic_operand (operands[1], Pmode) != 0))
    {
      rtx temp;
      if (SYMBOLIC_CONST_P (operands[1]))
        {
          if (GET_CODE (operands[0]) == MEM)
            operands[1] = force_reg (Pmode, operands[1]);
          else if (TARGET_SHMEDIA
                   && GET_CODE (operands[1]) == LABEL_REF
                   && target_reg_operand (operands[0], mode))
            /* It's ok.  */;
          else
            {
              temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
              operands[1] = legitimize_pic_address (operands[1], mode, temp);
            }
        }
      else if (GET_CODE (operands[1]) == CONST
               && GET_CODE (XEXP (operands[1], 0)) == PLUS
               && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
        {
          temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
          temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
                                         mode, temp);
          operands[1] = expand_binop (mode, add_optab, temp,
                                      XEXP (XEXP (operands[1], 0), 1),
                                      no_new_pseudos ? temp
                                      : gen_reg_rtx (Pmode),
                                      0, OPTAB_LIB_WIDEN);
        }
    }

  if (! reload_in_progress && ! reload_completed)
    {
      /* Copy the source to a register if both operands aren't registers.  */
      if (! register_operand (operands[0], mode)
          && ! sh_register_operand (operands[1], mode))
        operands[1] = copy_to_mode_reg (mode, operands[1]);

      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
        {
          /* This is like change_address_1 (operands[0], mode, 0, 1) ,
             except that we can't use that function because it is static.  */
          rtx new = change_address (operands[0], mode, 0);
          MEM_COPY_ATTRIBUTES (new, operands[0]);
          operands[0] = new;
        }

      /* This case can happen while generating code to move the result
         of a library call to the target.  Reject `st r0,@(rX,rY)' because
         reload will fail to find a spill register for rX, since r0 is already
         being used for the source.  */
      else if (refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
               && GET_CODE (operands[0]) == MEM
               && GET_CODE (XEXP (operands[0], 0)) == PLUS
               && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
        operands[1] = copy_to_mode_reg (mode, operands[1]);
    }

  if (mode == Pmode || mode == ptr_mode)
    {
      rtx op0, op1;
      enum tls_model tls_kind;

      op0 = operands[0];
      op1 = operands[1];
      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
        {
          rtx tga_op1, tga_ret, tmp, tmp2;

          switch (tls_kind)
            {
            case TLS_MODEL_GLOBAL_DYNAMIC:
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
              emit_insn (gen_tls_global_dynamic (tga_ret, op1));
              op1 = tga_ret;
              break;

            case TLS_MODEL_LOCAL_DYNAMIC:
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
              emit_insn (gen_tls_local_dynamic (tga_ret, op1));

              tmp = gen_reg_rtx (Pmode);
              emit_move_insn (tmp, tga_ret);

              if (register_operand (op0, Pmode))
                tmp2 = op0;
              else
                tmp2 = gen_reg_rtx (Pmode);

              emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
              op1 = tmp2;
              break;

            case TLS_MODEL_INITIAL_EXEC:
              if (! flag_pic)
                emit_insn (gen_GOTaddr2picreg ());
              tga_op1 = gen_reg_rtx (Pmode);
              tmp = gen_sym2GOTTPOFF (op1);
              emit_insn (gen_tls_initial_exec (tga_op1, tmp));
              op1 = tga_op1;
              break;

            case TLS_MODEL_LOCAL_EXEC:
              tmp2 = gen_reg_rtx (Pmode);
              emit_insn (gen_load_gbr (tmp2));
              tmp = gen_reg_rtx (Pmode);
              emit_insn (gen_symTPOFF2reg (tmp, op1));
              RTX_UNCHANGING_P (tmp) = 1;

              if (register_operand (op0, Pmode))
                op1 = op0;
              else
                op1 = gen_reg_rtx (Pmode);

              emit_insn (gen_addsi3 (op1, tmp, tmp2));
              break;

            default:
              abort ();
            }
          operands[1] = op1;
        }
    }

  return 0;
}
/* Prepare the operands for an scc instruction; make sure that the
   compare has been done.  */

rtx
prepare_scc_operands (enum rtx_code code)
{
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
  enum rtx_code oldcode = code;
  enum machine_mode mode;

  /* First need a compare insn.  */
  switch (code)
    {
    case NE:
      /* It isn't possible to handle this case.  */
      abort ();
    case LT:
      code = GT;
      break;
    case LE:
      code = GE;
      break;
    case LTU:
      code = GTU;
      break;
    case LEU:
      code = GEU;
      break;
    default:
      break;
    }
  if (code != oldcode)
    {
      rtx tmp = sh_compare_op0;
      sh_compare_op0 = sh_compare_op1;
      sh_compare_op1 = tmp;
    }

  mode = GET_MODE (sh_compare_op0);
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);

  sh_compare_op0 = force_reg (mode, sh_compare_op0);
  if ((code != EQ && code != NE
       && (sh_compare_op1 != const0_rtx
           || code == GTU || code == GEU || code == LTU || code == LEU))
      || (mode == DImode && sh_compare_op1 != const0_rtx)
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    sh_compare_op1 = force_reg (mode, sh_compare_op1);

  if (TARGET_SH4 && GET_MODE_CLASS (mode) == MODE_FLOAT)
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
      (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
                gen_rtx_SET (VOIDmode, t_reg,
                             gen_rtx_fmt_ee (code, SImode,
                                             sh_compare_op0, sh_compare_op1)),
                gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
  else
    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
                            gen_rtx_fmt_ee (code, SImode,
                                            sh_compare_op0, sh_compare_op1)));

  return t_reg;
}
/* Called from the md file, set up the operands of a compare instruction.  */

void
from_compare (rtx *operands, int code)
{
  enum machine_mode mode = GET_MODE (sh_compare_op0);
  rtx insn;
  if (mode == VOIDmode)
    mode = GET_MODE (sh_compare_op1);
  if (code != EQ
      || mode == DImode
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      /* Force args into regs, since we can't use constants here.  */
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
      if (sh_compare_op1 != const0_rtx
          || code == GTU || code == GEU
          || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
        sh_compare_op1 = force_reg (mode, sh_compare_op1);
    }
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
    {
      from_compare (operands, GT);
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
    }
  else
    insn = gen_rtx_SET (VOIDmode,
                        gen_rtx_REG (SImode, T_REG),
                        gen_rtx_fmt_ee (code, SImode,
                                        sh_compare_op0, sh_compare_op1));
  if (TARGET_SH4 && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      insn = gen_rtx_PARALLEL (VOIDmode,
                      gen_rtvec (2, insn,
                                 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
    }
  else
    emit_insn (insn);
}
/* Functions to output assembly code.  */

/* Return a sequence of instructions to perform DI or DF move.

   Since the SH cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == MEM
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
    return "mov.l %T1,%0\n\tmov.l %1,%0";

  if (register_operand (dst, mode)
      && register_operand (src, mode))
    {
      if (REGNO (src) == MACH_REG)
        return "sts mach,%S0\n\tsts macl,%R0";

      /* When mov.d r1,r2 do r2->r3 then r1->r2;
         when mov.d r1,r0 do r1->r0 then r2->r1.  */

      if (REGNO (src) + 1 == REGNO (dst))
        return "mov %T1,%T0\n\tmov %1,%0";
      else
        return "mov %1,%0\n\tmov %T1,%T0";
    }
  else if (GET_CODE (src) == CONST_INT)
    {
      if (INTVAL (src) < 0)
        output_asm_insn ("mov #-1,%S0", operands);
      else
        output_asm_insn ("mov #0,%S0", operands);

      return "mov %1,%R0";
    }
  else if (GET_CODE (src) == MEM)
    {
      int ptrreg = -1;
      int dreg = REGNO (dst);
      rtx inside = XEXP (src, 0);

      if (GET_CODE (inside) == REG)
        ptrreg = REGNO (inside);
      else if (GET_CODE (inside) == SUBREG)
        ptrreg = subreg_regno (inside);
      else if (GET_CODE (inside) == PLUS)
        {
          ptrreg = REGNO (XEXP (inside, 0));
          /* ??? A r0+REG address shouldn't be possible here, because it isn't
             an offsettable address.  Unfortunately, offsettable addresses use
             QImode to check the offset, and a QImode offsettable address
             requires r0 for the other operand, which is not currently
             supported, so we can't use the 'o' constraint.
             Thus we must check for and handle r0+REG addresses here.
             We punt for now, since this is likely very rare.  */
          if (GET_CODE (XEXP (inside, 1)) == REG)
            abort ();
        }
      else if (GET_CODE (inside) == LABEL_REF)
        return "mov.l %1,%0\n\tmov.l %1+4,%T0";
      else if (GET_CODE (inside) == POST_INC)
        return "mov.l %1,%0\n\tmov.l %1,%T0";
      else
        abort ();

      /* Work out the safe way to copy.  Copy into the second half first.  */
      if (dreg == ptrreg)
        return "mov.l %T1,%T0\n\tmov.l %1,%0";
    }

  return "mov.l %1,%0\n\tmov.l %T1,%T0";
}
/* Print an instruction which would have gone into a delay slot after
   another instruction, but couldn't because the other instruction expanded
   into a sequence where putting the slot insn at the end wouldn't work.  */

static void
print_slot (rtx insn)
{
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 0, 1, NULL);

  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
}
const char *
output_far_jump (rtx insn, rtx op)
{
  struct { rtx lab, reg, op; } this;
  rtx braf_base_lab = NULL_RTX;
  const char *jump;
  int far;
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
  rtx prev;

  this.lab = gen_label_rtx ();

  if (TARGET_SH2
      && offset >= -32764
      && offset - get_attr_length (insn) <= 32766)
    {
      far = 0;
      jump = "mov.w %O0,%1; braf %1";
    }
  else
    {
      far = 1;
      if (flag_pic)
        {
          if (TARGET_SH2)
            jump = "mov.l %O0,%1; braf %1";
          else
            jump = "mov.l r0,@-r15; mova %O0,r0; mov.l @r0,%1; add r0,%1; mov.l @r15+,r0; jmp @%1";
        }
      else
        jump = "mov.l %O0,%1; jmp @%1";
    }
  /* If we have a scratch register available, use it.  */
  if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
    {
      this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
      if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
        jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1";
      output_asm_insn (jump, &this.lab);
      if (dbr_sequence_length ())
        print_slot (final_sequence);
      else
        output_asm_insn ("nop", 0);
    }
  else
    {
      /* Output the delay slot insn first if any.  */
      if (dbr_sequence_length ())
        print_slot (final_sequence);

      this.reg = gen_rtx_REG (SImode, 13);
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
         Fortunately, MACL is fixed and call-clobbered, and we never
         need its value across jumps, so save r13 in it instead of in
         the stack.  */
      if (TARGET_SH5)
        output_asm_insn ("lds r13, macl", 0);
      else
        output_asm_insn ("mov.l r13,@-r15", 0);
      output_asm_insn (jump, &this.lab);
      if (TARGET_SH5)
        output_asm_insn ("sts macl, r13", 0);
      else
        output_asm_insn ("mov.l @r15+,r13", 0);
    }
  if (far && flag_pic && TARGET_SH2)
    {
      braf_base_lab = gen_label_rtx ();
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
                                         CODE_LABEL_NUMBER (braf_base_lab));
    }
  if (far)
    output_asm_insn (".align 2", 0);
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
  this.op = op;
  if (far && flag_pic)
    {
      if (TARGET_SH2)
        this.lab = braf_base_lab;
      output_asm_insn (".long %O2-%O0", &this.lab);
    }
  else
    output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab);
  return "";
}
/* Local label counter, used for constants in the pool and inside
   pattern branches.  */

static int lf = 100;
/* Output code for ordinary branches.  */

const char *
output_branch (int logic, rtx insn, rtx *operands)
{
  switch (get_attr_length (insn))
    {
    case 6:
      /* This can happen if filling the delay slot has caused a forward
         branch to exceed its range (we could reverse it, but only
         when we know we won't overextend other branches; this should
         best be handled by relaxation).
         It can also happen when other condbranches hoist delay slot insn
         from their destination, thus leading to code size increase.
         But the branch will still be in the range -4092..+4098 bytes.  */

      if (! TARGET_RELAX)
        {
          int label = lf++;
          /* The call to print_slot will clobber the operands.  */
          rtx op0 = operands[0];

          /* If the instruction in the delay slot is annulled (true), then
             there is no delay slot where we can put it now.  The only safe
             place for it is after the label.  final will do that by default.  */

          if (final_sequence
              && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))
            {
              asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
                           ASSEMBLER_DIALECT ? "/" : ".", label);
              print_slot (final_sequence);
            }
          else
            asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);

          output_asm_insn ("bra\t%l0", &op0);
          fprintf (asm_out_file, "\tnop\n");
          (*targetm.asm_out.internal_label)(asm_out_file, "LF", label);

          return "";
        }
      /* When relaxing, handle this like a short branch.  The linker
         will fix it up if it still doesn't fit after relaxation.  */
    case 2:
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";

      /* These are for SH2e, in which we have to account for the
         extra nop because of the hardware bug in annulled branches.  */
    case 8:
      if (! TARGET_RELAX)
        {
          int label = lf++;

          if (final_sequence
              && INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0)))
            abort ();
          asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
                       logic ? "f" : "t",
                       ASSEMBLER_DIALECT ? "/" : ".", label);
          fprintf (asm_out_file, "\tnop\n");
          output_asm_insn ("bra\t%l0", operands);
          fprintf (asm_out_file, "\tnop\n");
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);

          return "";
        }
      /* When relaxing, fall through.  */
    case 4:
      {
        char buffer[10];

        sprintf (buffer, "b%s%ss\t%%l0",
                 logic ? "t" : "f",
                 ASSEMBLER_DIALECT ? "/" : ".");
        output_asm_insn (buffer, &operands[0]);
        return "nop";
      }

    default:
      /* There should be no longer branches now - that would
         indicate that something has destroyed the branches set
         up in machine_dependent_reorg.  */
      abort ();
    }
}
const char *
output_branchy_insn (enum rtx_code code, const char *template,
                     rtx insn, rtx *operands)
{
  rtx next_insn = NEXT_INSN (insn);

  if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
    {
      rtx src = SET_SRC (PATTERN (next_insn));
      if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
        {
          /* Following branch not taken */
          operands[9] = gen_label_rtx ();
          emit_label_after (operands[9], next_insn);
          INSN_ADDRESSES_NEW (operands[9],
                              INSN_ADDRESSES (INSN_UID (next_insn))
                              + get_attr_length (next_insn));
          return template;
        }
      else
        {
          int offset = (branch_dest (next_insn)
                        - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
          if (offset >= -252 && offset <= 258)
            {
              if (GET_CODE (src) == IF_THEN_ELSE)
                /* branch_true */
                src = XEXP (src, 1);
              operands[9] = src;
              return template;
            }
        }
    }
  operands[9] = gen_label_rtx ();
  emit_label_after (operands[9], insn);
  INSN_ADDRESSES_NEW (operands[9],
                      INSN_ADDRESSES (INSN_UID (insn))
                      + get_attr_length (insn));
  return template;
}
const char *
output_ieee_ccmpeq (rtx insn, rtx *operands)
{
  return output_branchy_insn (NE, "bt\t%l9\\;fcmp/eq\t%1,%0", insn, operands);
}
/* Output the start of the assembler file.  */

static void
sh_file_start (void)
{
  default_file_start ();

  if (TARGET_ELF)
    /* We need to show the text section with the proper
       attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
       emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
       will complain.  We can teach GAS specifically about the
       default attributes for our choice of text section, but
       then we would have to change GAS again if/when we change
       the text section name.  */
    fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
  else
    /* Switch to the data section so that the coffsem symbol
       isn't in the text section.  */
    data_section ();

  if (TARGET_LITTLE_ENDIAN)
    fputs ("\t.little\n", asm_out_file);

  if (!TARGET_ELF)
    {
      if (TARGET_SHCOMPACT)
        fputs ("\t.mode\tSHcompact\n", asm_out_file);
      else if (TARGET_SHMEDIA)
        fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
                 TARGET_SHMEDIA64 ? 64 : 32);
    }
}
/* Check if PAT includes UNSPEC_CALLER unspec pattern.  */

static bool
unspec_caller_rtx_p (rtx pat)
{
  switch (GET_CODE (pat))
    {
    case CONST:
      return unspec_caller_rtx_p (XEXP (pat, 0));
    case PLUS:
    case MINUS:
      if (unspec_caller_rtx_p (XEXP (pat, 0)))
        return true;
      return unspec_caller_rtx_p (XEXP (pat, 1));
    case UNSPEC:
      if (XINT (pat, 1) == UNSPEC_CALLER)
        return true;
    default:
      break;
    }

  return false;
}
/* Indicate that INSN cannot be duplicated.  This is true for an insn
   that generates a unique label.  */

static bool
sh_cannot_copy_insn_p (rtx insn)
{
  rtx pat;

  if (!reload_completed || !flag_pic)
    return false;

  if (GET_CODE (insn) != INSN)
    return false;
  if (asm_noperands (insn) >= 0)
    return false;

  pat = PATTERN (insn);
  if (GET_CODE (pat) != SET)
    return false;
  pat = SET_SRC (pat);

  if (unspec_caller_rtx_p (pat))
    return true;

  return false;
}
/* Actual number of instructions used to make a shift by N.  */
static const char ashiftrt_insns[] =
  { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};

/* Left shift and logical right shift are the same.  */
static const char shift_insns[] =
  { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};

/* Individual shift amounts needed to get the above length sequences.
   One bit right shifts clobber the T bit, so when possible, put one bit
   shifts in the middle of the sequence, so the ends are eligible for
   branch delay slots.  */
static const short shift_amounts[32][5] = {
  {0}, {1}, {2}, {2, 1},
  {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
  {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
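/* For example, a shift by 21 takes shift_insns[21] = 4 instructions
   using the amounts {16, 2, 1, 2}; a negative entry such as the -1 in
   {16, -1, 16} denotes a one-bit shift in the opposite direction
   (gen_ashift below flips the shift type for negative counts).  */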
/* Likewise, but for shift amounts < 16, up to three highmost bits
   might be clobbered.  This is typically used when combined with some
   kind of sign or zero extension.  */

static const char ext_shift_insns[] =
  { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};

static const short ext_shift_amounts[32][4] = {
  {0}, {1}, {2}, {2, 1},
  {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
  {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
/* Assuming we have a value that has been sign-extended by at least one bit,
   can we use the ext_shift_amounts with the last shift turned to an
   arithmetic shift to shift it by N without data loss, and quicker than
   by other means?  */
#define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
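/* ((n) | 8) == 15 holds exactly for n == 7 and n == 15, the two counts
   whose ext_shift_amounts sequences end in a one-bit right shift
   ({8, -1} and {16, -1}); that trailing shift is the one that can be
   made arithmetic when the value is already sign-extended.  */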
/* This is used in length attributes in sh.md to help compute the length
   of arbitrary constant shift instructions.  */

int
shift_insns_rtx (rtx insn)
{
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
  int shift_count = INTVAL (XEXP (set_src, 1));
  enum rtx_code shift_code = GET_CODE (set_src);

  switch (shift_code)
    {
    case ASHIFTRT:
      return ashiftrt_insns[shift_count];
    case LSHIFTRT:
    case ASHIFT:
      return shift_insns[shift_count];
    default:
      abort ();
    }
}
/* Return the cost of a shift.  */

static inline int
shiftcosts (rtx x)
{
  int value;

  if (TARGET_SHMEDIA)
    return 1;

  if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      if (GET_MODE (x) == DImode
          && GET_CODE (XEXP (x, 1)) == CONST_INT
          && INTVAL (XEXP (x, 1)) == 1)
        return 2;

      /* Everything else is invalid, because there is no pattern for it.  */
      return 10000;
    }
  /* If shift by a non constant, then this will be expensive.  */
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return SH_DYNAMIC_SHIFT_COST;

  value = INTVAL (XEXP (x, 1));

  /* Otherwise, return the true cost in instructions.  */
  if (GET_CODE (x) == ASHIFTRT)
    {
      int cost = ashiftrt_insns[value];
      /* If SH3, then we put the constant in a reg and use shad.  */
      if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
        cost = 1 + SH_DYNAMIC_SHIFT_COST;
      return cost;
    }
  else
    return shift_insns[value];
}
/* Return the cost of an AND operation.  */

static inline int
andcosts (rtx x)
{
  int i;

  /* Anding with a register is a single cycle and instruction.  */
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 1;

  i = INTVAL (XEXP (x, 1));

  if (TARGET_SHMEDIA)
    {
      if ((GET_CODE (XEXP (x, 1)) == CONST_INT
           && CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
          || EXTRA_CONSTRAINT_C16 (XEXP (x, 1)))
        return 1;
      else
        return 2;
    }

  /* These constants are single cycle extu.[bw] instructions.  */
  if (i == 0xff || i == 0xffff)
    return 1;
  /* Constants that can be used in an and immediate instruction in a single
     cycle, but this requires r0, so make it a little more expensive.  */
  if (CONST_OK_FOR_K08 (i))
    return 2;
  /* Constants that can be loaded with a mov immediate and an and.
     This case is probably unnecessary.  */
  if (CONST_OK_FOR_I08 (i))
    return 2;
  /* Any other constant requires a 2 cycle pc-relative load plus an and.
     This case is probably unnecessary.  */
  return 3;
}
/* Return the cost of an addition or a subtraction.  */

static inline int
addsubcosts (rtx x)
{
  /* Adding a register is a single cycle insn.  */
  if (GET_CODE (XEXP (x, 1)) == REG
      || GET_CODE (XEXP (x, 1)) == SUBREG)
    return 1;

  /* Likewise for small constants.  */
  if (GET_CODE (XEXP (x, 1)) == CONST_INT
      && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
    return 1;

  if (TARGET_SHMEDIA)
    switch (GET_CODE (XEXP (x, 1)))
      {
      case CONST:
      case LABEL_REF:
      case SYMBOL_REF:
        return TARGET_SHMEDIA64 ? 5 : 3;

      case CONST_INT:
        if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
          return 2;
        else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
          return 3;
        else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
          return 4;

        /* Fall through.  */
      default:
        return 5;
      }

  /* Any other constant requires a 2 cycle pc-relative load plus an
     addition.  */
  return 3;
}
/* Return the cost of a multiply.  */
static inline int
multcosts (rtx x ATTRIBUTE_UNUSED)
{
  if (TARGET_SHMEDIA)
    return 3;

  if (TARGET_SH2)
    {
      /* We have a mul insn, so we can never take more than the mul and the
         read of the mac reg, but count more because of the latency and extra
         reg usage.  */
      if (TARGET_SMALLCODE)
        return 2;
      return 3;
    }

  /* If we're aiming at small code, then just count the number of
     insns in a multiply call sequence.  */
  if (TARGET_SMALLCODE)
    return 5;

  /* Otherwise count all the insns in the routine we'd be calling too.  */
  return 20;
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
sh_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (TARGET_SHMEDIA)
        {
          if (INTVAL (x) == 0)
            *total = 0;
          else if (outer_code == AND && and_operand ((x), DImode))
            *total = 0;
          else if ((outer_code == IOR || outer_code == XOR
                    || outer_code == PLUS)
                   && CONST_OK_FOR_I10 (INTVAL (x)))
            *total = 0;
          else if (CONST_OK_FOR_I16 (INTVAL (x)))
            *total = COSTS_N_INSNS (outer_code != SET);
          else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
            *total = COSTS_N_INSNS (2);
          else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
            *total = COSTS_N_INSNS (3);
          else
            *total = COSTS_N_INSNS (4);
          return true;
        }
      if (CONST_OK_FOR_I08 (INTVAL (x)))
        *total = 0;
      else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
               && CONST_OK_FOR_K08 (INTVAL (x)))
        *total = 1;
      else
        *total = 8;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      if (TARGET_SHMEDIA64)
        *total = COSTS_N_INSNS (4);
      else if (TARGET_SHMEDIA32)
        *total = COSTS_N_INSNS (2);
      else
        *total = 5;
      return true;

    case CONST_DOUBLE:
      if (TARGET_SHMEDIA)
        *total = COSTS_N_INSNS (4);
      else
        *total = 10;
      return true;

    case PLUS:
      *total = COSTS_N_INSNS (addsubcosts (x));
      return true;

    case AND:
      *total = COSTS_N_INSNS (andcosts (x));
      return true;

    case MULT:
      *total = COSTS_N_INSNS (multcosts (x));
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (shiftcosts (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (20);
      return true;

    case FLOAT:
    case FIX:
      *total = 100;
      return true;

    default:
      return false;
    }
}
/* Compute the cost of an address.  For the SH, all valid addresses are
   the same cost.  Use a slightly higher cost for reg + reg addressing,
   since it increases pressure on r0.  */

static int
sh_address_cost (rtx X)
{
  return (GET_CODE (X) == PLUS
          && ! CONSTANT_P (XEXP (X, 1))
          && ! TARGET_SHMEDIA ? 1 : 0);
}
/* Code to expand a shift.  */

void
gen_ashift (int type, int n, rtx reg)
{
  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
        type = LSHIFTRT;
      else
        type = ASHIFT;
      n = -n;
    }

  switch (type)
    {
    case ASHIFTRT:
      emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
      break;
    case LSHIFTRT:
      if (n == 1)
        emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
      else
        emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
      break;
    case ASHIFT:
      emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
      break;
    }
}
/* Same for HImode */

void
gen_ashift_hi (int type, int n, rtx reg)
{
  /* Negative values here come from the shift_amounts array.  */
  if (n < 0)
    {
      if (type == ASHIFT)
        type = LSHIFTRT;
      else
        type = ASHIFT;
      n = -n;
    }

  switch (type)
    {
    case ASHIFTRT:
    case LSHIFTRT:
      /* We don't have HImode right shift operations because using the
         ordinary 32 bit shift instructions for that doesn't generate proper
         zero/sign extension.
         gen_ashift_hi is only called in contexts where we know that the
         sign extension works out correctly.  */
      {
        int offset = 0;
        if (GET_CODE (reg) == SUBREG)
          {
            offset = SUBREG_BYTE (reg);
            reg = SUBREG_REG (reg);
          }
        gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
        break;
      }
    case ASHIFT:
      emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
      break;
    }
}
/* Output RTL to split a constant shift into its component SH constant
   shift instructions.  */

void
gen_shifty_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;

  /* Truncate the shift count in case it is out of bounds.  */
  value = value & 0x1f;

  if (value == 31)
    {
      if (code == LSHIFTRT)
        {
          emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
          emit_insn (gen_movt (operands[0]));
          return;
        }
      else if (code == ASHIFT)
        {
          /* There is a two instruction sequence for 31 bit left shifts,
             but it requires r0.  */
          if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
            {
              emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
              emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
              return;
            }
        }
    }
  else if (value == 0)
    {
      /* This can happen when not optimizing.  We must output something here
         to prevent the compiler from aborting in final.c after the try_split
         call.  */
      emit_insn (gen_nop ());
      return;
    }

  max = shift_insns[value];
  for (i = 0; i < max; i++)
    gen_ashift (code, shift_amounts[value][i], operands[0]);
}
/* Same as above, but optimized for values where the topmost bits don't
   matter.  */

void
gen_shifty_hi_op (int code, rtx *operands)
{
  int value = INTVAL (operands[2]);
  int max, i;
  void (*gen_fun) (int, int, rtx);

  /* This operation is used by and_shl for SImode values with a few
     high bits known to be cleared.  */
  value &= 31;
  if (value == 0)
    {
      emit_insn (gen_nop ());
      return;
    }

  gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
  if (code == ASHIFT)
    {
      max = ext_shift_insns[value];
      for (i = 0; i < max; i++)
        gen_fun (code, ext_shift_amounts[value][i], operands[0]);
    }
  else
    /* When shifting right, emit the shifts in reverse order, so that
       solitary negative values come first.  */
    for (i = ext_shift_insns[value] - 1; i >= 0; i--)
      gen_fun (code, ext_shift_amounts[value][i], operands[0]);
}
1881 /* Output RTL for an arithmetic right shift. */
1883 /* ??? Rewrite to use super-optimizer sequences. */
1886 expand_ashiftrt (rtx *operands)
1888 rtx sym;
1889 rtx wrk;
1890 char func[18];
1891 tree func_name;
1892 int value;
1894 if (TARGET_SH3)
1896 if (GET_CODE (operands[2]) != CONST_INT)
1898 rtx count = copy_to_mode_reg (SImode, operands[2]);
1899 emit_insn (gen_negsi2 (count, count));
1900 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
1901 return 1;
1903 else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
1904 > 1 + SH_DYNAMIC_SHIFT_COST)
1906 rtx count
1907 = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
1908 emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
1909 return 1;
1912 if (GET_CODE (operands[2]) != CONST_INT)
1913 return 0;
1915 value = INTVAL (operands[2]) & 31;
1917 if (value == 31)
1919 emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
1920 return 1;
1922 else if (value >= 16 && value <= 19)
1924 wrk = gen_reg_rtx (SImode);
1925 emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
1926 value -= 16;
1927 while (value--)
1928 gen_ashift (ASHIFTRT, 1, wrk);
1929 emit_move_insn (operands[0], wrk);
1930 return 1;
1932 /* Expand a short sequence inline, longer call a magic routine. */
1933 else if (value <= 5)
1935 wrk = gen_reg_rtx (SImode);
1936 emit_move_insn (wrk, operands[1]);
1937 while (value--)
1938 gen_ashift (ASHIFTRT, 1, wrk);
1939 emit_move_insn (operands[0], wrk);
1940 return 1;
1943 wrk = gen_reg_rtx (Pmode);
1945 /* Load the value into an arg reg and call a helper. */
1946 emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
1947 sprintf (func, "__ashiftrt_r4_%d", value);
1948 func_name = get_identifier (func);
1949 sym = function_symbol (IDENTIFIER_POINTER (func_name));
1950 emit_move_insn (wrk, sym);
1951 emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
1952 emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
1953 return 1;
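/* For example, on a target without dynamic shifts, a constant
arithmetic right shift by 20 takes the helper-call path above: the
value is moved into r4, the address of __ashiftrt_r4_20 is loaded into
a scratch register, and the result is read back from r4. The shift
count 20 is purely illustrative. */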
1957 sh_dynamicalize_shift_p (rtx count)
1959 return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
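/* A hedged example: if SH_DYNAMIC_SHIFT_COST is 1 and shift_insns[5]
is 3, a constant shift by 5 satisfies the test above and is turned
into a dynamic (register-count) shift. */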
1962 /* Try to find a good way to implement the combiner pattern
1963 [(set (match_operand:SI 0 "register_operand" "r")
1964 (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
1965 (match_operand:SI 2 "const_int_operand" "n"))
1966 (match_operand:SI 3 "const_int_operand" "n"))).
1967 LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
1968 return 0 for simple right / left or left / right shift combination.
1969 return 1 for a combination of shifts with zero_extend.
1970 return 2 for a combination of shifts with an AND that needs r0.
1971 return 3 for a combination of shifts with an AND that needs an extra
1972 scratch register, when the three highmost bits of the AND mask are clear.
1973 return 4 for a combination of shifts with an AND that needs an extra
1974 scratch register, when any of the three highmost bits of the AND mask
1975 is set.
1976 If ATTRP is set, store an initial right shift width in ATTRP[0],
1977 and the instruction length in ATTRP[1]. These values are not valid
1978 when returning 0.
1979 When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
1980 shift_amounts for the last shift value that is to be used before the
1981 sign extend. */
1983 shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
1985 unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
1986 int left = INTVAL (left_rtx), right;
1987 int best = 0;
1988 int cost, best_cost = 10000;
1989 int best_right = 0, best_len = 0;
1990 int i;
1991 int can_ext;
1993 if (left < 0 || left > 31)
1994 return 0;
1995 if (GET_CODE (mask_rtx) == CONST_INT)
1996 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
1997 else
1998 mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
1999 /* Can this be expressed as a right shift / left shift pair ? */
2000 lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2001 right = exact_log2 (lsb);
2002 mask2 = ~(mask + lsb - 1);
2003 lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
2004 /* mask has no zeroes but trailing zeroes <==> ! mask2 */
2005 if (! mask2)
2006 best_cost = shift_insns[right] + shift_insns[right + left];
2007 /* mask has no trailing zeroes <==> ! right */
2008 else if (! right && mask2 == ~(lsb2 - 1))
2010 int late_right = exact_log2 (lsb2);
2011 best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2013 /* Try to use zero extend */
2014 if (mask2 == ~(lsb2 - 1))
2016 int width, first;
2018 for (width = 8; width <= 16; width += 8)
2020 /* Can we zero-extend right away? */
2021 if (lsb2 == (unsigned HOST_WIDE_INT)1 << width)
2023 cost
2024 = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2025 if (cost < best_cost)
2027 best = 1;
2028 best_cost = cost;
2029 best_right = right;
2030 best_len = cost;
2031 if (attrp)
2032 attrp[2] = -1;
2034 continue;
2036 /* ??? Could try to put zero extend into initial right shift,
2037 or even shift a bit left before the right shift. */
2038 /* Determine value of first part of left shift, to get to the
2039 zero extend cut-off point. */
2040 first = width - exact_log2 (lsb2) + right;
2041 if (first >= 0 && right + left - first >= 0)
2043 cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2044 + ext_shift_insns[right + left - first];
2045 if (cost < best_cost)
2047 best = 1;
2048 best_cost = cost;
2049 best_right = right;
2050 best_len = cost;
2051 if (attrp)
2052 attrp[2] = first;
2057 /* Try to use r0 AND pattern */
2058 for (i = 0; i <= 2; i++)
2060 if (i > right)
2061 break;
2062 if (! CONST_OK_FOR_K08 (mask >> i))
2063 continue;
2064 cost = (i != 0) + 2 + ext_shift_insns[left + i];
2065 if (cost < best_cost)
2067 best = 2;
2068 best_cost = cost;
2069 best_right = i;
2070 best_len = cost - 1;
2073 /* Try to use a scratch register to hold the AND operand. */
2074 can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT)3 << 30)) == 0;
2075 for (i = 0; i <= 2; i++)
2077 if (i > right)
2078 break;
2079 cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2080 + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2081 if (cost < best_cost)
2083 best = 4 - can_ext;
2084 best_cost = cost;
2085 best_right = i;
2086 best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2090 if (attrp)
2092 attrp[0] = best_right;
2093 attrp[1] = best_len;
2095 return best;
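/* Worked example (illustrative, assuming the usual shift cost tables):
for (x << 2) & 0x3fc, LEFT_RTX is 2 and mask >> left is 0xff; the
plain shift pair costs shift_insns[8] + shift_insns[10] == 3, while
zero extending first costs 1 + ext_shift_insns[2] == 2 (extu.b plus
shll2), so this returns 1. */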
2098 /* This is used in length attributes of the unnamed instructions
2099 corresponding to shl_and_kind return values of 1 and 2. */
2101 shl_and_length (rtx insn)
2103 rtx set_src, left_rtx, mask_rtx;
2104 int attributes[3];
2106 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2107 left_rtx = XEXP (XEXP (set_src, 0), 1);
2108 mask_rtx = XEXP (set_src, 1);
2109 shl_and_kind (left_rtx, mask_rtx, attributes);
2110 return attributes[1];
2113 /* This is used in length attribute of the and_shl_scratch instruction. */
2116 shl_and_scr_length (rtx insn)
2118 rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2119 int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2120 rtx op = XEXP (set_src, 0);
2121 len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2122 op = XEXP (XEXP (op, 0), 0);
2123 return len + shift_insns[INTVAL (XEXP (op, 1))];
2126 /* Nonzero while still generating rtl, before any optimization passes. */
2127 extern int rtx_equal_function_value_matters;
2129 /* Generate rtl for instructions for which shl_and_kind advised a particular
2130 method of generating them, i.e. returned nonzero. */
2133 gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
2135 int attributes[3];
2136 unsigned HOST_WIDE_INT mask;
2137 int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
2138 int right, total_shift;
2139 void (*shift_gen_fun) (int, rtx*) = gen_shifty_hi_op;
2141 right = attributes[0];
2142 total_shift = INTVAL (left_rtx) + right;
2143 mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
2144 switch (kind)
2146 default:
2147 return -1;
2148 case 1:
2150 int first = attributes[2];
2151 rtx operands[3];
2153 if (first < 0)
2155 emit_insn ((mask << right) <= 0xff
2156 ? gen_zero_extendqisi2(dest,
2157 gen_lowpart (QImode, source))
2158 : gen_zero_extendhisi2(dest,
2159 gen_lowpart (HImode, source)));
2160 source = dest;
2162 if (source != dest)
2163 emit_insn (gen_movsi (dest, source));
2164 operands[0] = dest;
2165 if (right)
2167 operands[2] = GEN_INT (right);
2168 gen_shifty_hi_op (LSHIFTRT, operands);
2170 if (first > 0)
2172 operands[2] = GEN_INT (first);
2173 gen_shifty_hi_op (ASHIFT, operands);
2174 total_shift -= first;
2175 mask <<= first;
2177 if (first >= 0)
2178 emit_insn (mask <= 0xff
2179 ? gen_zero_extendqisi2(dest, gen_lowpart (QImode, dest))
2180 : gen_zero_extendhisi2(dest, gen_lowpart (HImode, dest)));
2181 if (total_shift > 0)
2183 operands[2] = GEN_INT (total_shift);
2184 gen_shifty_hi_op (ASHIFT, operands);
2186 break;
2188 case 4:
2189 shift_gen_fun = gen_shifty_op;
2190 case 3:
2191 /* If the topmost bit that matters is set, set the topmost bits
2192 that don't matter. This way, we might be able to get a shorter
2193 signed constant. */
2194 if (mask & ((HOST_WIDE_INT)1 << (31 - total_shift)))
2195 mask |= (HOST_WIDE_INT)~0 << (31 - total_shift);
2196 case 2:
2197 /* Don't expand fine-grained when combining, because that will
2198 make the pattern fail. */
2199 if (rtx_equal_function_value_matters
2200 || reload_in_progress || reload_completed)
2202 rtx operands[3];
2204 /* Cases 3 and 4 should be handled by this split
2205 only while combining. */
2206 if (kind > 2)
2207 abort ();
2208 if (right)
2210 emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
2211 source = dest;
2213 emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
2214 if (total_shift)
2216 operands[0] = dest;
2217 operands[1] = dest;
2218 operands[2] = GEN_INT (total_shift);
2219 shift_gen_fun (ASHIFT, operands);
2221 break;
2223 else
2225 int neg = 0;
2226 if (kind != 4 && total_shift < 16)
2228 neg = -ext_shift_amounts[total_shift][1];
2229 if (neg > 0)
2230 neg -= ext_shift_amounts[total_shift][2];
2231 else
2232 neg = 0;
2234 emit_insn (gen_and_shl_scratch (dest, source,
2235 GEN_INT (right),
2236 GEN_INT (mask),
2237 GEN_INT (total_shift + neg),
2238 GEN_INT (neg)));
2239 emit_insn (gen_movsi (dest, dest));
2240 break;
2243 return 0;
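/* As an illustration (not from the original source), a kind 2
combination such as (x << 1) & 0xaa arrives at case 2 above with
right == 0 and mask == 0x55: we emit the r0-based AND with 0x55
followed by a single left shift, instead of trying to build the
sparse mask out of shifts. */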
2246 /* Try to find a good way to implement the combiner pattern
2247 [(set (match_operand:SI 0 "register_operand" "=r")
2248 (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2249 (match_operand:SI 2 "const_int_operand" "n")
2250 (match_operand:SI 3 "const_int_operand" "n")
2251 (const_int 0)))
2252 (clobber (reg:SI T_REG))]
2253 LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
2254 return 0 for simple left / right shift combination.
2255 return 1 for left shift / 8 bit sign extend / left shift.
2256 return 2 for left shift / 16 bit sign extend / left shift.
2257 return 3 for left shift / 8 bit sign extend / shift / sign extend.
2258 return 4 for left shift / 16 bit sign extend / shift / sign extend.
2259 return 5 for left shift / 16 bit sign extend / right shift
2260 return 6 for < 8 bit sign extend / left shift.
2261 return 7 for < 8 bit sign extend / left shift / single right shift.
2262 If COSTP is nonzero, assign the calculated cost to *COSTP. */
2265 shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
2267 int left, size, insize, ext;
2268 int cost = 0, best_cost;
2269 int kind;
2271 left = INTVAL (left_rtx);
2272 size = INTVAL (size_rtx);
2273 insize = size - left;
2274 if (insize <= 0)
2275 abort ();
2276 /* Default to left / right shift. */
2277 kind = 0;
2278 best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
2279 if (size <= 16)
2281 /* 16 bit shift / sign extend / 16 bit shift */
2282 cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
2283 /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
2284 below, by alternative 3 or something even better. */
2285 if (cost < best_cost)
2287 kind = 5;
2288 best_cost = cost;
2291 /* Try a plain sign extend between two shifts. */
2292 for (ext = 16; ext >= insize; ext -= 8)
2294 if (ext <= size)
2296 cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
2297 if (cost < best_cost)
2299 kind = ext / (unsigned) 8;
2300 best_cost = cost;
2303 /* Check if we can do a sloppy shift with a final signed shift
2304 restoring the sign. */
2305 if (EXT_SHIFT_SIGNED (size - ext))
2306 cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
2307 /* If not, maybe it's still cheaper to do the second shift sloppy,
2308 and do a final sign extend? */
2309 else if (size <= 16)
2310 cost = ext_shift_insns[ext - insize] + 1
2311 + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
2312 else
2313 continue;
2314 if (cost < best_cost)
2316 kind = ext / (unsigned) 8 + 2;
2317 best_cost = cost;
2320 /* Check if we can sign extend in r0 */
2321 if (insize < 8)
2323 cost = 3 + shift_insns[left];
2324 if (cost < best_cost)
2326 kind = 6;
2327 best_cost = cost;
2329 /* Try the same with a final signed shift. */
2330 if (left < 31)
2332 cost = 3 + ext_shift_insns[left + 1] + 1;
2333 if (cost < best_cost)
2335 kind = 7;
2336 best_cost = cost;
2340 if (TARGET_SH3)
2342 /* Try to use a dynamic shift. */
2343 cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
2344 if (cost < best_cost)
2346 kind = 0;
2347 best_cost = cost;
2350 if (costp)
2351 *costp = cost;
2352 return kind;
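/* Worked example (illustrative, assuming the usual cost tables):
extracting the low 16 bits of (x << 8) as a signed value -- LEFT 8,
SIZE 16, INSIZE 8 -- picks kind 5 above: shift left by 16 - INSIZE
== 8, sign extend with exts.w, and no final right shift; two insns
against four for the default shift / arithmetic-right-shift pair. */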
2355 /* Function to be used in the length attribute of the instructions
2356 implementing this pattern. */
2359 shl_sext_length (rtx insn)
2361 rtx set_src, left_rtx, size_rtx;
2362 int cost;
2364 set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2365 left_rtx = XEXP (XEXP (set_src, 0), 1);
2366 size_rtx = XEXP (set_src, 1);
2367 shl_sext_kind (left_rtx, size_rtx, &cost);
2368 return cost;
2371 /* Generate rtl for this pattern */
2374 gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
2376 int kind;
2377 int left, size, insize, cost;
2378 rtx operands[3];
2380 kind = shl_sext_kind (left_rtx, size_rtx, &cost);
2381 left = INTVAL (left_rtx);
2382 size = INTVAL (size_rtx);
2383 insize = size - left;
2384 switch (kind)
2386 case 1:
2387 case 2:
2388 case 3:
2389 case 4:
2391 int ext = kind & 1 ? 8 : 16;
2392 int shift2 = size - ext;
2394 /* Don't expand fine-grained when combining, because that will
2395 make the pattern fail. */
2396 if (! rtx_equal_function_value_matters
2397 && ! reload_in_progress && ! reload_completed)
2399 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2400 emit_insn (gen_movsi (dest, source));
2401 break;
2403 if (dest != source)
2404 emit_insn (gen_movsi (dest, source));
2405 operands[0] = dest;
2406 if (ext - insize)
2408 operands[2] = GEN_INT (ext - insize);
2409 gen_shifty_hi_op (ASHIFT, operands);
2411 emit_insn (kind & 1
2412 ? gen_extendqisi2(dest, gen_lowpart (QImode, dest))
2413 : gen_extendhisi2(dest, gen_lowpart (HImode, dest)));
2414 if (kind <= 2)
2416 if (shift2)
2418 operands[2] = GEN_INT (shift2);
2419 gen_shifty_op (ASHIFT, operands);
2422 else
2424 if (shift2 > 0)
2426 if (EXT_SHIFT_SIGNED (shift2))
2428 operands[2] = GEN_INT (shift2 + 1);
2429 gen_shifty_op (ASHIFT, operands);
2430 operands[2] = const1_rtx;
2431 gen_shifty_op (ASHIFTRT, operands);
2432 break;
2434 operands[2] = GEN_INT (shift2);
2435 gen_shifty_hi_op (ASHIFT, operands);
2437 else if (shift2)
2439 operands[2] = GEN_INT (-shift2);
2440 gen_shifty_hi_op (LSHIFTRT, operands);
2442 emit_insn (size <= 8
2443 ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
2444 : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2446 break;
2448 case 5:
2450 int i = 16 - size;
2451 if (! rtx_equal_function_value_matters
2452 && ! reload_in_progress && ! reload_completed)
2453 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2454 else
2456 operands[0] = dest;
2457 operands[2] = GEN_INT (16 - insize);
2458 gen_shifty_hi_op (ASHIFT, operands);
2459 emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2461 /* Don't use gen_ashrsi3 because it generates new pseudos. */
2462 while (--i >= 0)
2463 gen_ashift (ASHIFTRT, 1, dest);
2464 break;
2466 case 6:
2467 case 7:
2468 /* Don't expand fine-grained when combining, because that will
2469 make the pattern fail. */
2470 if (! rtx_equal_function_value_matters
2471 && ! reload_in_progress && ! reload_completed)
2473 emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2474 emit_insn (gen_movsi (dest, source));
2475 break;
2477 emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
2478 emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
2479 emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
2480 operands[0] = dest;
2481 operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
2482 gen_shifty_op (ASHIFT, operands);
2483 if (kind == 7)
2484 emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
2485 break;
2486 default:
2487 return -1;
2489 return 0;
2492 /* Prefix a symbol_ref name with "datalabel". */
2495 gen_datalabel_ref (rtx sym)
2497 if (GET_CODE (sym) == LABEL_REF)
2498 return gen_rtx_CONST (GET_MODE (sym),
2499 gen_rtx_UNSPEC (GET_MODE (sym),
2500 gen_rtvec (1, sym),
2501 UNSPEC_DATALABEL));
2503 if (GET_CODE (sym) != SYMBOL_REF)
2504 abort ();
2506 return sym;
2510 /* The SH cannot load a large constant into a register; constants have to
2511 come from a pc relative load. The reference of a pc relative load
2512 instruction must be less than 1k in front of the instruction. This
2513 means that we often have to dump a constant inside a function, and
2514 generate code to branch around it.
2516 It is important to minimize this, since the branches will slow things
2517 down and make things bigger.
2519 Worst case code looks like:
2521 mov.l L1,rn
2522 bra L2
2524 align
2525 L1: .long value
2529 mov.l L3,rn
2530 bra L4
2532 align
2533 L3: .long value
2537 We fix this by performing a scan before scheduling, which notices which
2538 instructions need to have their operands fetched from the constant table
2539 and builds the table.
2541 The algorithm is:
2543 scan, find an instruction which needs a pcrel move. Look forward, find the
2544 last barrier which is within MAX_COUNT bytes of the requirement.
2545 If there isn't one, make one. Process all the instructions between
2546 the insn we found and the barrier.
2548 In the above example, we can tell that L3 is within 1k of L1, so
2549 the first move can be shrunk from the 3 insn+constant sequence into
2550 just 1 insn, and the constant moved to L3 to make:
2552 mov.l L1,rn
2554 mov.l L3,rn
2555 bra L4
2557 align
2558 L3:.long value
2559 L4:.long value
2561 Then the second move becomes the target for the shortening process. */
2563 typedef struct
2565 rtx value; /* Value in table. */
2566 rtx label; /* Label of value. */
2567 rtx wend; /* End of window. */
2568 enum machine_mode mode; /* Mode of value. */
2570 /* True if this constant is accessed as part of a post-increment
2571 sequence. Note that HImode constants are never accessed in this way. */
2572 bool part_of_sequence_p;
2573 } pool_node;
2575 /* The maximum number of constants that can fit into one pool, since
2576 the pc relative range is 0...1020 bytes and constants are at least 4
2577 bytes long. */
2579 #define MAX_POOL_SIZE (1020/4)
2580 static pool_node pool_vector[MAX_POOL_SIZE];
2581 static int pool_size;
2582 static rtx pool_window_label;
2583 static int pool_window_last;
2585 /* ??? If we need a constant in HImode which is the truncated value of a
2586 constant we need in SImode, we could combine the two entries thus saving
2587 two bytes. Is this common enough to be worth the effort of implementing
2588 it? */
2590 /* ??? This stuff should be done at the same time that we shorten branches.
2591 As it is now, we must assume that all branches are the maximum size, and
2592 this causes us to almost always output constant pools sooner than
2593 necessary. */
2595 /* Add a constant to the pool and return its label. */
2597 static rtx
2598 add_constant (rtx x, enum machine_mode mode, rtx last_value)
2600 int i;
2601 rtx lab, new, ref, newref;
2603 /* First see if we've already got it. */
2604 for (i = 0; i < pool_size; i++)
2606 if (x->code == pool_vector[i].value->code
2607 && mode == pool_vector[i].mode)
2609 if (x->code == CODE_LABEL)
2611 if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
2612 continue;
2614 if (rtx_equal_p (x, pool_vector[i].value))
2616 lab = new = 0;
2617 if (! last_value
2618 || ! i
2619 || ! rtx_equal_p (last_value, pool_vector[i-1].value))
2621 new = gen_label_rtx ();
2622 LABEL_REFS (new) = pool_vector[i].label;
2623 pool_vector[i].label = lab = new;
2625 if (lab && pool_window_label)
2627 newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
2628 ref = pool_vector[pool_window_last].wend;
2629 LABEL_NEXTREF (newref) = ref;
2630 pool_vector[pool_window_last].wend = newref;
2632 if (new)
2633 pool_window_label = new;
2634 pool_window_last = i;
2635 return lab;
2640 /* Need a new one. */
2641 pool_vector[pool_size].value = x;
2642 if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
2644 lab = 0;
2645 pool_vector[pool_size - 1].part_of_sequence_p = true;
2647 else
2648 lab = gen_label_rtx ();
2649 pool_vector[pool_size].mode = mode;
2650 pool_vector[pool_size].label = lab;
2651 pool_vector[pool_size].wend = NULL_RTX;
2652 pool_vector[pool_size].part_of_sequence_p = (lab == 0);
2653 if (lab && pool_window_label)
2655 newref = gen_rtx_LABEL_REF (VOIDmode, pool_window_label);
2656 ref = pool_vector[pool_window_last].wend;
2657 LABEL_NEXTREF (newref) = ref;
2658 pool_vector[pool_window_last].wend = newref;
2660 if (lab)
2661 pool_window_label = lab;
2662 pool_window_last = pool_size;
2663 pool_size++;
2664 return lab;
2667 /* Output the literal table. */
2669 static void
2670 dump_table (rtx scan)
2672 int i;
2673 int need_align = 1;
2674 rtx lab, ref;
2675 int have_df = 0;
2677 /* Do two passes; on the first pass, dump out the HI sized constants. */
2679 for (i = 0; i < pool_size; i++)
2681 pool_node *p = &pool_vector[i];
2683 if (p->mode == HImode)
2685 if (need_align)
2687 scan = emit_insn_after (gen_align_2 (), scan);
2688 need_align = 0;
2690 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2691 scan = emit_label_after (lab, scan);
2692 scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
2693 scan);
2694 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2696 lab = XEXP (ref, 0);
2697 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
2700 else if (p->mode == DFmode)
2701 have_df = 1;
2704 need_align = 1;
2706 if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
2708 rtx align_insn = NULL_RTX;
2710 scan = emit_label_after (gen_label_rtx (), scan);
2711 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
2712 need_align = 0;
2714 for (i = 0; i < pool_size; i++)
2716 pool_node *p = &pool_vector[i];
2718 switch (p->mode)
2720 case HImode:
2721 break;
2722 case SImode:
2723 case SFmode:
2724 if (align_insn && !p->part_of_sequence_p)
2726 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2727 emit_label_before (lab, align_insn);
2728 emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
2729 align_insn);
2730 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2732 lab = XEXP (ref, 0);
2733 emit_insn_before (gen_consttable_window_end (lab),
2734 align_insn);
2736 delete_insn (align_insn);
2737 align_insn = NULL_RTX;
2738 continue;
2740 else
2742 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2743 scan = emit_label_after (lab, scan);
2744 scan = emit_insn_after (gen_consttable_4 (p->value,
2745 const0_rtx), scan);
2746 need_align = ! need_align;
2748 break;
2749 case DFmode:
2750 if (need_align)
2752 scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
2753 align_insn = scan;
2754 need_align = 0;
2756 case DImode:
2757 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2758 scan = emit_label_after (lab, scan);
2759 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
2760 scan);
2761 break;
2762 default:
2763 abort ();
2764 break;
2767 if (p->mode != HImode)
2769 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2771 lab = XEXP (ref, 0);
2772 scan = emit_insn_after (gen_consttable_window_end (lab),
2773 scan);
2778 pool_size = 0;
2781 for (i = 0; i < pool_size; i++)
2783 pool_node *p = &pool_vector[i];
2785 switch (p->mode)
2787 case HImode:
2788 break;
2789 case SImode:
2790 case SFmode:
2791 if (need_align)
2793 need_align = 0;
2794 scan = emit_label_after (gen_label_rtx (), scan);
2795 scan = emit_insn_after (gen_align_4 (), scan);
2797 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2798 scan = emit_label_after (lab, scan);
2799 scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
2800 scan);
2801 break;
2802 case DFmode:
2803 case DImode:
2804 if (need_align)
2806 need_align = 0;
2807 scan = emit_label_after (gen_label_rtx (), scan);
2808 scan = emit_insn_after (gen_align_4 (), scan);
2810 for (lab = p->label; lab; lab = LABEL_REFS (lab))
2811 scan = emit_label_after (lab, scan);
2812 scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
2813 scan);
2814 break;
2815 default:
2816 abort ();
2817 break;
2820 if (p->mode != HImode)
2822 for (ref = p->wend; ref; ref = LABEL_NEXTREF (ref))
2824 lab = XEXP (ref, 0);
2825 scan = emit_insn_after (gen_consttable_window_end (lab), scan);
2830 scan = emit_insn_after (gen_consttable_end (), scan);
2831 scan = emit_barrier_after (scan);
2832 pool_size = 0;
2833 pool_window_label = NULL_RTX;
2834 pool_window_last = 0;
2837 /* Return nonzero if the constant would be an ok source for a
2838 mov.w instead of a mov.l. */
2840 static int
2841 hi_const (rtx src)
2843 return (GET_CODE (src) == CONST_INT
2844 && INTVAL (src) >= -32768
2845 && INTVAL (src) <= 32767);
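/* For example, -300 or 0x1234 can be placed in the pool as 2 byte
mov.w entries, while 0x12345 exceeds the sign-extended 16 bit range
and needs a 4 byte mov.l entry. */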
2848 /* Nonzero if the insn is a move instruction which needs to be fixed. */
2850 /* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
2851 CONST_DOUBLE input value is CONST_OK_FOR_I08. For an SFmode move, we don't
2852 need to fix it if the input value is CONST_OK_FOR_I08. */
2854 static int
2855 broken_move (rtx insn)
2857 if (GET_CODE (insn) == INSN)
2859 rtx pat = PATTERN (insn);
2860 if (GET_CODE (pat) == PARALLEL)
2861 pat = XVECEXP (pat, 0, 0);
2862 if (GET_CODE (pat) == SET
2863 /* We can load any 8 bit value if we don't care what the high
2864 order bits end up as. */
2865 && GET_MODE (SET_DEST (pat)) != QImode
2866 && (CONSTANT_P (SET_SRC (pat))
2867 /* Match mova_const. */
2868 || (GET_CODE (SET_SRC (pat)) == UNSPEC
2869 && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
2870 && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
2871 && ! (TARGET_SH2E
2872 && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
2873 && (fp_zero_operand (SET_SRC (pat))
2874 || fp_one_operand (SET_SRC (pat)))
2875 /* ??? If this is a -m4 or -m4-single compilation, in general
2876 we don't know the current setting of fpscr, so disable fldi.
2877 There is an exception if this was a register-register move
2878 before reload - and hence it was ascertained that we have
2879 single precision setting - and in a post-reload optimization
2880 we changed this to do a constant load. In that case
2881 we don't have an r0 clobber, hence we must use fldi. */
2882 && (! TARGET_SH4 || TARGET_FMOVD
2883 || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
2884 == SCRATCH))
2885 && GET_CODE (SET_DEST (pat)) == REG
2886 && FP_REGISTER_P (REGNO (SET_DEST (pat))))
2887 && (GET_CODE (SET_SRC (pat)) != CONST_INT
2888 || ! CONST_OK_FOR_I08 (INTVAL (SET_SRC (pat)))))
2889 return 1;
2892 return 0;
2895 static int
2896 mova_p (rtx insn)
2898 return (GET_CODE (insn) == INSN
2899 && GET_CODE (PATTERN (insn)) == SET
2900 && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
2901 && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
2902 /* Don't match mova_const. */
2903 && GET_CODE (XVECEXP (SET_SRC (PATTERN (insn)), 0, 0)) == LABEL_REF);
2906 /* Find the last barrier from insn FROM which is close enough to hold the
2907 constant pool. If we can't find one, then create one near the end of
2908 the range. */
2910 static rtx
2911 find_barrier (int num_mova, rtx mova, rtx from)
2913 int count_si = 0;
2914 int count_hi = 0;
2915 int found_hi = 0;
2916 int found_si = 0;
2917 int found_di = 0;
2918 int hi_align = 2;
2919 int si_align = 2;
2920 int leading_mova = num_mova;
2921 rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
2922 int si_limit;
2923 int hi_limit;
2925 /* For HImode: range is 510, add 4 because pc counts from address of
2926 second instruction after this one, subtract 2 for the jump instruction
2927 that we may need to emit before the table, subtract 2 for the instruction
2928 that fills the jump delay slot (in very rare cases, reorg will take an
2929 instruction from after the constant pool or will leave the delay slot
2930 empty). This gives 510.
2931 For SImode: range is 1020, add 4 because pc counts from address of
2932 second instruction after this one, subtract 2 in case pc is 2 byte
2933 aligned, subtract 2 for the jump instruction that we may need to emit
2934 before the table, subtract 2 for the instruction that fills the jump
2935 delay slot. This gives 1018. */
2937 /* The branch will always be shortened now that the reference address for
2938 forward branches is the successor address, thus we no longer need to make
2939 adjustments to the [sh]i_limit for -O0. */
2941 si_limit = 1018;
2942 hi_limit = 510;
2944 while (from && count_si < si_limit && count_hi < hi_limit)
2946 int inc = get_attr_length (from);
2947 int new_align = 1;
2949 if (GET_CODE (from) == CODE_LABEL)
2951 if (optimize)
2952 new_align = 1 << label_to_alignment (from);
2953 else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
2954 new_align = 1 << barrier_align (from);
2955 else
2956 new_align = 1;
2957 inc = 0;
2960 if (GET_CODE (from) == BARRIER)
2963 found_barrier = from;
2965 /* If we are at the end of the function, or in front of an alignment
2966 instruction, we need not insert an extra alignment. We prefer
2967 this kind of barrier. */
2968 if (barrier_align (from) > 2)
2969 good_barrier = from;
2972 if (broken_move (from))
2974 rtx pat, src, dst;
2975 enum machine_mode mode;
2977 pat = PATTERN (from);
2978 if (GET_CODE (pat) == PARALLEL)
2979 pat = XVECEXP (pat, 0, 0);
2980 src = SET_SRC (pat);
2981 dst = SET_DEST (pat);
2982 mode = GET_MODE (dst);
2984 /* We must explicitly check the mode, because sometimes the
2985 front end will generate code to load unsigned constants into
2986 HImode targets without properly sign extending them. */
2987 if (mode == HImode
2988 || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
2990 found_hi += 2;
2991 /* We put the short constants before the long constants, so
2992 we must count the length of short constants in the range
2993 for the long constants. */
2994 /* ??? This isn't optimal, but is easy to do. */
2995 si_limit -= 2;
2997 else
2999 /* We dump DF/DI constants before SF/SI ones, because
3000 the limit is the same, but the alignment requirements
3001 are higher. We may waste up to 4 additional bytes
3002 for alignment, and the DF/DI constant may have
3003 another SF/SI constant placed before it. */
3004 if (TARGET_SHCOMPACT
3005 && ! found_di
3006 && (mode == DFmode || mode == DImode))
3008 found_di = 1;
3009 si_limit -= 8;
3011 while (si_align > 2 && found_si + si_align - 2 > count_si)
3012 si_align >>= 1;
3013 if (found_si > count_si)
3014 count_si = found_si;
3015 found_si += GET_MODE_SIZE (mode);
3016 if (num_mova)
3017 si_limit -= GET_MODE_SIZE (mode);
3020 /* See the code in sh_reorg, which has a similar if
3021 statement that generates a new mova insn in many cases. */
3022 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
3023 inc += 2;
3026 if (mova_p (from))
3028 if (! num_mova++)
3030 leading_mova = 0;
3031 mova = from;
3032 barrier_before_mova = good_barrier ? good_barrier : found_barrier;
3034 if (found_si > count_si)
3035 count_si = found_si;
3037 else if (GET_CODE (from) == JUMP_INSN
3038 && (GET_CODE (PATTERN (from)) == ADDR_VEC
3039 || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
3041 if (num_mova)
3042 num_mova--;
3043 if (barrier_align (next_real_insn (from)) == align_jumps_log)
3045 /* We have just passed the barrier in front of the
3046 ADDR_DIFF_VEC, which is stored in found_barrier. Since
3047 the ADDR_DIFF_VEC is accessed as data, just like our pool
3048 constants, this is a good opportunity to accommodate what
3049 we have gathered so far.
3050 If we waited any longer, we could end up at a barrier in
3051 front of code, which gives worse cache usage for separated
3052 instruction / data caches. */
3053 good_barrier = found_barrier;
3054 break;
3056 else
3058 rtx body = PATTERN (from);
3059 inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
3062 /* For the SH1, we generate alignments even after jumps-around-jumps. */
3063 else if (GET_CODE (from) == JUMP_INSN
3064 && ! TARGET_SH2
3065 && ! TARGET_SMALLCODE)
3066 new_align = 4;
3068 if (found_si)
3070 count_si += inc;
3071 if (new_align > si_align)
3073 si_limit -= (count_si - 1) & (new_align - si_align);
3074 si_align = new_align;
3076 count_si = (count_si + new_align - 1) & -new_align;
3078 if (found_hi)
3080 count_hi += inc;
3081 if (new_align > hi_align)
3083 hi_limit -= (count_hi - 1) & (new_align - hi_align);
3084 hi_align = new_align;
3086 count_hi = (count_hi + new_align - 1) & -new_align;
3088 from = NEXT_INSN (from);
3091 if (num_mova)
3093 if (leading_mova)
3095 /* Try as we might, the leading mova is out of range. Change
3096 it into a load (which will become a pcload) and retry. */
3097 SET_SRC (PATTERN (mova)) = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
3098 INSN_CODE (mova) = -1;
3099 return find_barrier (0, 0, mova);
3101 else
3103 /* Insert the constant pool table before the mova instruction,
3104 to prevent the mova label reference from going out of range. */
3105 from = mova;
3106 good_barrier = found_barrier = barrier_before_mova;
3110 if (found_barrier)
3112 if (good_barrier && next_real_insn (found_barrier))
3113 found_barrier = good_barrier;
3115 else
3117 /* We didn't find a barrier in time to dump our stuff,
3118 so we'll make one. */
3119 rtx label = gen_label_rtx ();
3121 /* If we exceeded the range, then we must back up over the last
3122 instruction we looked at. Otherwise, we just need to undo the
3123 NEXT_INSN at the end of the loop. */
3124 if (count_hi > hi_limit || count_si > si_limit)
3125 from = PREV_INSN (PREV_INSN (from));
3126 else
3127 from = PREV_INSN (from);
3129 /* Walk back to be just before any jump or label.
3130 Putting it before a label reduces the number of times the branch
3131 around the constant pool table will be hit. Putting it before
3132 a jump makes it more likely that the bra delay slot will be
3133 filled. */
3134 while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
3135 || GET_CODE (from) == CODE_LABEL)
3136 from = PREV_INSN (from);
3138 from = emit_jump_insn_after (gen_jump (label), from);
3139 JUMP_LABEL (from) = label;
3140 LABEL_NUSES (label) = 1;
3141 found_barrier = emit_barrier_after (from);
3142 emit_label_after (label, found_barrier);
3145 return found_barrier;
3148 /* If the instruction INSN is implemented by a special function, and we can
3149 positively find the register that is used to call the sfunc, and this
3150 register is not used anywhere else in this instruction - except as the
3151 destination of a set, return this register; else, return 0. */
3153 sfunc_uses_reg (rtx insn)
3155 int i;
3156 rtx pattern, part, reg_part, reg;
3158 if (GET_CODE (insn) != INSN)
3159 return 0;
3160 pattern = PATTERN (insn);
3161 if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
3162 return 0;
3164 for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
3166 part = XVECEXP (pattern, 0, i);
3167 if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
3168 reg_part = part;
3170 if (! reg_part)
3171 return 0;
3172 reg = XEXP (reg_part, 0);
3173 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
3175 part = XVECEXP (pattern, 0, i);
3176 if (part == reg_part || GET_CODE (part) == CLOBBER)
3177 continue;
3178 if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
3179 && GET_CODE (SET_DEST (part)) == REG)
3180 ? SET_SRC (part) : part)))
3181 return 0;
3183 return reg;
3186 /* See if the only way in which INSN uses REG is by calling it, or by
3187 setting it while calling it. Set *SET to a SET rtx if the register
3188 is set by INSN. */
3190 static int
3191 noncall_uses_reg (rtx reg, rtx insn, rtx *set)
3193 rtx pattern, reg2;
3195 *set = NULL_RTX;
3197 reg2 = sfunc_uses_reg (insn);
3198 if (reg2 && REGNO (reg2) == REGNO (reg))
3200 pattern = single_set (insn);
3201 if (pattern
3202 && GET_CODE (SET_DEST (pattern)) == REG
3203 && REGNO (reg) == REGNO (SET_DEST (pattern)))
3204 *set = pattern;
3205 return 0;
3207 if (GET_CODE (insn) != CALL_INSN)
3209 /* We don't use rtx_equal_p because we don't care if the mode is
3210 different. */
3211 pattern = single_set (insn);
3212 if (pattern
3213 && GET_CODE (SET_DEST (pattern)) == REG
3214 && REGNO (reg) == REGNO (SET_DEST (pattern)))
3216 rtx par, part;
3217 int i;
3219 *set = pattern;
3220 par = PATTERN (insn);
3221 if (GET_CODE (par) == PARALLEL)
3222 for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
3224 part = XVECEXP (par, 0, i);
3225 if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
3226 return 1;
3228 return reg_mentioned_p (reg, SET_SRC (pattern));
3231 return 1;
3234 pattern = PATTERN (insn);
3236 if (GET_CODE (pattern) == PARALLEL)
3238 int i;
3240 for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
3241 if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
3242 return 1;
3243 pattern = XVECEXP (pattern, 0, 0);
3246 if (GET_CODE (pattern) == SET)
3248 if (reg_mentioned_p (reg, SET_DEST (pattern)))
3250 /* We don't use rtx_equal_p, because we don't care if the
3251 mode is different. */
3252 if (GET_CODE (SET_DEST (pattern)) != REG
3253 || REGNO (reg) != REGNO (SET_DEST (pattern)))
3254 return 1;
3256 *set = pattern;
3259 pattern = SET_SRC (pattern);
3262 if (GET_CODE (pattern) != CALL
3263 || GET_CODE (XEXP (pattern, 0)) != MEM
3264 || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
3265 return 1;
3267 return 0;
3270 /* Given X, a pattern of an insn or a part of it, return a mask of used
3271 general registers. Bits 0..15 mean that the respective registers
3272 are used as inputs in the instruction. Bits 16..31 mean that the
3273 registers 0..15, respectively, are used as outputs, or are clobbered.
3274 IS_DEST should be set to 16 if X is the destination of a SET, else to 0. */
3276 regs_used (rtx x, int is_dest)
3278 enum rtx_code code;
3279 const char *fmt;
3280 int i, used = 0;
3282 if (! x)
3283 return used;
3284 code = GET_CODE (x);
3285 switch (code)
3287 case REG:
3288 if (REGNO (x) < 16)
3289 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
3290 << (REGNO (x) + is_dest));
3291 return 0;
3292 case SUBREG:
3294 rtx y = SUBREG_REG (x);
3296 if (GET_CODE (y) != REG)
3297 break;
3298 if (REGNO (y) < 16)
3299 return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
3300 << (REGNO (y) +
3301 subreg_regno_offset (REGNO (y),
3302 GET_MODE (y),
3303 SUBREG_BYTE (x),
3304 GET_MODE (x)) + is_dest));
3305 return 0;
3307 case SET:
3308 return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
3309 case RETURN:
3310 /* If there was a return value, it must have been indicated with USE. */
3311 return 0x00ffff00;
3312 case CLOBBER:
3313 is_dest = 1;
3314 break;
3315 case MEM:
3316 is_dest = 0;
3317 break;
3318 case CALL:
3319 used |= 0x00ff00f0;
3320 break;
3321 default:
3322 break;
3325 fmt = GET_RTX_FORMAT (code);
3327 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3329 if (fmt[i] == 'E')
3331 register int j;
3332 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3333 used |= regs_used (XVECEXP (x, i, j), is_dest);
3335 else if (fmt[i] == 'e')
3336 used |= regs_used (XEXP (x, i), is_dest);
3338 return used;
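/* For example, (set (reg:SI 1) (plus:SI (reg:SI 2) (reg:SI 3))) yields
0x0002000c, assuming one hard register per SImode value: bits 2 and 3
for the two input registers, and bit 16 + 1 for the register that is
set. */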
3341 /* Create an instruction that prevents redirection of a conditional branch
3342 to the destination of the JUMP with address ADDR.
3343 If the branch needs to be implemented as an indirect jump, try to find
3344 a scratch register for it.
3345 If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
3346 If any preceding insn that doesn't fit into a delay slot is good enough,
3347 pass 1. Pass 2 if a definite blocking insn is needed.
3348 -1 is used internally to avoid deep recursion.
3349 If a blocking instruction is made or recognized, return it. */
3351 static rtx
3352 gen_block_redirect (rtx jump, int addr, int need_block)
3354 int dead = 0;
3355 rtx prev = prev_nonnote_insn (jump);
3356 rtx dest;
3358 /* First, check if we already have an instruction that satisfies our need. */
3359 if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
3361 if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
3362 return prev;
3363 if (GET_CODE (PATTERN (prev)) == USE
3364 || GET_CODE (PATTERN (prev)) == CLOBBER
3365 || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
3366 prev = jump;
3367 else if ((need_block &= ~1) < 0)
3368 return prev;
3369 else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
3370 need_block = 0;
3372 if (GET_CODE (PATTERN (jump)) == RETURN)
3374 if (! need_block)
3375 return prev;
3376 /* Reorg even does nasty things with return insns that cause branches
3377 to go out of range - see find_end_label and callers. */
3378 return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
3380 /* We can't use JUMP_LABEL here because it might be undefined
3381 when not optimizing. */
3382 dest = XEXP (SET_SRC (PATTERN (jump)), 0);
3383 /* If the branch is out of range, try to find a scratch register for it. */
3384 if (optimize
3385 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
3386 > 4092 + 4098))
3388 rtx scan;
3389 /* Don't look for the stack pointer as a scratch register,
3390 it would cause trouble if an interrupt occurred. */
3391 unsigned try = 0x7fff, used;
3392 int jump_left = flag_expensive_optimizations + 1;
3394 /* It is likely that the most recent eligible instruction is wanted for
3395 the delay slot. Therefore, find out which registers it uses, and
3396 try to avoid using them. */
3398 for (scan = jump; (scan = PREV_INSN (scan)); )
3400 enum rtx_code code;
3402 if (INSN_DELETED_P (scan))
3403 continue;
3404 code = GET_CODE (scan);
3405 if (code == CODE_LABEL || code == JUMP_INSN)
3406 break;
3407 if (code == INSN
3408 && GET_CODE (PATTERN (scan)) != USE
3409 && GET_CODE (PATTERN (scan)) != CLOBBER
3410 && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
3412 try &= ~regs_used (PATTERN (scan), 0);
3413 break;
3416 for (used = dead = 0, scan = JUMP_LABEL (jump);
3417 (scan = NEXT_INSN (scan)); )
3419 enum rtx_code code;
3421 if (INSN_DELETED_P (scan))
3422 continue;
3423 code = GET_CODE (scan);
3424 if (GET_RTX_CLASS (code) == 'i')
3426 used |= regs_used (PATTERN (scan), 0);
3427 if (code == CALL_INSN)
3428 used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
3429 dead |= (used >> 16) & ~used;
3430 if (dead & try)
3432 dead &= try;
3433 break;
3435 if (code == JUMP_INSN)
3437 if (jump_left-- && simplejump_p (scan))
3438 scan = JUMP_LABEL (scan);
3439 else
3440 break;
3444 /* Mask out the stack pointer again, in case it was
3445 the only 'free' register we have found. */
3446 dead &= 0x7fff;
3448 /* If the immediate destination is still in range, check for possible
3449 threading with a jump beyond the delay slot insn.
3450 Don't check if we are called recursively; the jump has been or will be
3451 checked in a different invocation. */
3453 else if (optimize && need_block >= 0)
3455 rtx next = next_active_insn (next_active_insn (dest));
3456 if (next && GET_CODE (next) == JUMP_INSN
3457 && GET_CODE (PATTERN (next)) == SET
3458 && recog_memoized (next) == CODE_FOR_jump_compact)
3460 dest = JUMP_LABEL (next);
3461 if (dest
3462 && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
3463 > 4092 + 4098))
3464 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
3468 if (dead)
3470 rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
3472 /* It would be nice if we could convert the jump into an indirect
3473 jump / far branch right now, thus exposing all constituent
3474 instructions to further optimization. However, reorg uses
3475 simplejump_p to determine if there is an unconditional jump where
3476 it should try to schedule instructions from the target of the
3477 branch; simplejump_p fails for indirect jumps even if they have
3478 a JUMP_LABEL. */
3479 rtx insn = emit_insn_before (gen_indirect_jump_scratch
3480 (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
3481 , jump);
3482 /* ??? We would like this to have the scope of the jump, but that
3483 scope will change when a delay slot insn of an inner scope is added.
3484 Hence, after delay slot scheduling, we'll have to expect
3485 NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
3486 the jump. */
3488 INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
3489 INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
3490 return insn;
3492 else if (need_block)
3493 /* We can't use JUMP_LABEL here because it might be undefined
3494 when not optimizing. */
3495 return emit_insn_before (gen_block_branch_redirect
3496 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
3497 , jump);
3498 return prev;
3501 #define CONDJUMP_MIN -252
3502 #define CONDJUMP_MAX 262
3503 struct far_branch
3505 /* A label (to be placed) in front of the jump
3506 that jumps to our ultimate destination. */
3507 rtx near_label;
3508 /* Where we are going to insert it if we cannot move the jump any farther,
3509 or the jump itself if we have picked up an existing jump. */
3510 rtx insert_place;
3511 /* The ultimate destination. */
3512 rtx far_label;
3513 struct far_branch *prev;
3514 /* If the branch has already been created, its address;
3515 else the address of its first prospective user. */
3516 int address;
3519 static void gen_far_branch (struct far_branch *);
3520 enum mdep_reorg_phase_e mdep_reorg_phase;
3521 static void
3522 gen_far_branch (struct far_branch *bp)
3524 rtx insn = bp->insert_place;
3525 rtx jump;
3526 rtx label = gen_label_rtx ();
3528 emit_label_after (label, insn);
3529 if (bp->far_label)
3531 jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
3532 LABEL_NUSES (bp->far_label)++;
3534 else
3535 jump = emit_jump_insn_after (gen_return (), insn);
3536 /* Emit a barrier so that reorg knows that any following instructions
3537 are not reachable via a fall-through path.
3538 But don't do this when not optimizing, since we wouldn't suppress the
3539 alignment for the barrier then, and could end up with out-of-range
3540 pc-relative loads. */
3541 if (optimize)
3542 emit_barrier_after (jump);
3543 emit_label_after (bp->near_label, insn);
3544 JUMP_LABEL (jump) = bp->far_label;
3545 if (! invert_jump (insn, label, 1))
3546 abort ();
3547 /* If we are branching around a jump (rather than a return), prevent
3548 reorg from using an insn from the jump target as the delay slot insn -
3549 when reorg did this, it pessimized code (we'd rather hide the delay slot)
3550 and it could cause branches to go out of range. */
3551 if (bp->far_label)
3552 (emit_insn_after
3553 (gen_stuff_delay_slot
3554 (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
3555 GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
3556 insn));
3557 /* Prevent reorg from undoing our splits. */
3558 gen_block_redirect (jump, bp->address += 2, 2);
3561 /* Fix up ADDR_DIFF_VECs. */
3562 void
3563 fixup_addr_diff_vecs (rtx first)
3565 rtx insn;
3567 for (insn = first; insn; insn = NEXT_INSN (insn))
3569 rtx vec_lab, pat, prev, prevpat, x, braf_label;
3571 if (GET_CODE (insn) != JUMP_INSN
3572 || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
3573 continue;
3574 pat = PATTERN (insn);
3575 vec_lab = XEXP (XEXP (pat, 0), 0);
3577 /* Search the matching casesi_jump_2. */
3578 for (prev = vec_lab; ; prev = PREV_INSN (prev))
3580 if (GET_CODE (prev) != JUMP_INSN)
3581 continue;
3582 prevpat = PATTERN (prev);
3583 if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
3584 continue;
3585 x = XVECEXP (prevpat, 0, 1);
3586 if (GET_CODE (x) != USE)
3587 continue;
3588 x = XEXP (x, 0);
3589 if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
3590 break;
3593 /* Emit the reference label of the braf where it belongs, right after
3594 the casesi_jump_2 (i.e. braf). */
3595 braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
3596 emit_label_after (braf_label, prev);
3598 /* Fix up the ADDR_DIFF_VEC to be relative
3599 to the reference address of the braf. */
3600 XEXP (XEXP (pat, 0), 0) = braf_label;
3604 /* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
3605 a barrier. Return the base 2 logarithm of the desired alignment. */
3607 barrier_align (rtx barrier_or_label)
3609 rtx next = next_real_insn (barrier_or_label), pat, prev;
3610 int slot, credit, jump_to_next = 0;
3612 if (! next)
3613 return 0;
3615 pat = PATTERN (next);
3617 if (GET_CODE (pat) == ADDR_DIFF_VEC)
3618 return 2;
3620 if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
3621 /* This is a barrier in front of a constant table. */
3622 return 0;
3624 prev = prev_real_insn (barrier_or_label);
3625 if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
3627 pat = PATTERN (prev);
3628 /* If this is a very small table, we want to keep the alignment after
3629 the table to the minimum for proper code alignment. */
3630 return ((TARGET_SMALLCODE
3631 || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
3632 <= (unsigned)1 << (CACHE_LOG - 2)))
3633 ? 1 << TARGET_SHMEDIA : align_jumps_log);
3636 if (TARGET_SMALLCODE)
3637 return 0;
3639 if (! TARGET_SH2 || ! optimize)
3640 return align_jumps_log;
3642 /* When fixing up pcloads, a constant table might be inserted just before
3643 the basic block that ends with the barrier. Thus, we can't trust the
3644 instruction lengths before that. */
3645 if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
3647 /* Check if there is an immediately preceding branch to the insn beyond
3648 the barrier. We must weigh the cost of discarding useful information
3649 from the current cache line when executing this branch and there is
3650 an alignment, against that of fetching unneeded insns in front of the
3651 branch target when there is no alignment. */
3653 /* There are two delay_slot cases to consider. One is the simple case
3654 where the preceding branch is to the insn beyond the barrier (simple
3655 delay slot filling), and the other is where the preceding branch has
3656 a delay slot that is a duplicate of the insn after the barrier
3657 (fill_eager_delay_slots) and the branch is to the insn after the insn
3658 after the barrier. */
3660 /* PREV is presumed to be the JUMP_INSN for the barrier under
3661 investigation. Skip to the insn before it. */
3662 prev = prev_real_insn (prev);
3664 for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
3665 credit >= 0 && prev && GET_CODE (prev) == INSN;
3666 prev = prev_real_insn (prev))
3668 jump_to_next = 0;
3669 if (GET_CODE (PATTERN (prev)) == USE
3670 || GET_CODE (PATTERN (prev)) == CLOBBER)
3671 continue;
3672 if (GET_CODE (PATTERN (prev)) == SEQUENCE)
3674 prev = XVECEXP (PATTERN (prev), 0, 1);
3675 if (INSN_UID (prev) == INSN_UID (next))
3677 /* Delay slot was filled with insn at jump target. */
3678 jump_to_next = 1;
3679 continue;
3683 if (slot
3684 && get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
3685 slot = 0;
3686 credit -= get_attr_length (prev);
3688 if (prev
3689 && GET_CODE (prev) == JUMP_INSN
3690 && JUMP_LABEL (prev))
3692 rtx x;
3693 if (jump_to_next
3694 || next_real_insn (JUMP_LABEL (prev)) == next
3695 /* If relax_delay_slots() decides NEXT was redundant
3696 with some previous instruction, it will have
3697 redirected PREV's jump to the following insn. */
3698 || JUMP_LABEL (prev) == next_nonnote_insn (next)
3699 /* There is no upper bound on redundant instructions
3700 that might have been skipped, but we must not put an
3701 alignment where none had been before. */
3702 || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
3703 (INSN_P (x)
3704 && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
3705 || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
3706 || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
3708 rtx pat = PATTERN (prev);
3709 if (GET_CODE (pat) == PARALLEL)
3710 pat = XVECEXP (pat, 0, 0);
3711 if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
3712 return 0;
3717 return align_jumps_log;
3720 /* If we are inside a phony loop, almost any kind of label can turn up as the
3721 first one in the loop. Aligning a braf label causes incorrect switch
3722 destination addresses; we can detect braf labels because they are
3723 followed by a BARRIER.
3724 Applying loop alignment to small constant or switch tables is a waste
3725 of space, so we suppress this too. */
3727 sh_loop_align (rtx label)
3729 rtx next = label;
3731 do
3732 next = next_nonnote_insn (next);
3733 while (next && GET_CODE (next) == CODE_LABEL);
3735 if (! next
3736 || ! INSN_P (next)
3737 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
3738 || recog_memoized (next) == CODE_FOR_consttable_2)
3739 return 0;
3741 return align_loops_log;
3744 /* Do a final pass over the function, just before delayed branch
3745 scheduling. */
3747 static void
3748 sh_reorg (void)
3750 rtx first, insn, mova = NULL_RTX;
3751 int num_mova;
3752 rtx r0_rtx = gen_rtx_REG (Pmode, 0);
3753 rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
3755 first = get_insns ();
3757 /* We must split call insns before introducing `mova's. If we're
3758 optimizing, they'll have already been split. Otherwise, make
3759 sure we don't split them too late. */
3760 if (! optimize)
3761 split_all_insns_noflow ();
3763 if (TARGET_SHMEDIA)
3764 return;
3766 /* If relaxing, generate pseudo-ops to associate function calls with
3767 the symbols they call. It does no harm to not generate these
3768 pseudo-ops. However, when we can generate them, it enables the
3769 linker to potentially relax the jsr to a bsr, and eliminate the
3770 register load and, possibly, the constant pool entry. */
3772 mdep_reorg_phase = SH_INSERT_USES_LABELS;
3773 if (TARGET_RELAX)
3775 /* Remove all REG_LABEL notes. We want to use them for our own
3776 purposes. This works because none of the remaining passes
3777 need to look at them.
3779 ??? But it may break in the future. We should use a machine
3780 dependent REG_NOTE, or some other approach entirely. */
3781 for (insn = first; insn; insn = NEXT_INSN (insn))
3783 if (INSN_P (insn))
3785 rtx note;
3787 while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != 0)
3788 remove_note (insn, note);
3792 for (insn = first; insn; insn = NEXT_INSN (insn))
3794 rtx pattern, reg, link, set, scan, dies, label;
3795 int rescan = 0, foundinsn = 0;
3797 if (GET_CODE (insn) == CALL_INSN)
3799 pattern = PATTERN (insn);
3801 if (GET_CODE (pattern) == PARALLEL)
3802 pattern = XVECEXP (pattern, 0, 0);
3803 if (GET_CODE (pattern) == SET)
3804 pattern = SET_SRC (pattern);
3806 if (GET_CODE (pattern) != CALL
3807 || GET_CODE (XEXP (pattern, 0)) != MEM)
3808 continue;
3810 reg = XEXP (XEXP (pattern, 0), 0);
3812 else
3814 reg = sfunc_uses_reg (insn);
3815 if (! reg)
3816 continue;
3819 if (GET_CODE (reg) != REG)
3820 continue;
3822 /* This is a function call via REG. If the only uses of REG
3823 between the time that it is set and the time that it dies
3824 are in function calls, then we can associate all the
3825 function calls with the setting of REG. */
3827 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
3829 if (REG_NOTE_KIND (link) != 0)
3830 continue;
3831 set = single_set (XEXP (link, 0));
3832 if (set && rtx_equal_p (reg, SET_DEST (set)))
3834 link = XEXP (link, 0);
3835 break;
3839 if (! link)
3841 /* ??? Sometimes global register allocation will have
3842 deleted the insn pointed to by LOG_LINKS. Try
3843 scanning backward to find where the register is set. */
3844 for (scan = PREV_INSN (insn);
3845 scan && GET_CODE (scan) != CODE_LABEL;
3846 scan = PREV_INSN (scan))
3848 if (! INSN_P (scan))
3849 continue;
3851 if (! reg_mentioned_p (reg, scan))
3852 continue;
3854 if (noncall_uses_reg (reg, scan, &set))
3855 break;
3857 if (set)
3859 link = scan;
3860 break;
3865 if (! link)
3866 continue;
3868 /* The register is set at LINK. */
3870 /* We can only optimize the function call if the register is
3871 being set to a symbol. In theory, we could sometimes
3872 optimize calls to a constant location, but the assembler
3873 and linker do not support that at present. */
3874 if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
3875 && GET_CODE (SET_SRC (set)) != LABEL_REF)
3876 continue;
3878 /* Scan forward from LINK to the place where REG dies, and
3879 make sure that the only insns which use REG are
3880 themselves function calls. */
3882 /* ??? This doesn't work for call targets that were allocated
3883 by reload, since there may not be a REG_DEAD note for the
3884 register. */
3886 dies = NULL_RTX;
3887 for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
3889 rtx scanset;
3891 /* Don't try to trace forward past a CODE_LABEL if we haven't
3892 seen INSN yet. Ordinarily, we will only find the setting insn
3893 in LOG_LINKS if it is in the same basic block. However,
3894 cross-jumping can insert code labels in between the load and
3895 the call, and can result in situations where a single call
3896 insn may have two targets depending on where we came from. */
3898 if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
3899 break;
3901 if (! INSN_P (scan))
3902 continue;
3904 /* Don't try to trace forward past a JUMP. To optimize
3905 safely, we would have to check that all the
3906 instructions at the jump destination did not use REG. */
3908 if (GET_CODE (scan) == JUMP_INSN)
3909 break;
3911 if (! reg_mentioned_p (reg, scan))
3912 continue;
3914 if (noncall_uses_reg (reg, scan, &scanset))
3915 break;
3917 if (scan == insn)
3918 foundinsn = 1;
3920 if (scan != insn
3921 && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
3923 /* There is a function call to this register other
3924 than the one we are checking. If we optimize
3925 this call, we need to rescan again below. */
3926 rescan = 1;
3929 /* ??? We shouldn't have to worry about SCANSET here.
3930 We should just be able to check for a REG_DEAD note
3931 on a function call. However, the REG_DEAD notes are
3932 apparently not dependable around libcalls; c-torture
3933 execute/920501-2 is a test case. If SCANSET is set,
3934 then this insn sets the register, so it must have
3935 died earlier. Unfortunately, this will only handle
3936 the cases in which the register is, in fact, set in a
3937 later insn. */
3939 /* ??? We shouldn't have to use FOUNDINSN here.
3940 However, the LOG_LINKS fields are apparently not
3941 entirely reliable around libcalls;
3942 newlib/libm/math/e_pow.c is a test case. Sometimes
3943 an insn will appear in LOG_LINKS even though it is
3944 not the most recent insn which sets the register. */
3946 if (foundinsn
3947 && (scanset
3948 || find_reg_note (scan, REG_DEAD, reg)))
3950 dies = scan;
3951 break;
3955 if (! dies)
3957 /* Either there was a branch, or some insn used REG
3958 other than as a function call address. */
3959 continue;
3962 /* Create a code label, and put it in a REG_LABEL note on
3963 the insn which sets the register, and on each call insn
3964 which uses the register. In final_prescan_insn we look
3965 for the REG_LABEL notes, and output the appropriate label
3966 or pseudo-op. */
3968 label = gen_label_rtx ();
3969 REG_NOTES (link) = gen_rtx_INSN_LIST (REG_LABEL, label,
3970 REG_NOTES (link));
3971 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, label,
3972 REG_NOTES (insn));
3973 if (rescan)
3975 scan = link;
3978 rtx reg2;
3980 scan = NEXT_INSN (scan);
3981 if (scan != insn
3982 && ((GET_CODE (scan) == CALL_INSN
3983 && reg_mentioned_p (reg, scan))
3984 || ((reg2 = sfunc_uses_reg (scan))
3985 && REGNO (reg2) == REGNO (reg))))
3986 REG_NOTES (scan)
3987 = gen_rtx_INSN_LIST (REG_LABEL, label, REG_NOTES (scan));
3989 while (scan != dies);
3994 if (TARGET_SH2)
3995 fixup_addr_diff_vecs (first);
3997 if (optimize)
3999 mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
4000 shorten_branches (first);
4002 /* Scan the function looking for move instructions which have to be
4003 changed to pc-relative loads and insert the literal tables. */
4005 mdep_reorg_phase = SH_FIXUP_PCLOAD;
4006 for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
4008 if (mova_p (insn))
4010 if (! num_mova++)
4011 mova = insn;
4013 else if (GET_CODE (insn) == JUMP_INSN
4014 && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
4015 && num_mova)
4017 rtx scan;
4018 int total;
4020 num_mova--;
4022 /* Some code might have been inserted between the mova and
4023 its ADDR_DIFF_VEC. Check if the mova is still in range. */
4024 for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
4025 total += get_attr_length (scan);
4027 /* The range of mova is 1020; add 4 because the pc counts from the
4028 address of the second instruction after this one, and subtract 2 in
4029 case the pc is 2-byte aligned.  Possible alignment needed for the
4030 ADDR_DIFF_VEC cancels out with the alignment effects of the mova itself.  */
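/* (Illustration: per the comment above, the usable distance works out
   to 1020 + 4 - 2 = 1022 bytes, which is exactly the threshold tested
   just below.)  */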
4031 if (total > 1022)
4033 /* Change the mova into a load, and restart scanning
4034 there. broken_move will then return true for mova. */
4035 SET_SRC (PATTERN (mova))
4036 = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
4037 INSN_CODE (mova) = -1;
4038 insn = mova;
4041 if (broken_move (insn))
4043 rtx scan;
4044 /* Scan ahead looking for a barrier to stick the constant table
4045 behind. */
4046 rtx barrier = find_barrier (num_mova, mova, insn);
4047 rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
4049 if (num_mova && ! mova_p (mova))
4051 /* find_barrier had to change the first mova into a
4052 pcload; thus, we have to start with this new pcload. */
4053 insn = mova;
4054 num_mova = 0;
4056 /* Now find all the moves between the points and modify them. */
4057 for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
4059 if (GET_CODE (scan) == CODE_LABEL)
4060 last_float = 0;
4061 if (broken_move (scan))
4063 rtx *patp = &PATTERN (scan), pat = *patp;
4064 rtx src, dst;
4065 rtx lab;
4066 rtx newsrc;
4067 enum machine_mode mode;
4069 if (GET_CODE (pat) == PARALLEL)
4070 patp = &XVECEXP (pat, 0, 0), pat = *patp;
4071 src = SET_SRC (pat);
4072 dst = SET_DEST (pat);
4073 mode = GET_MODE (dst);
4075 if (mode == SImode && hi_const (src)
4076 && REGNO (dst) != FPUL_REG)
4078 int offset = 0;
4080 mode = HImode;
4081 while (GET_CODE (dst) == SUBREG)
4083 offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
4084 GET_MODE (SUBREG_REG (dst)),
4085 SUBREG_BYTE (dst),
4086 GET_MODE (dst));
4087 dst = SUBREG_REG (dst);
4089 dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
4092 if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
4094 /* This must be an insn that clobbers r0. */
4095 rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
4096 XVECLEN (PATTERN (scan), 0)
4097 - 1);
4098 rtx clobber = *clobberp;
4100 if (GET_CODE (clobber) != CLOBBER
4101 || ! rtx_equal_p (XEXP (clobber, 0), r0_rtx))
4102 abort ();
4104 if (last_float
4105 && reg_set_between_p (r0_rtx, last_float_move, scan))
4106 last_float = 0;
4107 if (last_float
4108 && TARGET_SHCOMPACT
4109 && GET_MODE_SIZE (mode) != 4
4110 && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
4111 last_float = 0;
4112 lab = add_constant (src, mode, last_float);
4113 if (lab)
4114 emit_insn_before (gen_mova (lab), scan);
4115 else
4117 /* There will be a REG_UNUSED note for r0 on
4118 LAST_FLOAT_MOVE; we have to change it to REG_INC,
4119 since otherwise reorg:mark_target_live_regs will not
4120 consider r0 to be used, and we would end up with a delay
4121 slot insn in front of SCAN that clobbers r0.  */
4122 rtx note
4123 = find_regno_note (last_float_move, REG_UNUSED, 0);
4125 /* If we are not optimizing, then there may not be
4126 a note. */
4127 if (note)
4128 PUT_MODE (note, REG_INC);
4130 *last_float_addr = r0_inc_rtx;
4132 last_float_move = scan;
4133 last_float = src;
4134 newsrc = gen_rtx_MEM (mode,
4135 (((TARGET_SH4 && ! TARGET_FMOVD)
4136 || REGNO (dst) == FPUL_REG)
4137 ? r0_inc_rtx
4138 : r0_rtx));
4139 last_float_addr = &XEXP (newsrc, 0);
4141 /* Remove the clobber of r0. */
4142 *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
4143 gen_rtx_SCRATCH (Pmode));
4144 RTX_UNCHANGING_P (newsrc) = 1;
4146 /* This is a mova needing a label. Create it. */
4147 else if (GET_CODE (src) == UNSPEC
4148 && XINT (src, 1) == UNSPEC_MOVA
4149 && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
4151 lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
4152 newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
4153 newsrc = gen_rtx_UNSPEC (SImode,
4154 gen_rtvec (1, newsrc),
4155 UNSPEC_MOVA);
4157 else
4159 lab = add_constant (src, mode, 0);
4160 newsrc = gen_rtx_MEM (mode,
4161 gen_rtx_LABEL_REF (VOIDmode, lab));
4162 RTX_UNCHANGING_P (newsrc) = 1;
4164 *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
4165 INSN_CODE (scan) = -1;
4168 dump_table (barrier);
4169 insn = barrier;
4173 mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
4174 INSN_ADDRESSES_FREE ();
4175 split_branches (first);
4177 /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
4178 also has an effect on the register that holds the address of the sfunc.
4179 Insert an extra dummy insn in front of each sfunc that pretends to
4180 use this register. */
4181 if (flag_delayed_branch)
4183 for (insn = first; insn; insn = NEXT_INSN (insn))
4185 rtx reg = sfunc_uses_reg (insn);
4187 if (! reg)
4188 continue;
4189 emit_insn_before (gen_use_sfunc_addr (reg), insn);
4192 #if 0
4193 /* fpscr is not actually a user variable, but we pretend it is for the
4194 sake of the previous optimization passes, since we want it handled like
4195 one. However, we don't have any debugging information for it, so turn
4196 it into a non-user variable now. */
4197 if (TARGET_SH4)
4198 REG_USERVAR_P (get_fpscr_rtx ()) = 0;
4199 #endif
4200 mdep_reorg_phase = SH_AFTER_MDEP_REORG;
4204 static int get_dest_uid (rtx label, int max_uid)
4206 rtx dest = next_real_insn (label);
4207 int dest_uid;
4208 if (! dest)
4209 /* This can happen for an undefined label. */
4210 return 0;
4211 dest_uid = INSN_UID (dest);
4212 /* If this is a newly created branch redirection blocking instruction,
4213 we cannot index the branch_uid or insn_addresses arrays with its
4214 uid. But then, we won't need to, because the actual destination is
4215 the following branch. */
4216 while (dest_uid >= max_uid)
4218 dest = NEXT_INSN (dest);
4219 dest_uid = INSN_UID (dest);
4221 if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
4222 return 0;
4223 return dest_uid;
4226 /* Split condbranches that are out of range. Also add clobbers for
4227 scratch registers that are needed in far jumps.
4228 We do this before delay slot scheduling, so that it can take our
4229 newly created instructions into account. It also allows us to
4230 find branches with common targets more easily. */
4232 static void
4233 split_branches (rtx first)
4235 rtx insn;
4236 struct far_branch **uid_branch, *far_branch_list = 0;
4237 int max_uid = get_max_uid ();
4239 /* Find out which branches are out of range. */
4240 shorten_branches (first);
4242 uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
4243 memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
4245 for (insn = first; insn; insn = NEXT_INSN (insn))
4246 if (! INSN_P (insn))
4247 continue;
4248 else if (INSN_DELETED_P (insn))
4250 /* Shorten_branches would split this instruction again,
4251 so transform it into a note. */
4252 PUT_CODE (insn, NOTE);
4253 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4254 NOTE_SOURCE_FILE (insn) = 0;
4256 else if (GET_CODE (insn) == JUMP_INSN
4257 /* Don't mess with ADDR_DIFF_VECs.  */
4258 && (GET_CODE (PATTERN (insn)) == SET
4259 || GET_CODE (PATTERN (insn)) == RETURN))
4261 enum attr_type type = get_attr_type (insn);
4262 if (type == TYPE_CBRANCH)
4264 rtx next, beyond;
4266 if (get_attr_length (insn) > 4)
4268 rtx src = SET_SRC (PATTERN (insn));
4269 rtx olabel = XEXP (XEXP (src, 1), 0);
4270 int addr = INSN_ADDRESSES (INSN_UID (insn));
4271 rtx label = 0;
4272 int dest_uid = get_dest_uid (olabel, max_uid);
4273 struct far_branch *bp = uid_branch[dest_uid];
4275 /* redirect_jump needs a valid JUMP_LABEL, and it might delete
4276 the label if the LABEL_NUSES count drops to zero.  There is
4277 always a jump_optimize pass that sets these values, but it
4278 proceeds to delete unreferenced code, and then, if not
4279 optimizing, to un-delete the deleted instructions, thus
4280 leaving labels with use counts that are too low.  */
4281 if (! optimize)
4283 JUMP_LABEL (insn) = olabel;
4284 LABEL_NUSES (olabel)++;
4286 if (! bp)
4288 bp = (struct far_branch *) alloca (sizeof *bp);
4289 uid_branch[dest_uid] = bp;
4290 bp->prev = far_branch_list;
4291 far_branch_list = bp;
4292 bp->far_label
4293 = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
4294 LABEL_NUSES (bp->far_label)++;
4296 else
4298 label = bp->near_label;
4299 if (! label && bp->address - addr >= CONDJUMP_MIN)
4301 rtx block = bp->insert_place;
4303 if (GET_CODE (PATTERN (block)) == RETURN)
4304 block = PREV_INSN (block);
4305 else
4306 block = gen_block_redirect (block,
4307 bp->address, 2);
4308 label = emit_label_after (gen_label_rtx (),
4309 PREV_INSN (block));
4310 bp->near_label = label;
4312 else if (label && ! NEXT_INSN (label))
4314 if (addr + 2 - bp->address <= CONDJUMP_MAX)
4315 bp->insert_place = insn;
4316 else
4317 gen_far_branch (bp);
4320 if (! label
4321 || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
4323 bp->near_label = label = gen_label_rtx ();
4324 bp->insert_place = insn;
4325 bp->address = addr;
4327 if (! redirect_jump (insn, label, 1))
4328 abort ();
4330 else
4332 /* get_attr_length (insn) == 2 */
4333 /* Check if we have a pattern where reorg wants to redirect
4334 the branch to a label from an unconditional branch that
4335 is too far away. */
4336 /* We can't use JUMP_LABEL here because it might be undefined
4337 when not optimizing. */
4338 /* A syntax error might cause beyond to be NULL_RTX. */
4339 beyond
4340 = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
4341 0));
4343 if (beyond
4344 && (GET_CODE (beyond) == JUMP_INSN
4345 || ((beyond = next_active_insn (beyond))
4346 && GET_CODE (beyond) == JUMP_INSN))
4347 && GET_CODE (PATTERN (beyond)) == SET
4348 && recog_memoized (beyond) == CODE_FOR_jump_compact
4349 && ((INSN_ADDRESSES
4350 (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
4351 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
4352 > 252 + 258 + 2))
4353 gen_block_redirect (beyond,
4354 INSN_ADDRESSES (INSN_UID (beyond)), 1);
4357 next = next_active_insn (insn);
4359 if ((GET_CODE (next) == JUMP_INSN
4360 || GET_CODE (next = next_active_insn (next)) == JUMP_INSN)
4361 && GET_CODE (PATTERN (next)) == SET
4362 && recog_memoized (next) == CODE_FOR_jump_compact
4363 && ((INSN_ADDRESSES
4364 (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
4365 - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
4366 > 252 + 258 + 2))
4367 gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
4369 else if (type == TYPE_JUMP || type == TYPE_RETURN)
4371 int addr = INSN_ADDRESSES (INSN_UID (insn));
4372 rtx far_label = 0;
4373 int dest_uid = 0;
4374 struct far_branch *bp;
4376 if (type == TYPE_JUMP)
4378 far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
4379 dest_uid = get_dest_uid (far_label, max_uid);
4380 if (! dest_uid)
4382 /* Parse errors can lead to labels outside
4383 the insn stream. */
4384 if (! NEXT_INSN (far_label))
4385 continue;
4387 if (! optimize)
4389 JUMP_LABEL (insn) = far_label;
4390 LABEL_NUSES (far_label)++;
4392 redirect_jump (insn, NULL_RTX, 1);
4393 far_label = 0;
4396 bp = uid_branch[dest_uid];
4397 if (! bp)
4399 bp = (struct far_branch *) alloca (sizeof *bp);
4400 uid_branch[dest_uid] = bp;
4401 bp->prev = far_branch_list;
4402 far_branch_list = bp;
4403 bp->near_label = 0;
4404 bp->far_label = far_label;
4405 if (far_label)
4406 LABEL_NUSES (far_label)++;
4408 else if (bp->near_label && ! NEXT_INSN (bp->near_label))
4409 if (addr - bp->address <= CONDJUMP_MAX)
4410 emit_label_after (bp->near_label, PREV_INSN (insn));
4411 else
4413 gen_far_branch (bp);
4414 bp->near_label = 0;
4416 else
4417 bp->near_label = 0;
4418 bp->address = addr;
4419 bp->insert_place = insn;
4420 if (! far_label)
4421 emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
4422 else
4423 gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
4426 /* Generate all pending far branches,
4427 and free our references to the far labels. */
4428 while (far_branch_list)
4430 if (far_branch_list->near_label
4431 && ! NEXT_INSN (far_branch_list->near_label))
4432 gen_far_branch (far_branch_list);
4433 if (optimize
4434 && far_branch_list->far_label
4435 && ! --LABEL_NUSES (far_branch_list->far_label))
4436 delete_insn (far_branch_list->far_label);
4437 far_branch_list = far_branch_list->prev;
4440 /* Instruction length information is no longer valid due to the new
4441 instructions that have been generated. */
4442 init_insn_lengths ();
4445 /* Dump out instruction addresses, which is useful for debugging the
4446 constant pool table code.
4448 If relaxing, output the label and pseudo-ops used to link together
4449 calls and the instructions which set the registers.  */
4451 /* ??? The addresses printed by this routine for insns are nonsense for
4452 insns which are inside a sequence where none of the inner insns have
4453 variable length.  This is because the second pass of shorten_branches
4454 does not bother to update them.  */
4456 void
4457 final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
4458 int noperands ATTRIBUTE_UNUSED)
4460 if (TARGET_DUMPISIZE)
4461 fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
4463 if (TARGET_RELAX)
4465 rtx note;
4467 note = find_reg_note (insn, REG_LABEL, NULL_RTX);
4468 if (note)
4470 rtx pattern;
4472 pattern = PATTERN (insn);
4473 if (GET_CODE (pattern) == PARALLEL)
4474 pattern = XVECEXP (pattern, 0, 0);
4475 if (GET_CODE (pattern) == CALL
4476 || (GET_CODE (pattern) == SET
4477 && (GET_CODE (SET_SRC (pattern)) == CALL
4478 || get_attr_type (insn) == TYPE_SFUNC)))
4479 asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
4480 CODE_LABEL_NUMBER (XEXP (note, 0)));
4481 else if (GET_CODE (pattern) == SET)
4482 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4483 CODE_LABEL_NUMBER (XEXP (note, 0)));
4484 else
4485 abort ();
4490 /* Dump out any constants accumulated in the final pass. These will
4491 only be labels. */
4493 const char *
4494 output_jump_label_table (void)
4496 int i;
4498 if (pool_size)
4500 fprintf (asm_out_file, "\t.align 2\n");
4501 for (i = 0; i < pool_size; i++)
4503 pool_node *p = &pool_vector[i];
4505 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4506 CODE_LABEL_NUMBER (p->label));
4507 output_asm_insn (".long %O0", &p->value);
4509 pool_size = 0;
4512 return "";
4515 /* A full frame looks like:
4517 arg-5
4518 arg-4
4519 [ if current_function_anonymous_args
4520 arg-3
4521 arg-2
4522 arg-1
4523 arg-0 ]
4524 saved-fp
4525 saved-r10
4526 saved-r11
4527 saved-r12
4528 saved-pr
4529 local-n
4531 local-1
4532 local-0 <- fp points here. */
4534 /* Number of bytes pushed for anonymous args, used to pass information
4535 between expand_prologue and expand_epilogue. */
4537 static int extra_push;
4539 /* Adjust the stack by SIZE bytes. REG holds the rtl of the register to be
4540 adjusted. If epilogue_p is zero, this is for a prologue; otherwise, it's
4541 for an epilogue. If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET
4542 of all the registers that are about to be restored, and hence dead. */
4544 static void
4545 output_stack_adjust (int size, rtx reg, int epilogue_p,
4546 HARD_REG_SET *live_regs_mask)
4548 rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
4549 if (size)
4551 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
4553 if (size % align)
4554 abort ();
4556 if (CONST_OK_FOR_ADD (size))
4557 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
4558 /* Try to do it with two partial adjustments; however, we must make
4559 sure that the stack is properly aligned at all times, in case
4560 an interrupt occurs between the two partial adjustments. */
4561 else if (CONST_OK_FOR_ADD (size / 2 & -align)
4562 && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
4564 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
4565 emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
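/* (Worked example with illustrative values: for size = 192 and
   align = 8, a single add is out of range on SH1-4, since
   CONST_OK_FOR_I08 covers -128..127; but 192/2 & -8 = 96 and
   192 - 96 = 96 both fit, and the intermediate stack pointer stays
   aligned because the first step is a multiple of the alignment.)  */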
4567 else
4569 rtx const_reg;
4570 rtx insn;
4571 int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
4572 int i;
4574 /* If TEMP is invalid, we could temporarily save a general
4575 register to MACL. However, there is currently no need
4576 to handle this case, so just abort when we see it. */
4577 if (current_function_interrupt
4578 || ! call_used_regs[temp] || fixed_regs[temp])
4579 temp = -1;
4580 if (temp < 0 && ! current_function_interrupt)
4582 HARD_REG_SET temps;
4583 COPY_HARD_REG_SET (temps, call_used_reg_set);
4584 AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
4585 if (epilogue_p)
4587 for (i = 0; i < HARD_REGNO_NREGS (FIRST_RET_REG, DImode); i++)
4588 CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
4589 if (current_function_calls_eh_return)
4591 CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
4592 for (i = 0; i <= 3; i++)
4593 CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
4596 else
4598 for (i = FIRST_PARM_REG;
4599 i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
4600 CLEAR_HARD_REG_BIT (temps, i);
4601 if (current_function_needs_context)
4602 CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
4604 temp = scavenge_reg (&temps);
4606 if (temp < 0 && live_regs_mask)
4607 temp = scavenge_reg (live_regs_mask);
4608 if (temp < 0)
4609 abort ();
4610 const_reg = gen_rtx_REG (GET_MODE (reg), temp);
4612 /* If SIZE is negative, subtract the positive value.
4613 This sometimes allows a constant pool entry to be shared
4614 between prologue and epilogue code. */
4615 if (size < 0)
4617 emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
4618 insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
4620 else
4622 emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
4623 insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
4625 if (! epilogue_p)
4626 REG_NOTES (insn)
4627 = (gen_rtx_EXPR_LIST
4628 (REG_FRAME_RELATED_EXPR,
4629 gen_rtx_SET (VOIDmode, reg,
4630 gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
4631 REG_NOTES (insn)));
4636 static rtx
4637 frame_insn (rtx x)
4639 x = emit_insn (x);
4640 RTX_FRAME_RELATED_P (x) = 1;
4641 return x;
4644 /* Output RTL to push register RN onto the stack. */
4646 static rtx
4647 push (int rn)
4649 rtx x;
4650 if (rn == FPUL_REG)
4651 x = gen_push_fpul ();
4652 else if (rn == FPSCR_REG)
4653 x = gen_push_fpscr ();
4654 else if (TARGET_SH4 && TARGET_FMOVD && ! TARGET_FPU_SINGLE
4655 && FP_OR_XD_REGISTER_P (rn))
4657 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
4658 return NULL_RTX;
4659 x = gen_push_4 (gen_rtx_REG (DFmode, rn));
4661 else if (TARGET_SH2E && FP_REGISTER_P (rn))
4662 x = gen_push_e (gen_rtx_REG (SFmode, rn));
4663 else
4664 x = gen_push (gen_rtx_REG (SImode, rn));
4666 x = frame_insn (x);
4667 REG_NOTES (x)
4668 = gen_rtx_EXPR_LIST (REG_INC,
4669 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
4670 return x;
4673 /* Output RTL to pop register RN from the stack. */
4675 static void
4676 pop (int rn)
4678 rtx x;
4679 if (rn == FPUL_REG)
4680 x = gen_pop_fpul ();
4681 else if (rn == FPSCR_REG)
4682 x = gen_pop_fpscr ();
4683 else if (TARGET_SH4 && TARGET_FMOVD && ! TARGET_FPU_SINGLE
4684 && FP_OR_XD_REGISTER_P (rn))
4686 if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
4687 return;
4688 x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
4690 else if (TARGET_SH2E && FP_REGISTER_P (rn))
4691 x = gen_pop_e (gen_rtx_REG (SFmode, rn));
4692 else
4693 x = gen_pop (gen_rtx_REG (SImode, rn));
4695 x = emit_insn (x);
4696 REG_NOTES (x)
4697 = gen_rtx_EXPR_LIST (REG_INC,
4698 gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
4701 /* Generate code to push the regs specified in the mask. */
4703 static void
4704 push_regs (HARD_REG_SET *mask, int interrupt_handler)
4706 int i;
4707 int skip_fpscr = 0;
4709 /* Push PR last; this gives better latencies after the prologue, and
4710 provides candidates for the return delay slot when there are no
4711 general registers pushed.  */
4712 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4714 /* If this is an interrupt handler, and the SZ bit varies,
4715 and we have to push any floating point register, we need
4716 to switch to the correct precision first. */
4717 if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
4718 && hard_regs_intersect_p (mask, &reg_class_contents[DF_REGS]))
4720 HARD_REG_SET unsaved;
4722 push (FPSCR_REG);
4723 COMPL_HARD_REG_SET(unsaved, *mask);
4724 fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
4725 skip_fpscr = 1;
4727 if (i != PR_REG
4728 && (i != FPSCR_REG || ! skip_fpscr)
4729 && TEST_HARD_REG_BIT (*mask, i))
4730 push (i);
4732 if (TEST_HARD_REG_BIT (*mask, PR_REG))
4733 push (PR_REG);
4736 /* Calculate how much extra space is needed to save all callee-saved
4737 target registers.
4738 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
4740 static int
4741 shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
4743 int reg;
4744 int stack_space = 0;
4745 int interrupt_handler = sh_cfun_interrupt_handler_p ();
4747 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
4748 if ((! call_used_regs[reg] || interrupt_handler)
4749 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
4750 /* Leave space to save this target register on the stack,
4751 in case target register allocation wants to use it. */
4752 stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
4753 return stack_space;
4756 /* Decide whether we should reserve space for callee-save target registers,
4757 in case target register allocation wants to use them. REGS_SAVED is
4758 the space, in bytes, that is already required for register saves.
4759 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
4761 static int
4762 shmedia_reserve_space_for_target_registers_p (int regs_saved,
4763 HARD_REG_SET *live_regs_mask)
4765 if (optimize_size)
4766 return 0;
4767 return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
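/* (Reading of the test above: the extra target-register space is
   reserved only when it is no larger than the save area we are
   already paying for, so the reservation can at most double the
   register-save cost.  This is an interpretation of the test, not a
   separate rule.)  */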
4770 /* Decide how much space to reserve for callee-save target registers
4771 in case target register allocation wants to use them.
4772 LIVE_REGS_MASK is the register mask calculated by calc_live_regs. */
4774 static int
4775 shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
4777 if (shmedia_space_reserved_for_target_registers)
4778 return shmedia_target_regs_stack_space (live_regs_mask);
4779 else
4780 return 0;
4783 /* Work out the registers which need to be saved, both as a mask and a
4784 count of bytes saved.  Return the count.
4786 If doing a pragma interrupt function, then push all regs used by the
4787 function, and if we call another function (we can tell by looking at PR),
4788 make sure that all the regs it clobbers are safe too. */
4790 static int
4791 calc_live_regs (HARD_REG_SET *live_regs_mask)
4793 int reg;
4794 int count;
4795 int interrupt_handler;
4796 int pr_live, has_call;
4798 interrupt_handler = sh_cfun_interrupt_handler_p ();
4800 CLEAR_HARD_REG_SET (*live_regs_mask);
4801 if (TARGET_SH4 && TARGET_FMOVD && interrupt_handler
4802 && regs_ever_live[FPSCR_REG])
4803 target_flags &= ~FPU_SINGLE_BIT;
4804 /* If switching to double-precision mode would eliminate a lot of individual saves, do that.  */
4805 else if (TARGET_SH4 && TARGET_FMOVD && TARGET_FPU_SINGLE)
4806 for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
4807 if (regs_ever_live[reg] && regs_ever_live[reg+1]
4808 && (! call_used_regs[reg] || (interrupt_handler && ! pragma_trapa))
4809 && ++count > 2)
4811 target_flags &= ~FPU_SINGLE_BIT;
4812 break;
4814 /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
4815 knows how to use it. That means the pseudo originally allocated for
4816 the initial value can become the PR_MEDIA_REG hard register, as seen for
4817 execute/20010122-1.c:test9. */
4818 if (TARGET_SHMEDIA)
4819 /* ??? This function is called from initial_elimination_offset, hence we
4820 can't use the result of sh_media_register_for_return here.  */
4821 pr_live = sh_pr_n_sets ();
4822 else
4824 rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
4825 pr_live = (pr_initial
4826 ? (GET_CODE (pr_initial) != REG
4827 || REGNO (pr_initial) != (PR_REG))
4828 : regs_ever_live[PR_REG]);
4829 /* For SHcompact, if not optimizing, we end up with a memory reference
4830 using the return address pointer for __builtin_return_address even
4831 though there is no actual need to put the PR register on the stack. */
4832 pr_live |= regs_ever_live[RETURN_ADDRESS_POINTER_REGNUM];
4834 /* Force PR to be live if the prologue has to call the SHmedia
4835 argument decoder or register saver. */
4836 if (TARGET_SHCOMPACT
4837 && ((current_function_args_info.call_cookie
4838 & ~ CALL_COOKIE_RET_TRAMP (1))
4839 || current_function_has_nonlocal_label))
4840 pr_live = 1;
4841 has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
4842 for (count = 0, reg = FIRST_PSEUDO_REGISTER - 1; reg >= 0; reg--)
4844 if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
4845 ? pr_live
4846 : (interrupt_handler && ! pragma_trapa)
4847 ? (/* Need to save all the regs ever live. */
4848 (regs_ever_live[reg]
4849 || (call_used_regs[reg]
4850 && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG)
4851 && has_call)
4852 || (has_call && REGISTER_NATURAL_MODE (reg) == SImode
4853 && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
4854 && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
4855 && reg != RETURN_ADDRESS_POINTER_REGNUM
4856 && reg != T_REG && reg != GBR_REG
4857 /* Push fpscr only on targets which have an FPU.  */
4858 && (reg != FPSCR_REG || TARGET_FPU_ANY))
4859 : (/* Only push those regs which are used and need to be saved. */
4860 (TARGET_SHCOMPACT
4861 && flag_pic
4862 && current_function_args_info.call_cookie
4863 && reg == (int) PIC_OFFSET_TABLE_REGNUM)
4864 || (regs_ever_live[reg] && ! call_used_regs[reg])
4865 || (current_function_calls_eh_return
4866 && (reg == (int) EH_RETURN_DATA_REGNO (0)
4867 || reg == (int) EH_RETURN_DATA_REGNO (1)
4868 || reg == (int) EH_RETURN_DATA_REGNO (2)
4869 || reg == (int) EH_RETURN_DATA_REGNO (3)))
4870 || ((reg == MACL_REG || reg == MACH_REG)
4871 && regs_ever_live[reg]
4872 && sh_cfun_attr_renesas_p ())
4875 SET_HARD_REG_BIT (*live_regs_mask, reg);
4876 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
4878 if ((TARGET_SH4 || TARGET_SH5) && TARGET_FMOVD
4879 && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
4881 if (FP_REGISTER_P (reg))
4883 if (! TARGET_FPU_SINGLE && ! regs_ever_live[reg ^ 1])
4885 SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
4886 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
4889 else if (XD_REGISTER_P (reg))
4891 /* Must switch to double mode to access these registers. */
4892 target_flags &= ~FPU_SINGLE_BIT;
4897 /* If we have a target register optimization pass after prologue / epilogue
4898 threading, we need to assume all target registers will be live even if
4899 they aren't now. */
4900 if (flag_branch_target_load_optimize2
4901 && TARGET_SAVE_ALL_TARGET_REGS
4902 && shmedia_space_reserved_for_target_registers)
4903 for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
4904 if ((! call_used_regs[reg] || interrupt_handler)
4905 && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
4907 SET_HARD_REG_BIT (*live_regs_mask, reg);
4908 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
4910 /* If this is an interrupt handler, we don't have any call-clobbered
4911 registers we can conveniently use for target register save/restore.
4912 Make sure we save at least one general purpose register when we need
4913 to save target registers. */
4914 if (interrupt_handler
4915 && hard_regs_intersect_p (live_regs_mask,
4916 &reg_class_contents[TARGET_REGS])
4917 && ! hard_regs_intersect_p (live_regs_mask,
4918 &reg_class_contents[GENERAL_REGS]))
4920 SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
4921 count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
4924 return count;
4927 /* Code to generate prologue and epilogue sequences.  */
4929 /* PUSHED is the number of bytes that are being pushed on the
4930 stack for register saves. Return the frame size, padded
4931 appropriately so that the stack stays properly aligned. */
4932 static HOST_WIDE_INT
4933 rounded_frame_size (int pushed)
4935 HOST_WIDE_INT size = get_frame_size ();
4936 HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
4938 return ((size + pushed + align - 1) & -align) - pushed;
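/* (Worked example with illustrative values: get_frame_size () = 20,
   pushed = 12, align = 8.  Then (20 + 12 + 7) & -8 = 32, and
   subtracting the 12 bytes already pushed leaves a 20-byte frame, so
   frame plus saves remains a multiple of the stack alignment.)  */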
4941 /* Choose a call-clobbered target-branch register that remains
4942 unchanged along the whole function.  We set it up with the return
4943 address in the prologue.  */
4945 int sh_media_register_for_return (void)
4947 int regno;
4948 int tr0_used;
4950 if (! current_function_is_leaf)
4951 return -1;
4952 if (lookup_attribute ("interrupt_handler",
4953 DECL_ATTRIBUTES (current_function_decl)))
4954 return -1;
4956 tr0_used = flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM];
4958 for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
4959 if (call_used_regs[regno] && ! regs_ever_live[regno])
4960 return regno;
4962 return -1;
4965 /* The maximum registers we need to save are:
4966 - 62 general purpose registers (r15 is stack pointer, r63 is zero)
4967 - 32 floating point registers (for each pair, we save none,
4968 one single precision value, or a double precision value).
4969 - 8 target registers.
4970 Delimiter entries are not counted here; save_schedule below reserves room for them.  */
4971 #define MAX_SAVED_REGS (62+32+8)
4973 typedef struct save_entry_s
4975 unsigned char reg;
4976 unsigned char mode;
4977 short offset;
4978 } save_entry;
4980 #define MAX_TEMPS 4
4982 /* There will be a delimiter entry with VOIDmode both at the start and the
4983 end of a filled-in schedule.  The end delimiter has the offset of the
4984 save with the smallest (i.e. most negative) offset. */
4985 typedef struct save_schedule_s
4987 save_entry entries[MAX_SAVED_REGS + 2];
4988 int temps[MAX_TEMPS+1];
4989 } save_schedule;
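/* (Illustrative example of a filled-in schedule, with hypothetical
   registers r (general, DImode) and f (floating-point, SFmode) and
   OFFSET_BASE 0:
       entries[0] = { -1, VOIDmode,   0 }   start delimiter
       entries[1] = {  r, DImode,    -8 }   aligned pass
       entries[2] = {  f, SFmode,   -12 }   unaligned pass
       entries[3] = { -1, VOIDmode, -12 }   end delimiter
   i.e. offsets grow downwards, and the end delimiter carries the most
   negative offset, as described above.)  */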
4991 /* Fill in SCHEDULE according to LIVE_REGS_MASK.  Returns the last
4992 entry written to (not counting the delimiter); restore code walks
4993 the schedule in reverse.  OFFSET_BASE is a number to be added to
4994 all offset entries.  */
4996 static save_entry *
4997 sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
4998 int offset_base)
5000 int align, i;
5001 save_entry *entry = schedule->entries;
5002 int tmpx = 0;
5003 int offset;
5005 if (! current_function_interrupt)
5006 for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
5007 if (call_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
5008 && ! FUNCTION_ARG_REGNO_P (i)
5009 && i != FIRST_RET_REG
5010 && ! (current_function_needs_context && i == STATIC_CHAIN_REGNUM)
5011 && ! (current_function_calls_eh_return
5012 && (i == EH_RETURN_STACKADJ_REGNO
5013 || ((unsigned)i >= EH_RETURN_DATA_REGNO (0)
5014 && (unsigned)i <= EH_RETURN_DATA_REGNO (3)))))
5015 schedule->temps[tmpx++] = i;
5016 entry->reg = -1;
5017 entry->mode = VOIDmode;
5018 entry->offset = offset_base;
5019 entry++;
5020 /* We loop twice: first, we save 8-byte-aligned registers at the
5021 higher addresses, which are known to be aligned.  Then we
5022 proceed to save the 32-bit registers that don't need 8-byte
5023 alignment.
5024 If this is an interrupt function, all registers that need saving
5025 must be saved in full.  Moreover, we need to postpone saving
5026 target registers until we have saved some general-purpose registers
5027 that we can then use as scratch registers.  */
5028 offset = offset_base;
5029 for (align = 1; align >= 0; align--)
5031 for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
5032 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
5034 enum machine_mode mode = REGISTER_NATURAL_MODE (i);
5035 int reg = i;
5037 if (current_function_interrupt)
5039 if (TARGET_REGISTER_P (i))
5040 continue;
5041 if (GENERAL_REGISTER_P (i))
5042 mode = DImode;
5044 if (mode == SFmode && (i % 2) == 1
5045 && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
5046 && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
5048 mode = DFmode;
5049 i--;
5050 reg--;
5053 /* If we're doing the aligned pass and this is not aligned,
5054 or we're doing the unaligned pass and this is aligned,
5055 skip it. */
5056 if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
5057 != align)
5058 continue;
5060 if (current_function_interrupt
5061 && GENERAL_REGISTER_P (i)
5062 && tmpx < MAX_TEMPS)
5063 schedule->temps[tmpx++] = i;
5065 offset -= GET_MODE_SIZE (mode);
5066 entry->reg = i;
5067 entry->mode = mode;
5068 entry->offset = offset;
5069 entry++;
5071 if (align && current_function_interrupt)
5072 for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
5073 if (TEST_HARD_REG_BIT (*live_regs_mask, i))
5075 offset -= GET_MODE_SIZE (DImode);
5076 entry->reg = i;
5077 entry->mode = DImode;
5078 entry->offset = offset;
5079 entry++;
5082 entry->reg = -1;
5083 entry->mode = VOIDmode;
5084 entry->offset = offset;
5085 schedule->temps[tmpx] = -1;
5086 return entry - 1;
5089 void
5090 sh_expand_prologue (void)
5092 HARD_REG_SET live_regs_mask;
5093 int d, i;
5094 int d_rounding = 0;
5095 int save_flags = target_flags;
5097 current_function_interrupt = sh_cfun_interrupt_handler_p ();
5099 /* We have pretend args if we had an object sent partially in registers
5100 and partially on the stack, e.g. a large structure. */
5101 output_stack_adjust (-current_function_pretend_args_size
5102 - current_function_args_info.stack_regs * 8,
5103 stack_pointer_rtx, 0, NULL);
5105 extra_push = 0;
5107 if (TARGET_SHCOMPACT && flag_pic && current_function_args_info.call_cookie)
5108 /* We're going to use the PIC register to load the address of the
5109 incoming-argument decoder and/or of the return trampoline from
5110 the GOT, so make sure the PIC register is preserved and
5111 initialized. */
5112 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5114 if (TARGET_SHCOMPACT
5115 && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
5117 int reg;
5119 /* First, make all registers with incoming arguments that will
5120 be pushed onto the stack live, so that register renaming
5121 doesn't overwrite them. */
5122 for (reg = 0; reg < NPARM_REGS (SImode); reg++)
5123 if (CALL_COOKIE_STACKSEQ_GET (current_function_args_info.call_cookie)
5124 >= NPARM_REGS (SImode) - reg)
5125 for (; reg < NPARM_REGS (SImode); reg++)
5126 emit_insn (gen_shcompact_preserve_incoming_args
5127 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
5128 else if (CALL_COOKIE_INT_REG_GET
5129 (current_function_args_info.call_cookie, reg) == 1)
5130 emit_insn (gen_shcompact_preserve_incoming_args
5131 (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
5133 emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
5134 stack_pointer_rtx);
5135 emit_move_insn (gen_rtx_REG (SImode, R0_REG),
5136 GEN_INT (current_function_args_info.call_cookie));
5137 emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
5138 gen_rtx_REG (SImode, R0_REG));
5140 else if (TARGET_SHMEDIA)
5142 int tr = sh_media_register_for_return ();
5144 if (tr >= 0)
5146 rtx insn = emit_move_insn (gen_rtx_REG (DImode, tr),
5147 gen_rtx_REG (DImode, PR_MEDIA_REG));
5149 /* ??? We should suppress saving pr when we don't need it, but this
5150 is tricky because of builtin_return_address. */
5152 /* If this function only exits with sibcalls, this copy
5153 will be flagged as dead. */
5154 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
5155 const0_rtx,
5156 REG_NOTES (insn));
5160 /* Emit the code for SETUP_VARARGS. */
5161 if (current_function_stdarg)
5163 /* This is not used by the SH2E calling convention.  */
5164 if (TARGET_SH1 && ! TARGET_SH2E && ! TARGET_SH5
5165 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
5167 /* Push arg regs as if they'd been provided by the caller on the stack.  */
5168 for (i = 0; i < NPARM_REGS(SImode); i++)
5170 int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
5171 rtx insn;
5173 if (i >= (NPARM_REGS(SImode)
5174 - current_function_args_info.arg_count[(int) SH_ARG_INT]
5176 break;
5177 insn = push (rn);
5178 RTX_FRAME_RELATED_P (insn) = 0;
5179 extra_push += 4;
5184 /* If we're supposed to switch stacks at function entry, do so now. */
5185 if (sp_switch)
5186 emit_insn (gen_sp_switch_1 ());
5188 d = calc_live_regs (&live_regs_mask);
5189 /* ??? Maybe we could save some switching if we can move a mode switch
5190 that already happens to be at the function start into the prologue. */
5191 if (target_flags != save_flags && ! current_function_interrupt)
5192 emit_insn (gen_toggle_sz ());
5194 if (TARGET_SH5)
5196 int offset_base, offset;
5197 rtx r0 = NULL_RTX;
5198 int offset_in_r0 = -1;
5199 int sp_in_r0 = 0;
5200 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
5201 int total_size, save_size;
5202 save_schedule schedule;
5203 save_entry *entry;
5204 int *tmp_pnt;
5206 if (call_used_regs[R0_REG] && ! fixed_regs[R0_REG]
5207 && ! current_function_interrupt)
5208 r0 = gen_rtx_REG (Pmode, R0_REG);
5210 /* D is the actual number of bytes that we need for saving registers;
5211 however, in initial_elimination_offset we have committed to using
5212 an additional TREGS_SPACE bytes.  In order to keep both the
5213 addresses of arguments supplied by the caller and local variables
5214 valid, we must keep this gap.  Place it between the incoming
5215 arguments and the actually saved registers in a bid to optimize
5216 locality of reference.  */
5217 total_size = d + tregs_space;
5218 total_size += rounded_frame_size (total_size);
5219 save_size = total_size - rounded_frame_size (d);
5220 if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
5221 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
5222 - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
5224 /* If adjusting the stack in a single step costs nothing extra, do so.
5225 I.e. if either a single addi is enough, or we need a movi anyway,
5226 and we don't exceed the maximum offset range (the test for the
5227 latter is conservative for simplicity). */
5228 if (TARGET_SHMEDIA
5229 && (CONST_OK_FOR_I10 (-total_size)
5230 || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
5231 && total_size <= 2044)))
5232 d_rounding = total_size - save_size;
5234 offset_base = d + d_rounding;
5236 output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
5237 0, NULL);
5239 sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
5240 tmp_pnt = schedule.temps;
5241 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
5243 enum machine_mode mode = entry->mode;
5244 int reg = entry->reg;
5245 rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
5247 offset = entry->offset;
5249 reg_rtx = gen_rtx_REG (mode, reg);
5251 mem_rtx = gen_rtx_MEM (mode,
5252 gen_rtx_PLUS (Pmode,
5253 stack_pointer_rtx,
5254 GEN_INT (offset)));
5256 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
5258 if (! r0)
5259 abort ();
5260 mem_rtx = NULL_RTX;
5262 try_pre_dec:
5264 if (HAVE_PRE_DECREMENT
5265 && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
5266 || mem_rtx == NULL_RTX
5267 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
5269 pre_dec = gen_rtx_MEM (mode,
5270 gen_rtx_PRE_DEC (Pmode, r0));
5272 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
5273 pre_dec_ok);
5275 pre_dec = NULL_RTX;
5277 break;
5279 pre_dec_ok:
5280 mem_rtx = NULL_RTX;
5281 offset += GET_MODE_SIZE (mode);
5283 while (0);
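/* (Summary of the goto dance above: the plain sp+offset address is
   tried first; a pre-decrement @-r0 address is then preferred when
   the running r0 offset lines up, when sp+offset was not legitimate,
   or when the register must be staged through r0 anyway.  If the
   pre-dec form won, mem_rtx is null here and the code below sets up
   r0 accordingly.)  */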
5285 if (mem_rtx != NULL_RTX)
5286 goto addr_ok;
5288 if (offset_in_r0 == -1)
5290 emit_move_insn (r0, GEN_INT (offset));
5291 offset_in_r0 = offset;
5293 else if (offset != offset_in_r0)
5295 emit_move_insn (r0,
5296 gen_rtx_PLUS
5297 (Pmode, r0,
5298 GEN_INT (offset - offset_in_r0)));
5299 offset_in_r0 += offset - offset_in_r0;
5302 if (pre_dec != NULL_RTX)
5304 if (! sp_in_r0)
5306 emit_move_insn (r0,
5307 gen_rtx_PLUS
5308 (Pmode, r0, stack_pointer_rtx));
5309 sp_in_r0 = 1;
5312 offset -= GET_MODE_SIZE (mode);
5313 offset_in_r0 -= GET_MODE_SIZE (mode);
5315 mem_rtx = pre_dec;
5317 else if (sp_in_r0)
5318 mem_rtx = gen_rtx_MEM (mode, r0);
5319 else
5320 mem_rtx = gen_rtx_MEM (mode,
5321 gen_rtx_PLUS (Pmode,
5322 stack_pointer_rtx,
5323 r0));
5325 /* We must not use an r0-based address for target-branch
5326 registers or for special registers without pre-dec
5327 memory addresses, since we store their values in r0
5328 first. */
5329 if (TARGET_REGISTER_P (reg)
5330 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
5331 && mem_rtx != pre_dec))
5332 abort ();
5334 addr_ok:
5335 if (TARGET_REGISTER_P (reg)
5336 || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
5337 && mem_rtx != pre_dec))
5339 rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
5341 emit_move_insn (tmp_reg, reg_rtx);
5343 if (REGNO (tmp_reg) == R0_REG)
5345 offset_in_r0 = -1;
5346 sp_in_r0 = 0;
5347 if (refers_to_regno_p (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0))
5348 abort ();
5351 if (*++tmp_pnt <= 0)
5352 tmp_pnt = schedule.temps;
5354 reg_rtx = tmp_reg;
5357 rtx insn;
5359 /* Mark as interesting for the DWARF CFI generator.  */
5360 insn = emit_move_insn (mem_rtx, reg_rtx);
5361 RTX_FRAME_RELATED_P (insn) = 1;
5363 if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
5365 rtx reg_rtx = gen_rtx_REG (mode, reg);
5366 rtx set, note_rtx;
5367 rtx mem_rtx = gen_rtx_MEM (mode,
5368 gen_rtx_PLUS (Pmode,
5369 stack_pointer_rtx,
5370 GEN_INT (offset)));
5372 set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
5373 note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
5374 REG_NOTES (insn));
5375 REG_NOTES (insn) = note_rtx;
5380 if (entry->offset != d_rounding)
5381 abort ();
5383 else
5384 push_regs (&live_regs_mask, current_function_interrupt);
5386 if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
5388 rtx insn = get_last_insn ();
5389 rtx last = emit_insn (gen_GOTaddr2picreg ());
5391 /* Mark these insns as possibly dead. Sometimes, flow2 may
5392 delete all uses of the PIC register. In this case, let it
5393 delete the initialization too. */
5396 insn = NEXT_INSN (insn);
5398 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
5399 const0_rtx,
5400 REG_NOTES (insn));
5402 while (insn != last);
5405 if (SHMEDIA_REGS_STACK_ADJUST ())
5407 emit_move_insn (gen_rtx_REG (Pmode, R0_REG),
5408 function_symbol (TARGET_FPU_ANY
5409 ? "__GCC_push_shmedia_regs"
5410 : "__GCC_push_shmedia_regs_nofpu"));
5411 /* This must NOT go through the PLT, otherwise mach and macl
5412 may be clobbered. */
5413 emit_insn (gen_shmedia_save_restore_regs_compact
5414 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
5417 if (target_flags != save_flags && ! current_function_interrupt)
5419 rtx insn = emit_insn (gen_toggle_sz ());
5421 /* If we're lucky, a mode switch in the function body will
5422 overwrite fpscr, making this insn dead.  Tell flow this
5423 insn is OK to delete.  */
5424 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
5425 const0_rtx,
5426 REG_NOTES (insn));
5429 target_flags = save_flags;
5431 output_stack_adjust (-rounded_frame_size (d) + d_rounding,
5432 stack_pointer_rtx, 0, NULL);
5434 if (frame_pointer_needed)
5435 frame_insn (GEN_MOV (frame_pointer_rtx, stack_pointer_rtx));
5437 if (TARGET_SHCOMPACT
5438 && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
5440 /* This must NOT go through the PLT, otherwise mach and macl
5441 may be clobbered. */
5442 emit_move_insn (gen_rtx_REG (Pmode, R0_REG),
5443 function_symbol ("__GCC_shcompact_incoming_args"));
5444 emit_insn (gen_shcompact_incoming_args ());
5448 void
5449 sh_expand_epilogue (void)
5451 HARD_REG_SET live_regs_mask;
5452 int d, i;
5453 int d_rounding = 0;
5455 int save_flags = target_flags;
5456 int frame_size, save_size;
5457 int fpscr_deferred = 0;
5459 d = calc_live_regs (&live_regs_mask);
5461 save_size = d;
5462 frame_size = rounded_frame_size (d);
5464 if (TARGET_SH5)
5466 int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
5467 int total_size;
5468 if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
5469 d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
5470 - d % (STACK_BOUNDARY / BITS_PER_UNIT));
5472 total_size = d + tregs_space;
5473 total_size += rounded_frame_size (total_size);
5474 save_size = total_size - frame_size;
5476 /* If adjusting the stack in a single step costs nothing extra, do so.
5477 I.e. if either a single addi is enough, or we need a movi anyway,
5478 and we don't exceed the maximum offset range (the test for the
5479 latter is conservative for simplicity). */
5480 if (TARGET_SHMEDIA
5481 && ! frame_pointer_needed
5482 && (CONST_OK_FOR_I10 (total_size)
5483 || (! CONST_OK_FOR_I10 (save_size + d_rounding)
5484 && total_size <= 2044)))
5485 d_rounding = frame_size;
5487 frame_size -= d_rounding;
5490 if (frame_pointer_needed)
5492 output_stack_adjust (frame_size, frame_pointer_rtx, 1, &live_regs_mask);
5494 /* We must avoid moving the stack pointer adjustment past code
5495 which reads from the local frame, else an interrupt could
5496 occur after the SP adjustment and clobber data in the local
5497 frame. */
5498 emit_insn (gen_blockage ());
5499 emit_insn (GEN_MOV (stack_pointer_rtx, frame_pointer_rtx));
5501 else if (frame_size)
5503 /* We must avoid moving the stack pointer adjustment past code
5504 which reads from the local frame, else an interrupt could
5505 occur after the SP adjustment and clobber data in the local
5506 frame. */
5507 emit_insn (gen_blockage ());
5508 output_stack_adjust (frame_size, stack_pointer_rtx, 1, &live_regs_mask);
5511 if (SHMEDIA_REGS_STACK_ADJUST ())
5513 emit_move_insn (gen_rtx_REG (Pmode, R0_REG),
5514 function_symbol (TARGET_FPU_ANY
5515 ? "__GCC_pop_shmedia_regs"
5516 : "__GCC_pop_shmedia_regs_nofpu"));
5517 /* This must NOT go through the PLT, otherwise mach and macl
5518 may be clobbered. */
5519 emit_insn (gen_shmedia_save_restore_regs_compact
5520 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
5523 /* Pop all the registers. */
5525 if (target_flags != save_flags && ! current_function_interrupt)
5526 emit_insn (gen_toggle_sz ());
5527 if (TARGET_SH5)
5529 int offset_base, offset;
5530 int offset_in_r0 = -1;
5531 int sp_in_r0 = 0;
5532 rtx r0 = gen_rtx_REG (Pmode, R0_REG);
5533 save_schedule schedule;
5534 save_entry *entry;
5535 int *tmp_pnt;
5537 entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
5538 offset_base = -entry[1].offset + d_rounding;
5539 tmp_pnt = schedule.temps;
5540 for (; entry->mode != VOIDmode; entry--)
5542 enum machine_mode mode = entry->mode;
5543 int reg = entry->reg;
5544 rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
5546 offset = offset_base + entry->offset;
5547 reg_rtx = gen_rtx_REG (mode, reg);
5549 mem_rtx = gen_rtx_MEM (mode,
5550 gen_rtx_PLUS (Pmode,
5551 stack_pointer_rtx,
5552 GEN_INT (offset)));
5554 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
5556 mem_rtx = NULL_RTX;
5558 try_post_inc:
5560 if (HAVE_POST_INCREMENT
5561 && (offset == offset_in_r0
5562 || (offset + GET_MODE_SIZE (mode) != d + d_rounding
5563 && mem_rtx == NULL_RTX)
5564 || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
5566 post_inc = gen_rtx_MEM (mode,
5567 gen_rtx_POST_INC (Pmode, r0));
5569 GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
5570 post_inc_ok);
5572 post_inc = NULL_RTX;
5574 break;
5576 post_inc_ok:
5577 mem_rtx = NULL_RTX;
5579 while (0);
5581 if (mem_rtx != NULL_RTX)
5582 goto addr_ok;
5584 if (offset_in_r0 == -1)
5586 emit_move_insn (r0, GEN_INT (offset));
5587 offset_in_r0 = offset;
5589 else if (offset != offset_in_r0)
5591 emit_move_insn (r0,
5592 gen_rtx_PLUS
5593 (Pmode, r0,
5594 GEN_INT (offset - offset_in_r0)));
5595 offset_in_r0 += offset - offset_in_r0;
5598 if (post_inc != NULL_RTX)
5600 if (! sp_in_r0)
5602 emit_move_insn (r0,
5603 gen_rtx_PLUS
5604 (Pmode, r0, stack_pointer_rtx));
5605 sp_in_r0 = 1;
5608 mem_rtx = post_inc;
5610 offset_in_r0 += GET_MODE_SIZE (mode);
5612 else if (sp_in_r0)
5613 mem_rtx = gen_rtx_MEM (mode, r0);
5614 else
5615 mem_rtx = gen_rtx_MEM (mode,
5616 gen_rtx_PLUS (Pmode,
5617 stack_pointer_rtx,
5618 r0));
5620 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
5621 && mem_rtx != post_inc)
5622 abort ();
5624 addr_ok:
5625 if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
5626 && mem_rtx != post_inc)
5628 insn = emit_move_insn (r0, mem_rtx);
5629 mem_rtx = r0;
5631 else if (TARGET_REGISTER_P (reg))
5633 rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
5635 /* Give the scheduler a bit of freedom by using up to
5636 MAX_TEMPS registers in a round-robin fashion. */
5637 insn = emit_move_insn (tmp_reg, mem_rtx);
5638 mem_rtx = tmp_reg;
5639 if (*++tmp_pnt < 0)
5640 tmp_pnt = schedule.temps;
5643 insn = emit_move_insn (reg_rtx, mem_rtx);
5644 if (reg == PR_MEDIA_REG && sh_media_register_for_return () >= 0)
5645 /* This is dead, unless we return with a sibcall. */
5646 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
5647 const0_rtx,
5648 REG_NOTES (insn));
5651 if (entry->offset + offset_base != d + d_rounding)
5652 abort ();
5654 else /* ! TARGET_SH5 */
5656 save_size = 0;
5657 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
5658 pop (PR_REG);
5659 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5661 int j = (FIRST_PSEUDO_REGISTER - 1) - i;
5663 if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
5664 && hard_regs_intersect_p (&live_regs_mask,
5665 &reg_class_contents[DF_REGS]))
5666 fpscr_deferred = 1;
5667 else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j))
5668 pop (j);
5669 if (j == FIRST_FP_REG && fpscr_deferred)
5670 pop (FPSCR_REG);
5674 if (target_flags != save_flags && ! current_function_interrupt)
5675 emit_insn (gen_toggle_sz ());
5676 target_flags = save_flags;
5678 output_stack_adjust (extra_push + current_function_pretend_args_size
5679 + save_size + d_rounding
5680 + current_function_args_info.stack_regs * 8,
5681 stack_pointer_rtx, 1, NULL);
5683 if (current_function_calls_eh_return)
5684 emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
5685 EH_RETURN_STACKADJ_RTX));
5687 /* Switch back to the normal stack if necessary. */
5688 if (sp_switch)
5689 emit_insn (gen_sp_switch_2 ());
5691 /* Tell flow the insn that pops PR isn't dead. */
5692 /* PR_REG will never be live in SHmedia mode, and we don't need to
5693 USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
5694 by the return pattern. */
5695 if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
5696 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, PR_REG)));
5699 static int sh_need_epilogue_known = 0;
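/* (The cache above is a tri-state: 0 means not yet computed, 1 means
   an epilogue is known to be needed, and -1 means it is known to be
   unnecessary -- hence the "> 0" test below.)  */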
5702 int sh_need_epilogue (void)
5704 if (! sh_need_epilogue_known)
5706 rtx epilogue;
5708 start_sequence ();
5709 sh_expand_epilogue ();
5710 epilogue = get_insns ();
5711 end_sequence ();
5712 sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
5714 return sh_need_epilogue_known > 0;
5717 /* Emit code to change the current function's return address to RA.
5718 TEMP is available as a scratch register, if needed. */
5720 void
5721 sh_set_return_address (rtx ra, rtx tmp)
5723 HARD_REG_SET live_regs_mask;
5724 int d;
5725 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
5726 int pr_offset;
5728 d = calc_live_regs (&live_regs_mask);
5730 /* If pr_reg isn't live, we can set it (or the register given by
5731 sh_media_register_for_return) directly.  */
5732 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
5734 rtx rr;
5736 if (TARGET_SHMEDIA)
5738 int rr_regno = sh_media_register_for_return ();
5740 if (rr_regno < 0)
5741 rr_regno = pr_reg;
5743 rr = gen_rtx_REG (DImode, rr_regno);
5745 else
5746 rr = gen_rtx_REG (SImode, pr_reg);
5748 emit_insn (GEN_MOV (rr, ra));
5749 /* Tell flow the register for return isn't dead. */
5750 emit_insn (gen_rtx_USE (VOIDmode, rr));
5751 return;
5754 if (TARGET_SH5)
5756 int offset;
5757 save_schedule schedule;
5758 save_entry *entry;
5760 entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
5761 offset = entry[1].offset;
5762 for (; entry->mode != VOIDmode; entry--)
5763 if (entry->reg == pr_reg)
5764 goto found;
5766 /* We couldn't find the pr register.  */
5767 abort ();
5769 found:
5770 offset = entry->offset - offset;
5771 pr_offset = (rounded_frame_size (d) + offset
5772 + SHMEDIA_REGS_STACK_ADJUST ());
5774 else
5775 pr_offset = rounded_frame_size (d);
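/* At this point PR_OFFSET is the byte offset of PR's save slot from
   the frame pointer; the code below forms that address in TMP and
   stores the new return address through it.  */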
5777 emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
5778 emit_insn (GEN_ADD3 (tmp, tmp, frame_pointer_rtx));
5780 tmp = gen_rtx_MEM (Pmode, tmp);
5781 emit_insn (GEN_MOV (tmp, ra));
5784 /* Clear variables at function end. */
5786 static void
5787 sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5788 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5790 trap_exit = pragma_interrupt = pragma_trapa = pragma_nosave_low_regs = 0;
5791 sh_need_epilogue_known = 0;
5792 sp_switch = NULL_RTX;
5795 static rtx
5796 sh_builtin_saveregs (void)
5798 /* First unnamed integer register. */
5799 int first_intreg = current_function_args_info.arg_count[(int) SH_ARG_INT];
5800 /* Number of integer registers we need to save. */
5801 int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
5802 /* First unnamed SFmode float reg.  */
5803 int first_floatreg = current_function_args_info.arg_count[(int) SH_ARG_FLOAT];
5804 /* Number of SFmode float regs to save. */
5805 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
5806 rtx regbuf, fpregs;
5807 int bufsize, regno;
5808 HOST_WIDE_INT alias_set;
5810 if (TARGET_SH5)
5812 if (n_intregs)
5814 int pushregs = n_intregs;
5816 while (pushregs < NPARM_REGS (SImode) - 1
5817 && (CALL_COOKIE_INT_REG_GET
5818 (current_function_args_info.call_cookie,
5819 NPARM_REGS (SImode) - pushregs)
5820 == 1))
5822 current_function_args_info.call_cookie
5823 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
5824 - pushregs, 1);
5825 pushregs++;
5828 if (pushregs == NPARM_REGS (SImode))
5829 current_function_args_info.call_cookie
5830 |= (CALL_COOKIE_INT_REG (0, 1)
5831 | CALL_COOKIE_STACKSEQ (pushregs - 1));
5832 else
5833 current_function_args_info.call_cookie
5834 |= CALL_COOKIE_STACKSEQ (pushregs);
5836 current_function_pretend_args_size += 8 * n_intregs;
5838 if (TARGET_SHCOMPACT)
5839 return const0_rtx;
5842 if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
5844 error ("__builtin_saveregs not supported by this subtarget");
5845 return const0_rtx;
5848 if (TARGET_SHMEDIA)
5849 n_floatregs = 0;
5851 /* Allocate block of memory for the regs. */
5852 /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
5853 Or can assign_stack_local accept a 0 SIZE argument? */
5854 bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
5856 if (TARGET_SHMEDIA)
5857 regbuf = gen_rtx_MEM (BLKmode,
5858 gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
5859 else if (n_floatregs & 1)
5861 rtx addr;
5863 regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
5864 addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
5865 emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
5866 regbuf = change_address (regbuf, BLKmode, addr);
5868 else
5869 regbuf = assign_stack_local (BLKmode, bufsize, 0);
5870 alias_set = get_varargs_alias_set ();
5871 set_mem_alias_set (regbuf, alias_set);
5873 /* Save int args.
5874 This is optimized to only save the regs that are necessary. Explicitly
5875 named args need not be saved. */
5876 if (n_intregs > 0)
5877 move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
5878 adjust_address (regbuf, BLKmode,
5879 n_floatregs * UNITS_PER_WORD),
5880 n_intregs);
5882 if (TARGET_SHMEDIA)
5883 /* Return the address of the regbuf. */
5884 return XEXP (regbuf, 0);
5886 /* Save float args.
5887 This is optimized to only save the regs that are necessary. Explicitly
5888 named args need not be saved.
5889 We explicitly build a pointer to the buffer because it halves the insn
5890 count when not optimizing (otherwise the pointer is built for each reg
5891 saved).
5892 We emit the moves in reverse order so that we can use predecrement. */
5894 fpregs = gen_reg_rtx (Pmode);
5895 emit_move_insn (fpregs, XEXP (regbuf, 0));
5896 emit_insn (gen_addsi3 (fpregs, fpregs,
5897 GEN_INT (n_floatregs * UNITS_PER_WORD)));
5898 if (TARGET_SH4)
5900 rtx mem;
5901 for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
5903 emit_insn (gen_addsi3 (fpregs, fpregs,
5904 GEN_INT (-2 * UNITS_PER_WORD)));
5905 mem = gen_rtx_MEM (DFmode, fpregs);
5906 set_mem_alias_set (mem, alias_set);
5907 emit_move_insn (mem,
5908 gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
5910 regno = first_floatreg;
5911 if (regno & 1)
5913 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
5914 mem = gen_rtx_MEM (SFmode, fpregs);
5915 set_mem_alias_set (mem, alias_set);
5916 emit_move_insn (mem,
5917 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
5918 - (TARGET_LITTLE_ENDIAN != 0)));
5921 else
5922 for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
5924 rtx mem;
5926 emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (- UNITS_PER_WORD)));
5927 mem = gen_rtx_MEM (SFmode, fpregs);
5928 set_mem_alias_set (mem, alias_set);
5929 emit_move_insn (mem,
5930 gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
5933 /* Return the address of the regbuf. */
5934 return XEXP (regbuf, 0);
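/* A sketch of the save-area layout the code above produces on a
   non-SH5 SH4 target, assuming the varargs function
   `void f (int a, ...)': first_intreg is 1 and first_floatreg is 0,
   so n_intregs == 3 and n_floatregs == 8, and with UNITS_PER_WORD == 4
   the buffer looks like

	regbuf + 0	fr4 ... fr11	(eight SFmode slots, filled in
					 reverse via predecrement)
	regbuf + 32	r5, r6, r7	(three SImode slots)

   sh_va_start below then points __va_next_fp at regbuf and __va_next_o
   at regbuf + n_floatregs * UNITS_PER_WORD.  */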
5937 /* Define the `__builtin_va_list' type for the ABI. */
5939 static tree
5940 sh_build_builtin_va_list (void)
5942 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
5943 tree record;
5945 if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
5946 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
5947 return ptr_type_node;
5949 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5951 f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
5952 ptr_type_node);
5953 f_next_o_limit = build_decl (FIELD_DECL,
5954 get_identifier ("__va_next_o_limit"),
5955 ptr_type_node);
5956 f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
5957 ptr_type_node);
5958 f_next_fp_limit = build_decl (FIELD_DECL,
5959 get_identifier ("__va_next_fp_limit"),
5960 ptr_type_node);
5961 f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
5962 ptr_type_node);
5964 DECL_FIELD_CONTEXT (f_next_o) = record;
5965 DECL_FIELD_CONTEXT (f_next_o_limit) = record;
5966 DECL_FIELD_CONTEXT (f_next_fp) = record;
5967 DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
5968 DECL_FIELD_CONTEXT (f_next_stack) = record;
5970 TYPE_FIELDS (record) = f_next_o;
5971 TREE_CHAIN (f_next_o) = f_next_o_limit;
5972 TREE_CHAIN (f_next_o_limit) = f_next_fp;
5973 TREE_CHAIN (f_next_fp) = f_next_fp_limit;
5974 TREE_CHAIN (f_next_fp_limit) = f_next_stack;
5976 layout_type (record);
5978 return record;
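/* For the targets that take the RECORD_TYPE branch above, the type built
   here corresponds roughly to the following C declaration (the field
   names are the real ones; the annotations are a sketch of their use):

	typedef struct {
	  void *__va_next_o;	     -- next integer-register arg to fetch
	  void *__va_next_o_limit;   -- end of the integer-register save area
	  void *__va_next_fp;	     -- next FP-register arg to fetch
	  void *__va_next_fp_limit;  -- end of the FP-register save area
	  void *__va_next_stack;     -- next stack-passed arg
	} __builtin_va_list;

   SH5 and Hitachi/Renesas-ABI targets instead use a plain `void *'.  */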
5981 /* Implement `va_start' for varargs and stdarg. */
5983 void
5984 sh_va_start (tree valist, rtx nextarg)
5986 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
5987 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
5988 tree t, u;
5989 int nfp, nint;
5991 if (TARGET_SH5)
5993 expand_builtin_saveregs ();
5994 std_expand_builtin_va_start (valist, nextarg);
5995 return;
5998 if ((! TARGET_SH2E && ! TARGET_SH4)
5999 || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
6001 std_expand_builtin_va_start (valist, nextarg);
6002 return;
6005 f_next_o = TYPE_FIELDS (va_list_type_node);
6006 f_next_o_limit = TREE_CHAIN (f_next_o);
6007 f_next_fp = TREE_CHAIN (f_next_o_limit);
6008 f_next_fp_limit = TREE_CHAIN (f_next_fp);
6009 f_next_stack = TREE_CHAIN (f_next_fp_limit);
6011 next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o);
6012 next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
6013 valist, f_next_o_limit);
6014 next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp);
6015 next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
6016 valist, f_next_fp_limit);
6017 next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
6018 valist, f_next_stack);
6020 /* Call __builtin_saveregs. */
6021 u = make_tree (ptr_type_node, expand_builtin_saveregs ());
6022 t = build (MODIFY_EXPR, ptr_type_node, next_fp, u);
6023 TREE_SIDE_EFFECTS (t) = 1;
6024 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6026 nfp = current_function_args_info.arg_count[SH_ARG_FLOAT];
6027 if (nfp < 8)
6028 nfp = 8 - nfp;
6029 else
6030 nfp = 0;
6031 u = fold (build (PLUS_EXPR, ptr_type_node, u,
6032 build_int_2 (UNITS_PER_WORD * nfp, 0)));
6033 t = build (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
6034 TREE_SIDE_EFFECTS (t) = 1;
6035 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6037 t = build (MODIFY_EXPR, ptr_type_node, next_o, u);
6038 TREE_SIDE_EFFECTS (t) = 1;
6039 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6041 nint = current_function_args_info.arg_count[SH_ARG_INT];
6042 if (nint < 4)
6043 nint = 4 - nint;
6044 else
6045 nint = 0;
6046 u = fold (build (PLUS_EXPR, ptr_type_node, u,
6047 build_int_2 (UNITS_PER_WORD * nint, 0)));
6048 t = build (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
6049 TREE_SIDE_EFFECTS (t) = 1;
6050 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6052 u = make_tree (ptr_type_node, nextarg);
6053 t = build (MODIFY_EXPR, ptr_type_node, next_stack, u);
6054 TREE_SIDE_EFFECTS (t) = 1;
6055 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
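/* A worked example of the arithmetic above, assuming SH4,
   UNITS_PER_WORD == 4 and a function declared
   `void f (float a, int b, ...)': at the point of va_start,
   arg_count[SH_ARG_FLOAT] == 1 and arg_count[SH_ARG_INT] == 1, so
   nfp == 7 and nint == 3, and the fields are initialized to

	__va_next_fp	   = regbuf
	__va_next_fp_limit = regbuf + 7 * 4
	__va_next_o	   = regbuf + 7 * 4
	__va_next_o_limit  = regbuf + 7 * 4 + 3 * 4
	__va_next_stack	   = nextarg.  */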
6058 /* Implement `va_arg'. */
6060 rtx
6061 sh_va_arg (tree valist, tree type)
6063 HOST_WIDE_INT size, rsize;
6064 tree tmp, pptr_type_node;
6065 rtx addr_rtx, r;
6066 rtx result_ptr, result = NULL_RTX;
6067 int pass_by_ref = MUST_PASS_IN_STACK (TYPE_MODE (type), type);
6068 rtx lab_over;
6070 size = int_size_in_bytes (type);
6071 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6072 pptr_type_node = build_pointer_type (ptr_type_node);
6074 if (pass_by_ref)
6075 type = build_pointer_type (type);
6077 if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
6078 && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
6080 tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
6081 tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
6082 int pass_as_float;
6083 rtx lab_false;
6085 f_next_o = TYPE_FIELDS (va_list_type_node);
6086 f_next_o_limit = TREE_CHAIN (f_next_o);
6087 f_next_fp = TREE_CHAIN (f_next_o_limit);
6088 f_next_fp_limit = TREE_CHAIN (f_next_fp);
6089 f_next_stack = TREE_CHAIN (f_next_fp_limit);
6091 next_o = build (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o);
6092 next_o_limit = build (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
6093 valist, f_next_o_limit);
6094 next_fp = build (COMPONENT_REF, TREE_TYPE (f_next_fp),
6095 valist, f_next_fp);
6096 next_fp_limit = build (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
6097 valist, f_next_fp_limit);
6098 next_stack = build (COMPONENT_REF, TREE_TYPE (f_next_stack),
6099 valist, f_next_stack);
6101 /* Structures with a single member with a distinct mode are passed
6102 like their member. This is relevant if the latter has a REAL_TYPE
6103 or COMPLEX_TYPE type. */
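/* For instance, struct s { double d; } is handled below like a bare
   double, so it can travel in the floating-point registers. */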
6104 if (TREE_CODE (type) == RECORD_TYPE
6105 && TYPE_FIELDS (type)
6106 && TREE_CODE (TYPE_FIELDS (type)) == FIELD_DECL
6107 && (TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == REAL_TYPE
6108 || TREE_CODE (TREE_TYPE (TYPE_FIELDS (type))) == COMPLEX_TYPE)
6109 && TREE_CHAIN (TYPE_FIELDS (type)) == NULL_TREE)
6110 type = TREE_TYPE (TYPE_FIELDS (type));
6111 if (TARGET_SH4)
6113 pass_as_float = ((TREE_CODE (type) == REAL_TYPE && size <= 8)
6114 || (TREE_CODE (type) == COMPLEX_TYPE
6115 && TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
6116 && size <= 16));
6118 else
6120 pass_as_float = (TREE_CODE (type) == REAL_TYPE && size == 4);
6123 addr_rtx = gen_reg_rtx (Pmode);
6124 lab_false = gen_label_rtx ();
6125 lab_over = gen_label_rtx ();
6127 tmp = make_tree (pptr_type_node, addr_rtx);
6128 valist = build1 (INDIRECT_REF, ptr_type_node, tmp);
6130 if (pass_as_float)
6132 int first_floatreg
6133 = current_function_args_info.arg_count[(int) SH_ARG_FLOAT];
6134 int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6136 emit_cmp_and_jump_insns (expand_expr (next_fp, NULL_RTX, Pmode,
6137 EXPAND_NORMAL),
6138 expand_expr (next_fp_limit, NULL_RTX,
6139 Pmode, EXPAND_NORMAL),
6140 GE, const1_rtx, Pmode, 1, lab_false);
6142 if (TYPE_ALIGN (type) > BITS_PER_WORD
6143 || (((TREE_CODE (type) == REAL_TYPE && size == 8) || size == 16)
6144 && (n_floatregs & 1)))
6146 tmp = build (BIT_AND_EXPR, ptr_type_node, next_fp,
6147 build_int_2 (UNITS_PER_WORD, 0));
6148 tmp = build (PLUS_EXPR, ptr_type_node, next_fp, tmp);
6149 tmp = build (MODIFY_EXPR, ptr_type_node, next_fp, tmp);
6150 TREE_SIDE_EFFECTS (tmp) = 1;
6151 expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6154 tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
6155 r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
6156 if (r != addr_rtx)
6157 emit_move_insn (addr_rtx, r);
6159 #ifdef FUNCTION_ARG_SCmode_WART
6160 if (TYPE_MODE (type) == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
6162 rtx addr, real, imag, result_value, slot;
6163 tree subtype = TREE_TYPE (type);
6165 addr = std_expand_builtin_va_arg (valist, subtype);
6166 #ifdef POINTERS_EXTEND_UNSIGNED
6167 if (GET_MODE (addr) != Pmode)
6168 addr = convert_memory_address (Pmode, addr);
6169 #endif
6170 imag = gen_rtx_MEM (TYPE_MODE (type), addr);
6171 set_mem_alias_set (imag, get_varargs_alias_set ());
6173 addr = std_expand_builtin_va_arg (valist, subtype);
6174 #ifdef POINTERS_EXTEND_UNSIGNED
6175 if (GET_MODE (addr) != Pmode)
6176 addr = convert_memory_address (Pmode, addr);
6177 #endif
6178 real = gen_rtx_MEM (TYPE_MODE (type), addr);
6179 set_mem_alias_set (real, get_varargs_alias_set ());
6181 result_value = gen_rtx_CONCAT (SCmode, real, imag);
6182 /* ??? this interface is stupid - why require a pointer? */
6183 result = gen_reg_rtx (Pmode);
6184 slot = assign_stack_temp (SCmode, 8, 0);
6185 emit_move_insn (slot, result_value);
6186 emit_move_insn (result, XEXP (slot, 0));
6188 #endif /* FUNCTION_ARG_SCmode_WART */
6190 emit_jump_insn (gen_jump (lab_over));
6191 emit_barrier ();
6192 emit_label (lab_false);
6194 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
6195 r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
6196 if (r != addr_rtx)
6197 emit_move_insn (addr_rtx, r);
6199 else
6201 tmp = build (PLUS_EXPR, ptr_type_node, next_o,
6202 build_int_2 (rsize, 0));
6204 emit_cmp_and_jump_insns (expand_expr (tmp, NULL_RTX, Pmode,
6205 EXPAND_NORMAL),
6206 expand_expr (next_o_limit, NULL_RTX,
6207 Pmode, EXPAND_NORMAL),
6208 GT, const1_rtx, Pmode, 1, lab_false);
6210 tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
6211 r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
6212 if (r != addr_rtx)
6213 emit_move_insn (addr_rtx, r);
6215 emit_jump_insn (gen_jump (lab_over));
6216 emit_barrier ();
6217 emit_label (lab_false);
6219 if (size > 4 && ! TARGET_SH4)
6221 tmp = build (MODIFY_EXPR, ptr_type_node, next_o, next_o_limit);
6222 TREE_SIDE_EFFECTS (tmp) = 1;
6223 expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6226 tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
6227 r = expand_expr (tmp, addr_rtx, Pmode, EXPAND_NORMAL);
6228 if (r != addr_rtx)
6229 emit_move_insn (addr_rtx, r);
6232 if (! result)
6233 emit_label (lab_over);
6236 /* ??? In va-sh.h, there had been code to make values larger than
6237 size 8 indirect. This does not match the FUNCTION_ARG macros. */
6239 result_ptr = std_expand_builtin_va_arg (valist, type);
6240 if (result)
6242 emit_move_insn (result, result_ptr);
6243 emit_label (lab_over);
6245 else
6246 result = result_ptr;
6248 if (pass_by_ref)
6250 #ifdef POINTERS_EXTEND_UNSIGNED
6251 if (GET_MODE (addr) != Pmode)
6252 addr = convert_memory_address (Pmode, result);
6253 #endif
6254 result = gen_rtx_MEM (ptr_mode, force_reg (Pmode, result));
6255 set_mem_alias_set (result, get_varargs_alias_set ());
6257 /* ??? expand_builtin_va_arg will also set the alias set of the dereferenced
6258 argument to the varargs alias set. */
6259 return result;
6262 static bool
6263 sh_promote_prototypes (tree type)
6265 if (TARGET_HITACHI)
6266 return 0;
6267 if (! type)
6268 return 1;
6269 return ! sh_attr_renesas_p (type);
6272 /* Define where to put the arguments to a function.
6273 Value is zero to push the argument on the stack,
6274 or a hard register in which to store the argument.
6276 MODE is the argument's machine mode.
6277 TYPE is the data type of the argument (as a tree).
6278 This is null for libcalls where that information may
6279 not be available.
6280 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6281 the preceding args and about the function being called.
6282 NAMED is nonzero if this argument is a named parameter
6283 (otherwise it is an extra parameter matching an ellipsis).
6285 On SH the first args are normally in registers
6286 and the rest are pushed. Any arg that starts within the first
6287 NPARM_REGS words is at least partially passed in a register unless
6288 its data type forbids. */
6291 rtx
6292 sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
6293 tree type, int named)
6295 if (! TARGET_SH5 && mode == VOIDmode)
6296 return GEN_INT (ca->renesas_abi ? 1 : 0);
6298 if (! TARGET_SH5
6299 && PASS_IN_REG_P (*ca, mode, type)
6300 && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
6302 int regno;
6304 if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
6305 && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
6307 rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
6308 gen_rtx_REG (SFmode,
6309 BASE_ARG_REG (mode)
6310 + (ROUND_REG (*ca, mode) ^ 1)),
6311 const0_rtx);
6312 rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
6313 gen_rtx_REG (SFmode,
6314 BASE_ARG_REG (mode)
6315 + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
6316 GEN_INT (4));
6317 return gen_rtx_PARALLEL (SCmode, gen_rtvec (2, r1, r2));
6320 /* If the alignment of a DF value causes an SF register to be
6321 skipped, we will use that skipped register for the next SF
6322 value. */
6323 if ((TARGET_HITACHI || ca->renesas_abi)
6324 && ca->free_single_fp_reg
6325 && mode == SFmode)
6326 return gen_rtx_REG (mode, ca->free_single_fp_reg);
6328 regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
6329 ^ (mode == SFmode && TARGET_SH4
6330 && TARGET_LITTLE_ENDIAN != 0
6331 && ! TARGET_HITACHI && ! ca->renesas_abi);
6332 return gen_rtx_REG (mode, regno);
6336 if (TARGET_SH5)
6338 if (mode == VOIDmode && TARGET_SHCOMPACT)
6339 return GEN_INT (ca->call_cookie);
6341 /* The following test assumes unnamed arguments are promoted to
6342 DFmode. */
6343 if (mode == SFmode && ca->free_single_fp_reg)
6344 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
6346 if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
6347 && (named || ! ca->prototype_p)
6348 && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
6350 if (! ca->prototype_p && TARGET_SHMEDIA)
6351 return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
6353 return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
6354 FIRST_FP_PARM_REG
6355 + ca->arg_count[(int) SH_ARG_FLOAT]);
6358 if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
6359 && (! TARGET_SHCOMPACT
6360 || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
6361 && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
6362 type, named))))
6364 return gen_rtx_REG (mode, (FIRST_PARM_REG
6365 + ca->arg_count[(int) SH_ARG_INT]));
6368 return 0;
6371 return 0;
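/* As an illustration of the SCmode splitting above: on a little-endian
   SH4 with the GCC ABI, an SCmode argument whose first half would land
   in fr4 comes back as

	(parallel:SC [(expr_list (reg:SF fr5) (const_int 0))
		      (expr_list (reg:SF fr4) (const_int 4))])

   i.e. the two SFmode halves are swapped within the register pair.
   The register numbers are a sketch; BASE_ARG_REG (SFmode) supplies
   the real base. */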
6374 /* Update the data in CUM to advance over an argument
6375 of mode MODE and data type TYPE.
6376 (TYPE is null for libcalls where that information may not be
6377 available.) */
6379 void
6380 sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
6381 tree type, int named)
6383 if (ca->force_mem)
6384 ca->force_mem = 0;
6385 else if (TARGET_SH5)
6387 tree type2 = (ca->byref && type
6388 ? TREE_TYPE (type)
6389 : type);
6390 enum machine_mode mode2 = (ca->byref && type
6391 ? TYPE_MODE (type2)
6392 : mode);
6393 int dwords = ((ca->byref
6394 ? ca->byref
6395 : mode2 == BLKmode
6396 ? int_size_in_bytes (type2)
6397 : GET_MODE_SIZE (mode2)) + 7) / 8;
6398 int numregs = MIN (dwords, NPARM_REGS (SImode)
6399 - ca->arg_count[(int) SH_ARG_INT]);
6401 if (numregs)
6403 ca->arg_count[(int) SH_ARG_INT] += numregs;
6404 if (TARGET_SHCOMPACT
6405 && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
6407 ca->call_cookie
6408 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
6409 - numregs, 1);
6410 /* N.B. We want this also for outgoing. */
6411 ca->stack_regs += numregs;
6413 else if (ca->byref)
6415 if (! ca->outgoing)
6416 ca->stack_regs += numregs;
6417 ca->byref_regs += numregs;
6418 ca->byref = 0;
6419 do
6420 ca->call_cookie
6421 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
6422 - numregs, 2);
6423 while (--numregs);
6424 ca->call_cookie
6425 |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
6426 - 1, 1);
6428 else if (dwords > numregs)
6430 int pushregs = numregs;
6432 if (TARGET_SHCOMPACT)
6433 ca->stack_regs += numregs;
6434 while (pushregs < NPARM_REGS (SImode) - 1
6435 && (CALL_COOKIE_INT_REG_GET
6436 (ca->call_cookie,
6437 NPARM_REGS (SImode) - pushregs)
6438 == 1))
6440 ca->call_cookie
6441 &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6442 - pushregs, 1);
6443 pushregs++;
6445 if (numregs == NPARM_REGS (SImode))
6446 ca->call_cookie
6447 |= CALL_COOKIE_INT_REG (0, 1)
6448 | CALL_COOKIE_STACKSEQ (numregs - 1);
6449 else
6450 ca->call_cookie
6451 |= CALL_COOKIE_STACKSEQ (numregs);
6454 if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
6455 && (named || ! ca->prototype_p))
6457 if (mode2 == SFmode && ca->free_single_fp_reg)
6458 ca->free_single_fp_reg = 0;
6459 else if (ca->arg_count[(int) SH_ARG_FLOAT]
6460 < NPARM_REGS (SFmode))
6462 int numfpregs
6463 = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
6464 NPARM_REGS (SFmode)
6465 - ca->arg_count[(int) SH_ARG_FLOAT]);
6467 ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
6469 if (TARGET_SHCOMPACT && ! ca->prototype_p)
6471 if (ca->outgoing && numregs > 0)
6472 do
6473 {
6474 ca->call_cookie
6475 |= (CALL_COOKIE_INT_REG
6476 (ca->arg_count[(int) SH_ARG_INT]
6477 - numregs + ((numfpregs - 2) / 2),
6478 4 + (ca->arg_count[(int) SH_ARG_FLOAT]
6479 - numfpregs) / 2));
6480 }
6481 while (numfpregs -= 2);
6483 else if (mode2 == SFmode && (named)
6484 && (ca->arg_count[(int) SH_ARG_FLOAT]
6485 < NPARM_REGS (SFmode)))
6486 ca->free_single_fp_reg
6487 = FIRST_FP_PARM_REG - numfpregs
6488 + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
6491 return;
6494 if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
6496 /* Note that we've used the skipped register. */
6497 if (mode == SFmode && ca->free_single_fp_reg)
6499 ca->free_single_fp_reg = 0;
6500 return;
6502 /* When we have a DF after an SF, there's an SF register that gets
6503 skipped in order to align the DF value. We note this skipped
6504 register, because the next SF value will use it, and not the
6505 SF that follows the DF. */
6506 if (mode == DFmode
6507 && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
6509 ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
6510 + BASE_ARG_REG (mode));
6514 if (! (TARGET_SH4 || ca->renesas_abi)
6515 || PASS_IN_REG_P (*ca, mode, type))
6516 (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
6517 = (ROUND_REG (*ca, mode)
6518 + (mode == BLKmode
6519 ? ROUND_ADVANCE (int_size_in_bytes (type))
6520 : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
6523 /* The Renesas calling convention doesn't quite fit into this scheme since
6524 the address is passed like an invisible argument, but one that is always
6525 passed in memory. */
6526 static rtx
6527 sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
6529 if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
6530 return 0;
6531 return gen_rtx_REG (Pmode, 2);
6534 /* Worker function for TARGET_RETURN_IN_MEMORY. */
6536 static bool
6537 sh_return_in_memory (tree type, tree fndecl)
6539 if (TARGET_SH5)
6541 if (TYPE_MODE (type) == BLKmode)
6542 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
6543 else
6544 return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
6546 else
6548 return (TYPE_MODE (type) == BLKmode
6549 || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
6550 && TREE_CODE (type) == RECORD_TYPE));
6554 /* We actually emit the code in sh_expand_prologue. We used to use
6555 a static variable to flag that we need to emit this code, but that
6556 doesn't work when inlining, when functions are deferred and then emitted
6557 later. Fortunately, we already have two flags that are part of struct
6558 function that tell if a function uses varargs or stdarg. */
6559 static void
6560 sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
6561 enum machine_mode mode ATTRIBUTE_UNUSED,
6562 tree type ATTRIBUTE_UNUSED,
6563 int *pretend_arg_size ATTRIBUTE_UNUSED,
6564 int second_time ATTRIBUTE_UNUSED)
6566 if (! current_function_stdarg)
6567 abort ();
6570 static bool
6571 sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
6573 return TARGET_SH5;
6576 static bool
6577 sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
6579 return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
6583 /* Define the offset between two registers, one to be eliminated, and
6584 the other its replacement, at the start of a routine. */
6586 int
6587 initial_elimination_offset (int from, int to)
6589 int regs_saved;
6590 int regs_saved_rounding = 0;
6591 int total_saved_regs_space;
6592 int total_auto_space;
6593 int save_flags = target_flags;
6594 int copy_flags;
6595 HARD_REG_SET live_regs_mask;
6597 shmedia_space_reserved_for_target_registers = false;
6598 regs_saved = calc_live_regs (&live_regs_mask);
6599 regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
6601 if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
6603 shmedia_space_reserved_for_target_registers = true;
6604 regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
6607 if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
6608 regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6609 - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
6611 total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
6612 copy_flags = target_flags;
6613 target_flags = save_flags;
6615 total_saved_regs_space = regs_saved + regs_saved_rounding;
6617 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
6618 return total_saved_regs_space + total_auto_space
6619 + current_function_args_info.byref_regs * 8;
6621 if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
6622 return total_saved_regs_space + total_auto_space
6623 + current_function_args_info.byref_regs * 8;
6625 /* Initial gap between fp and sp is 0. */
6626 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
6627 return 0;
6629 if (from == RETURN_ADDRESS_POINTER_REGNUM
6630 && (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM))
6632 if (TARGET_SH5)
6634 int n = total_saved_regs_space;
6635 int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6636 save_schedule schedule;
6637 save_entry *entry;
6639 n += total_auto_space;
6641 /* If it wasn't saved, there's not much we can do. */
6642 if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6643 return n;
6645 target_flags = copy_flags;
6647 sh5_schedule_saves (&live_regs_mask, &schedule, n);
6648 for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6649 if (entry->reg == pr_reg)
6651 target_flags = save_flags;
6652 return entry->offset;
6654 abort ();
6656 else
6657 return total_auto_space;
6660 abort ();
6663 /* Handle machine specific pragmas to be semi-compatible with Renesas
6664 compiler. */
6666 void
6667 sh_pr_interrupt (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
6669 pragma_interrupt = 1;
6672 void
6673 sh_pr_trapa (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
6675 pragma_interrupt = pragma_trapa = 1;
6678 void
6679 sh_pr_nosave_low_regs (struct cpp_reader *pfile ATTRIBUTE_UNUSED)
6681 pragma_nosave_low_regs = 1;
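/* A sketch of how these callbacks are reached from user code; each
   pragma applies to the function definitions that follow it:

	#pragma interrupt
	#pragma trapa
	#pragma nosave_low_regs

   The pragma registration itself is expected to live in
   REGISTER_TARGET_PRAGMAS; only the callbacks are defined here. */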
6684 /* Generate an "interrupt_handler" attribute for decls. */
6686 static void
6687 sh_insert_attributes (tree node, tree *attributes)
6689 if (! pragma_interrupt
6690 || TREE_CODE (node) != FUNCTION_DECL)
6691 return;
6693 /* We are only interested in declarations. */
6694 if (TREE_CODE_CLASS (TREE_CODE (node)) != 'd')
6695 return;
6697 /* Add a 'handle_interrupt' attribute. */
6698 * attributes = tree_cons (get_identifier ("interrupt_handler"), NULL, * attributes);
6700 return;
6703 /* Supported attributes:
6705 interrupt_handler -- specifies this function is an interrupt handler.
6707 sp_switch -- specifies an alternate stack for an interrupt handler
6708 to run on.
6710 trap_exit -- use a trapa to exit an interrupt function instead of
6711 an rte instruction.
6713 renesas -- use Renesas calling/layout conventions (functions and
6714 structures).
6718 const struct attribute_spec sh_attribute_table[] =
6720 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
6721 { "interrupt_handler", 0, 0, true, false, false, sh_handle_interrupt_handler_attribute },
6722 { "sp_switch", 1, 1, true, false, false, sh_handle_sp_switch_attribute },
6723 { "trap_exit", 1, 1, true, false, false, sh_handle_trap_exit_attribute },
6724 { "renesas", 0, 0, false, true, false, sh_handle_renesas_attribute },
6725 { NULL, 0, 0, false, false, false, NULL }
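/* Illustrative declarations this table accepts; note that in this file
   sp_switch and trap_exit are honored only while #pragma interrupt is
   in effect, as the handlers below check:

	extern char *alt_stack;
	void isr (void)
	  __attribute__ ((interrupt_handler,
			  sp_switch ("alt_stack"), trap_exit (4)));

   Here "alt_stack" and the trap number 4 are made-up example values. */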
6728 /* Handle an "interrupt_handler" attribute; arguments as in
6729 struct attribute_spec.handler. */
6730 static tree
6731 sh_handle_interrupt_handler_attribute (tree *node, tree name,
6732 tree args ATTRIBUTE_UNUSED,
6733 int flags ATTRIBUTE_UNUSED,
6734 bool *no_add_attrs)
6736 if (TREE_CODE (*node) != FUNCTION_DECL)
6738 warning ("`%s' attribute only applies to functions",
6739 IDENTIFIER_POINTER (name));
6740 *no_add_attrs = true;
6742 else if (TARGET_SHCOMPACT)
6744 error ("attribute interrupt_handler is not compatible with -m5-compact");
6745 *no_add_attrs = true;
6748 return NULL_TREE;
6751 /* Handle an "sp_switch" attribute; arguments as in
6752 struct attribute_spec.handler. */
6753 static tree
6754 sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
6755 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
6757 if (TREE_CODE (*node) != FUNCTION_DECL)
6759 warning ("`%s' attribute only applies to functions",
6760 IDENTIFIER_POINTER (name));
6761 *no_add_attrs = true;
6763 else if (!pragma_interrupt)
6765 /* The sp_switch attribute only has meaning for interrupt functions. */
6766 warning ("`%s' attribute only applies to interrupt functions",
6767 IDENTIFIER_POINTER (name));
6768 *no_add_attrs = true;
6770 else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
6772 /* The argument must be a constant string. */
6773 warning ("`%s' attribute argument not a string constant",
6774 IDENTIFIER_POINTER (name));
6775 *no_add_attrs = true;
6777 else
6779 sp_switch = gen_rtx_SYMBOL_REF (VOIDmode,
6780 TREE_STRING_POINTER (TREE_VALUE (args)));
6783 return NULL_TREE;
6786 /* Handle an "trap_exit" attribute; arguments as in
6787 struct attribute_spec.handler. */
6788 static tree
6789 sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
6790 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
6792 if (TREE_CODE (*node) != FUNCTION_DECL)
6794 warning ("`%s' attribute only applies to functions",
6795 IDENTIFIER_POINTER (name));
6796 *no_add_attrs = true;
6798 else if (!pragma_interrupt)
6800 /* The trap_exit attribute only has meaning for interrupt functions. */
6801 warning ("`%s' attribute only applies to interrupt functions",
6802 IDENTIFIER_POINTER (name));
6803 *no_add_attrs = true;
6805 else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
6807 /* The argument must be a constant integer. */
6808 warning ("`%s' attribute argument not an integer constant",
6809 IDENTIFIER_POINTER (name));
6810 *no_add_attrs = true;
6812 else
6814 trap_exit = TREE_INT_CST_LOW (TREE_VALUE (args));
6817 return NULL_TREE;
6820 static tree
6821 sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
6822 tree name ATTRIBUTE_UNUSED,
6823 tree args ATTRIBUTE_UNUSED,
6824 int flags ATTRIBUTE_UNUSED,
6825 bool *no_add_attrs ATTRIBUTE_UNUSED)
6827 return NULL_TREE;
6830 /* True if __attribute__((renesas)) or -mrenesas. */
6831 int
6832 sh_attr_renesas_p (tree td)
6834 if (TARGET_HITACHI)
6835 return 1;
6836 if (td == 0)
6837 return 0;
6838 if (DECL_P (td))
6839 td = TREE_TYPE (td);
6840 return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
6841 != NULL_TREE);
6844 /* True if __attribute__((renesas)) or -mrenesas, for the current
6845 function. */
6846 int
6847 sh_cfun_attr_renesas_p (void)
6849 return sh_attr_renesas_p (current_function_decl);
6852 int
6853 sh_cfun_interrupt_handler_p (void)
6855 return (lookup_attribute ("interrupt_handler",
6856 DECL_ATTRIBUTES (current_function_decl))
6857 != NULL_TREE);
6860 /* ??? target_switches in toplev.c is static, hence we have to duplicate it. */
6861 static const struct
6863 const char *const name;
6864 const int value;
6865 const char *const description;
6867 sh_target_switches[] = TARGET_SWITCHES;
6868 #define target_switches sh_target_switches
6870 /* Like default_pch_valid_p, but take flag_mask into account. */
6871 const char *
6872 sh_pch_valid_p (const void *data_p, size_t len)
6874 const char *data = (const char *)data_p;
6875 const char *flag_that_differs = NULL;
6876 size_t i;
6877 int old_flags;
6878 int flag_mask
6879 = (SH1_BIT | SH2_BIT | SH3_BIT | SH_E_BIT | HARD_SH4_BIT | FPU_SINGLE_BIT
6880 | SH4_BIT | HITACHI_BIT | LITTLE_ENDIAN_BIT);
6882 /* -fpic and -fpie also usually make a PCH invalid. */
6883 if (data[0] != flag_pic)
6884 return _("created and used with different settings of -fpic");
6885 if (data[1] != flag_pie)
6886 return _("created and used with different settings of -fpie");
6887 data += 2;
6889 /* Check target_flags. */
6890 memcpy (&old_flags, data, sizeof (target_flags));
6891 if (((old_flags ^ target_flags) & flag_mask) != 0)
6893 for (i = 0; i < ARRAY_SIZE (target_switches); i++)
6895 int bits;
6897 bits = target_switches[i].value;
6898 if (bits < 0)
6899 bits = -bits;
6900 bits &= flag_mask;
6901 if ((target_flags & bits) != (old_flags & bits))
6903 flag_that_differs = target_switches[i].name;
6904 goto make_message;
6907 abort ();
6909 data += sizeof (target_flags);
6910 len -= sizeof (target_flags);
6912 /* Check string options. */
6913 #ifdef TARGET_OPTIONS
6914 for (i = 0; i < ARRAY_SIZE (target_options); i++)
6916 const char *str = *target_options[i].variable;
6917 size_t l;
6918 if (! str)
6919 str = "";
6920 l = strlen (str) + 1;
6921 if (len < l || memcmp (data, str, l) != 0)
6923 flag_that_differs = target_options[i].prefix;
6924 goto make_message;
6926 data += l;
6927 len -= l;
6929 #endif
6931 return NULL;
6933 make_message:
6935 char *r;
6936 asprintf (&r, _("created and used with differing settings of `-m%s'"),
6937 flag_that_differs);
6938 if (r == NULL)
6939 return _("out of memory");
6940 return r;
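/* For instance, a PCH created with -ml but used in a big-endian
   compilation differs in LITTLE_ENDIAN_BIT, and the loop above would
   report: created and used with differing settings of `-ml'. */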
6944 /* Predicates used by the templates. */
6946 /* Returns 1 if OP is MACL, MACH or PR. The input must be a REG rtx.
6947 Used only in general_movsrc_operand. */
6949 int
6950 system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
6952 switch (REGNO (op))
6954 case PR_REG:
6955 case MACL_REG:
6956 case MACH_REG:
6957 return 1;
6959 return 0;
6962 /* Returns 1 if OP can be source of a simple move operation.
6963 Same as general_operand, but a LABEL_REF is valid, PRE_DEC is
6964 invalid as are subregs of system registers. */
6966 int
6967 general_movsrc_operand (rtx op, enum machine_mode mode)
6969 if (GET_CODE (op) == MEM)
6971 rtx inside = XEXP (op, 0);
6972 if (GET_CODE (inside) == CONST)
6973 inside = XEXP (inside, 0);
6975 if (GET_CODE (inside) == LABEL_REF)
6976 return 1;
6978 if (GET_CODE (inside) == PLUS
6979 && GET_CODE (XEXP (inside, 0)) == LABEL_REF
6980 && GET_CODE (XEXP (inside, 1)) == CONST_INT)
6981 return 1;
6983 /* Only post inc allowed. */
6984 if (GET_CODE (inside) == PRE_DEC)
6985 return 0;
6988 if ((mode == QImode || mode == HImode)
6989 && (GET_CODE (op) == SUBREG
6990 && GET_CODE (XEXP (op, 0)) == REG
6991 && system_reg_operand (XEXP (op, 0), mode)))
6992 return 0;
6994 return general_operand (op, mode);
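/* Illustrative cases for the predicate above: (mem:SI (label_ref L)) and
   a post-increment load are accepted, (mem:SI (pre_dec:SI (reg:SI SP)))
   is rejected, and so is a QImode or HImode subreg of MACH, MACL or PR.
   L and SP are stand-ins for a real label and the stack pointer. */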
6997 /* Returns 1 if OP can be a destination of a move.
6998 Same as general_operand, but no preinc allowed. */
7000 int
7001 general_movdst_operand (rtx op, enum machine_mode mode)
7003 /* Only pre dec allowed. */
7004 if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == POST_INC)
7005 return 0;
7007 return general_operand (op, mode);
7010 /* Returns 1 if OP is a normal arithmetic register. */
7012 int
7013 arith_reg_operand (rtx op, enum machine_mode mode)
7015 if (register_operand (op, mode))
7017 int regno;
7019 if (GET_CODE (op) == REG)
7020 regno = REGNO (op);
7021 else if (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
7022 regno = REGNO (SUBREG_REG (op));
7023 else
7024 return 1;
7026 return (regno != T_REG && regno != PR_REG
7027 && ! TARGET_REGISTER_P (regno)
7028 && (regno != FPUL_REG || TARGET_SH4)
7029 && regno != MACH_REG && regno != MACL_REG);
7031 return 0;
7034 /* Like above, but for DImode destinations: forbid paradoxical DImode subregs,
7035 because this would lead to missing sign extensions when truncating from
7036 DImode to SImode. */
7037 int
7038 arith_reg_dest (rtx op, enum machine_mode mode)
7040 if (mode == DImode && GET_CODE (op) == SUBREG
7041 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))) < 8)
7042 return 0;
7043 return arith_reg_operand (op, mode);
7046 int
7047 int_gpr_dest (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7049 enum machine_mode op_mode = GET_MODE (op);
7051 if (GET_MODE_CLASS (op_mode) != MODE_INT
7052 || GET_MODE_SIZE (op_mode) >= UNITS_PER_WORD)
7053 return 0;
7054 if (! reload_completed)
7055 return 0;
7056 return true_regnum (op) <= LAST_GENERAL_REG;
7059 int
7060 fp_arith_reg_operand (rtx op, enum machine_mode mode)
7062 if (register_operand (op, mode))
7064 int regno;
7066 if (GET_CODE (op) == REG)
7067 regno = REGNO (op);
7068 else if (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
7069 regno = REGNO (SUBREG_REG (op));
7070 else
7071 return 1;
7073 return (regno >= FIRST_PSEUDO_REGISTER
7074 || FP_REGISTER_P (regno));
7076 return 0;
7079 /* Returns 1 if OP is a valid source operand for an arithmetic insn. */
7081 int
7082 arith_operand (rtx op, enum machine_mode mode)
7084 if (arith_reg_operand (op, mode))
7085 return 1;
7087 if (TARGET_SHMEDIA)
7089 /* FIXME: We should be checking whether the CONST_INT fits in a
7090 CONST_OK_FOR_I16 here, but this causes reload_cse to crash when
7091 attempting to transform a sequence of two 64-bit sets of the
7092 same register from literal constants into a set and an add,
7093 when the difference is too wide for an add. */
7094 if (GET_CODE (op) == CONST_INT
7095 || EXTRA_CONSTRAINT_C16 (op))
7096 return 1;
7097 else
7098 return 0;
7100 else if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_I08 (INTVAL (op)))
7101 return 1;
7103 return 0;
7106 /* Returns 1 if OP is a valid source operand for a compare insn. */
7108 int
7109 arith_reg_or_0_operand (rtx op, enum machine_mode mode)
7111 if (arith_reg_operand (op, mode))
7112 return 1;
7114 if (EXTRA_CONSTRAINT_Z (op))
7115 return 1;
7117 return 0;
7120 /* Return 1 if OP is a valid source operand for an SHmedia operation
7121 that takes either a register or a 6-bit immediate. */
7123 int
7124 shmedia_6bit_operand (rtx op, enum machine_mode mode)
7126 return (arith_reg_operand (op, mode)
7127 || (GET_CODE (op) == CONST_INT && CONST_OK_FOR_I06 (INTVAL (op))));
7130 /* Returns 1 if OP is a valid source operand for a logical operation. */
7132 int
7133 logical_operand (rtx op, enum machine_mode mode)
7135 if (arith_reg_operand (op, mode))
7136 return 1;
7138 if (TARGET_SHMEDIA)
7140 if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_I10 (INTVAL (op)))
7141 return 1;
7142 else
7143 return 0;
7145 else if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K08 (INTVAL (op)))
7146 return 1;
7148 return 0;
7151 int
7152 and_operand (rtx op, enum machine_mode mode)
7154 if (logical_operand (op, mode))
7155 return 1;
7157 /* Check mshflo.l / mshflhi.l opportunities. */
7158 if (TARGET_SHMEDIA
7159 && mode == DImode
7160 && GET_CODE (op) == CONST_INT
7161 && CONST_OK_FOR_J16 (INTVAL (op)))
7162 return 1;
7164 return 0;
7167 /* Nonzero if OP is a floating point value with value 0.0. */
7169 int
7170 fp_zero_operand (rtx op)
7172 REAL_VALUE_TYPE r;
7174 if (GET_MODE (op) != SFmode)
7175 return 0;
7177 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
7178 return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
7181 /* Nonzero if OP is a floating point value with value 1.0. */
7183 int
7184 fp_one_operand (rtx op)
7186 REAL_VALUE_TYPE r;
7188 if (GET_MODE (op) != SFmode)
7189 return 0;
7191 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
7192 return REAL_VALUES_EQUAL (r, dconst1);
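/* These two predicates are intended to match exactly the constants that
   the fldi0 and fldi1 instructions (SH2E and up) can materialize; e.g.
   (const_double:SF 1.0) satisfies fp_one_operand, while -0.0 fails
   fp_zero_operand because fldi0 produces +0.0. */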
7195 /* For -m4 and -m4-single-only, mode switching is used. If we are
7196 compiling without -mfmovd, movsf_ie isn't taken into account for
7197 mode switching. We could check in machine_dependent_reorg for
7198 cases where we know we are in single precision mode, but there is
7199 no interface to find that out during reload, so we must avoid
7200 choosing an fldi alternative during reload and thus failing to
7201 allocate a scratch register for the constant loading. */
7202 int
7203 fldi_ok (void)
7205 return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
7208 int
7209 tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7211 enum rtx_code code = GET_CODE (op);
7212 return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
7215 int
7216 fpscr_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7218 return (GET_CODE (op) == REG && REGNO (op) == FPSCR_REG
7219 && GET_MODE (op) == PSImode);
7222 int
7223 fpul_operand (rtx op, enum machine_mode mode)
7225 if (TARGET_SHMEDIA)
7226 return fp_arith_reg_operand (op, mode);
7228 return (GET_CODE (op) == REG
7229 && (REGNO (op) == FPUL_REG || REGNO (op) >= FIRST_PSEUDO_REGISTER)
7230 && GET_MODE (op) == mode);
7233 int
7234 symbol_ref_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7236 return (GET_CODE (op) == SYMBOL_REF);
7239 /* Return the TLS type for TLS symbols, 0 otherwise. */
7240 int
7241 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7243 if (GET_CODE (op) != SYMBOL_REF)
7244 return 0;
7245 return SYMBOL_REF_TLS_MODEL (op);
7248 int
7249 commutative_float_operator (rtx op, enum machine_mode mode)
7251 if (GET_MODE (op) != mode)
7252 return 0;
7253 switch (GET_CODE (op))
7255 case PLUS:
7256 case MULT:
7257 return 1;
7258 default:
7259 break;
7261 return 0;
7264 int
7265 noncommutative_float_operator (rtx op, enum machine_mode mode)
7267 if (GET_MODE (op) != mode)
7268 return 0;
7269 switch (GET_CODE (op))
7271 case MINUS:
7272 case DIV:
7273 return 1;
7274 default:
7275 break;
7277 return 0;
7280 int
7281 unary_float_operator (rtx op, enum machine_mode mode)
7283 if (GET_MODE (op) != mode)
7284 return 0;
7285 switch (GET_CODE (op))
7287 case ABS:
7288 case NEG:
7289 case SQRT:
7290 return 1;
7291 default:
7292 break;
7294 return 0;
7297 int
7298 binary_float_operator (rtx op, enum machine_mode mode)
7300 if (GET_MODE (op) != mode)
7301 return 0;
7302 switch (GET_CODE (op))
7304 case PLUS:
7305 case MINUS:
7306 case MULT:
7307 case DIV:
7308 return 1;
7309 default:
7310 break;
7312 return 0;
7315 int
7316 binary_logical_operator (rtx op, enum machine_mode mode)
7318 if (GET_MODE (op) != mode)
7319 return 0;
7320 switch (GET_CODE (op))
7322 case IOR:
7323 case AND:
7324 case XOR:
7325 return 1;
7326 default:
7327 break;
7329 return 0;
7332 int
7333 equality_comparison_operator (rtx op, enum machine_mode mode)
7335 return ((mode == VOIDmode || GET_MODE (op) == mode)
7336 && (GET_CODE (op) == EQ || GET_CODE (op) == NE));
7339 int greater_comparison_operator (rtx op, enum machine_mode mode)
7341 if (mode != VOIDmode && GET_MODE (op) != mode)
7342 return 0;
7343 switch (GET_CODE (op))
7345 case GT:
7346 case GE:
7347 case GTU:
7348 case GEU:
7349 return 1;
7350 default:
7351 return 0;
7355 int less_comparison_operator (rtx op, enum machine_mode mode)
7357 if (mode != VOIDmode && GET_MODE (op) != mode)
7358 return 0;
7359 switch (GET_CODE (op))
7361 case LT:
7362 case LE:
7363 case LTU:
7364 case LEU:
7365 return 1;
7366 default:
7367 return 0;
7371 /* Accept pseudos and branch target registers. */
7372 int
7373 target_reg_operand (rtx op, enum machine_mode mode)
7375 if (mode != DImode
7376 || GET_MODE (op) != DImode)
7377 return 0;
7379 if (GET_CODE (op) == SUBREG)
7380 op = XEXP (op, 0);
7382 if (GET_CODE (op) != REG)
7383 return 0;
7385 /* We must protect ourselves from matching pseudos that are virtual
7386 registers, because they will eventually be replaced with hardware
7387 registers that aren't branch-target registers. */
7388 if (REGNO (op) > LAST_VIRTUAL_REGISTER
7389 || TARGET_REGISTER_P (REGNO (op)))
7390 return 1;
7392 return 0;
7395 /* Same as target_reg_operand, except that label_refs and symbol_refs
7396 are accepted before reload. */
7397 int
7398 target_operand (rtx op, enum machine_mode mode)
7400 if (mode != DImode)
7401 return 0;
7403 if ((GET_MODE (op) == DImode || GET_MODE (op) == VOIDmode)
7404 && EXTRA_CONSTRAINT_Csy (op))
7405 return ! reload_completed;
7407 return target_reg_operand (op, mode);
7410 int
7411 mextr_bit_offset (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7413 HOST_WIDE_INT i;
7415 if (GET_CODE (op) != CONST_INT)
7416 return 0;
7417 i = INTVAL (op);
7418 return i >= 1*8 && i <= 7*8 && (i & 7) == 0;
7421 int
7422 extend_reg_operand (rtx op, enum machine_mode mode)
7424 return (GET_CODE (op) == TRUNCATE
7425 ? arith_operand
7426 : arith_reg_operand) (op, mode);
7429 int
7430 trunc_hi_operand (rtx op, enum machine_mode mode)
7432 enum machine_mode op_mode = GET_MODE (op);
7434 if (op_mode != SImode && op_mode != DImode
7435 && op_mode != V4HImode && op_mode != V2SImode)
7436 return 0;
7437 return extend_reg_operand (op, mode);
7440 int
7441 extend_reg_or_0_operand (rtx op, enum machine_mode mode)
7443 return (GET_CODE (op) == TRUNCATE
7444 ? arith_operand
7445 : arith_reg_or_0_operand) (op, mode);
7448 int
7449 general_extend_operand (rtx op, enum machine_mode mode)
7451 return (GET_CODE (op) == TRUNCATE
7452 ? arith_operand
7453 : nonimmediate_operand) (op, mode);
7456 int
7457 inqhi_operand (rtx op, enum machine_mode mode)
7459 if (GET_CODE (op) != TRUNCATE || mode != GET_MODE (op))
7460 return 0;
7461 op = XEXP (op, 0);
7462 /* Can't use true_regnum here because copy_cost wants to know about
7463 SECONDARY_INPUT_RELOAD_CLASS. */
7464 return GET_CODE (op) == REG && FP_REGISTER_P (REGNO (op));
7467 int
7468 sh_rep_vec (rtx v, enum machine_mode mode)
7470 int i;
7471 rtx x, y;
7473 if ((GET_CODE (v) != CONST_VECTOR && GET_CODE (v) != PARALLEL)
7474 || (GET_MODE (v) != mode && mode != VOIDmode))
7475 return 0;
7476 i = XVECLEN (v, 0) - 2;
7477 x = XVECEXP (v, 0, i + 1);
7478 if (GET_MODE_UNIT_SIZE (mode) == 1)
7480 y = XVECEXP (v, 0, i);
7481 for (i -= 2 ; i >= 0; i -= 2)
7482 if (! rtx_equal_p (XVECEXP (v, 0, i + 1), x)
7483 || ! rtx_equal_p (XVECEXP (v, 0, i), y))
7484 return 0;
7486 else
7487 for (; i >= 0; i--)
7488 if (XVECEXP (v, 0, i) != x)
7489 return 0;
7490 return 1;
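/* Examples, as a sketch: (const_vector:V8QI [1 2 1 2 1 2 1 2]) counts as
   repeating (byte elements are checked pairwise), and
   (const_vector:V4HI [5 5 5 5]) does too, but (const_vector:V4HI [1 2 1 2])
   does not, since elements wider than a byte must all equal the last one. */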
7493 /* Determine if V is a constant vector matching MODE with only one element
7494 that is not a sign extension. Two byte-sized elements count as one. */
7495 int
7496 sh_1el_vec (rtx v, enum machine_mode mode)
7498 int unit_size;
7499 int i, last, least, sign_ix;
7500 rtx sign;
7502 if (GET_CODE (v) != CONST_VECTOR
7503 || (GET_MODE (v) != mode && mode != VOIDmode))
7504 return 0;
7505 /* Determine numbers of last and of least significant elements. */
7506 last = XVECLEN (v, 0) - 1;
7507 least = TARGET_LITTLE_ENDIAN ? 0 : last;
7508 if (GET_CODE (XVECEXP (v, 0, least)) != CONST_INT)
7509 return 0;
7510 sign_ix = least;
7511 if (GET_MODE_UNIT_SIZE (mode) == 1)
7512 sign_ix = TARGET_LITTLE_ENDIAN ? 1 : last - 1;
7513 if (GET_CODE (XVECEXP (v, 0, sign_ix)) != CONST_INT)
7514 return 0;
7515 unit_size = GET_MODE_UNIT_SIZE (GET_MODE (v));
7516 sign = (INTVAL (XVECEXP (v, 0, sign_ix)) >> (unit_size * BITS_PER_UNIT - 1)
7517 ? constm1_rtx : const0_rtx);
7518 i = XVECLEN (v, 0) - 1;
7519 do
7520 if (i != least && i != sign_ix && XVECEXP (v, 0, i) != sign)
7521 return 0;
7522 while (--i);
7523 return 1;
7526 int
7527 sh_const_vec (rtx v, enum machine_mode mode)
7529 int i;
7531 if (GET_CODE (v) != CONST_VECTOR
7532 || (GET_MODE (v) != mode && mode != VOIDmode))
7533 return 0;
7534 i = XVECLEN (v, 0) - 1;
7535 for (; i >= 0; i--)
7536 if (GET_CODE (XVECEXP (v, 0, i)) != CONST_INT)
7537 return 0;
7538 return 1;
7541 /* Return the destination address of a branch. */
7543 static int
7544 branch_dest (rtx branch)
7546 rtx dest = SET_SRC (PATTERN (branch));
7547 int dest_uid;
7549 if (GET_CODE (dest) == IF_THEN_ELSE)
7550 dest = XEXP (dest, 1);
7551 dest = XEXP (dest, 0);
7552 dest_uid = INSN_UID (dest);
7553 return INSN_ADDRESSES (dest_uid);
7556 /* Return nonzero if REG is not used after INSN.
7557 We assume REG is a reload reg, and therefore does
7558 not live past labels. It may live past calls or jumps though. */
7559 int
7560 reg_unused_after (rtx reg, rtx insn)
7562 enum rtx_code code;
7563 rtx set;
7565 /* If the reg is set by this instruction, then it is safe for our
7566 case. Disregard the case where this is a store to memory, since
7567 we are checking a register used in the store address. */
7568 set = single_set (insn);
7569 if (set && GET_CODE (SET_DEST (set)) != MEM
7570 && reg_overlap_mentioned_p (reg, SET_DEST (set)))
7571 return 1;
7573 while ((insn = NEXT_INSN (insn)))
7575 code = GET_CODE (insn);
7577 #if 0
7578 /* If this is a label that existed before reload, then the register
7579 is dead here. However, if this is a label added by reorg, then
7580 the register may still be live here. We can't tell the difference,
7581 so we just ignore labels completely. */
7582 if (code == CODE_LABEL)
7583 return 1;
7584 /* else */
7585 #endif
7587 if (code == JUMP_INSN)
7588 return 0;
7590 /* If this is a sequence, we must handle them all at once.
7591 We could have for instance a call that sets the target register,
7592 and an insn in a delay slot that uses the register. In this case,
7593 we must return 0. */
7594 else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
7596 int i;
7597 int retval = 0;
7599 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7601 rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
7602 rtx set = single_set (this_insn);
7604 if (GET_CODE (this_insn) == CALL_INSN)
7605 code = CALL_INSN;
7606 else if (GET_CODE (this_insn) == JUMP_INSN)
7608 if (INSN_ANNULLED_BRANCH_P (this_insn))
7609 return 0;
7610 code = JUMP_INSN;
7613 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
7614 return 0;
7615 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
7617 if (GET_CODE (SET_DEST (set)) != MEM)
7618 retval = 1;
7619 else
7620 return 0;
7622 if (set == 0
7623 && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
7624 return 0;
7626 if (retval == 1)
7627 return 1;
7628 else if (code == JUMP_INSN)
7629 return 0;
7631 else if (GET_RTX_CLASS (code) == 'i')
7633 rtx set = single_set (insn);
7635 if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
7636 return 0;
7637 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
7638 return GET_CODE (SET_DEST (set)) != MEM;
7639 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
7640 return 0;
7643 if (code == CALL_INSN && call_used_regs[REGNO (reg)])
7644 return 1;
7646 return 1;
7649 #include "ggc.h"
7651 static GTY(()) rtx fpscr_rtx;
7652 rtx
7653 get_fpscr_rtx (void)
7655 if (! fpscr_rtx)
7657 fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
7658 REG_USERVAR_P (fpscr_rtx) = 1;
7659 mark_user_reg (fpscr_rtx);
7661 if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
7662 mark_user_reg (fpscr_rtx);
7663 return fpscr_rtx;
7666 void
7667 emit_sf_insn (rtx pat)
7669 emit_insn (pat);
7672 void
7673 emit_df_insn (rtx pat)
7675 emit_insn (pat);
7678 void
7679 expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
7681 emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
7684 void
7685 expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
7687 emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
7688 get_fpscr_rtx ()));
7691 void
7692 expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
7694 emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
7697 void
7698 expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
7700 emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
7701 get_fpscr_rtx ()));
7704 /* ??? gcc does flow analysis strictly after common subexpression
7705 elimination. As a result, common subexpression elimination fails
7706 when there are some intervening statements setting the same register.
7707 If we did nothing about this, this would hurt the precision switching
7708 for SH4 badly. There is some cse after reload, but it is unable to
7709 undo the extra register pressure from the unused instructions, and
7710 it cannot remove auto-increment loads.
7712 A C code example that shows this flow/cse weakness for (at least) SH
7713 and sparc (as of gcc ss-970706) is this:
7715 double
7716 f(double a)
7718 double d;
7719 d = 0.1;
7720 a += d;
7721 d = 1.1;
7722 d = 0.1;
7723 a *= d;
7724 return a;
7727 So we add another pass before common subexpression elimination, to
7728 remove assignments that are dead due to a following assignment in the
7729 same basic block. */
7731 static void
7732 mark_use (rtx x, rtx *reg_set_block)
7734 enum rtx_code code;
7736 if (! x)
7737 return;
7738 code = GET_CODE (x);
7739 switch (code)
7741 case REG:
7743 int regno = REGNO (x);
7744 int nregs = (regno < FIRST_PSEUDO_REGISTER
7745 ? HARD_REGNO_NREGS (regno, GET_MODE (x))
7746 : 1);
7747 do
7748 {
7749 reg_set_block[regno + nregs - 1] = 0;
7750 }
7751 while (--nregs);
7752 break;
7754 case SET:
7756 rtx dest = SET_DEST (x);
7758 if (GET_CODE (dest) == SUBREG)
7759 dest = SUBREG_REG (dest);
7760 if (GET_CODE (dest) != REG)
7761 mark_use (dest, reg_set_block);
7762 mark_use (SET_SRC (x), reg_set_block);
7763 break;
7765 case CLOBBER:
7766 break;
7767 default:
7769 const char *fmt = GET_RTX_FORMAT (code);
7770 int i, j;
7771 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7773 if (fmt[i] == 'e')
7774 mark_use (XEXP (x, i), reg_set_block);
7775 else if (fmt[i] == 'E')
7776 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7777 mark_use (XVECEXP (x, i, j), reg_set_block);
7779 break;
7784 static rtx get_free_reg (HARD_REG_SET);
7786 /* This function returns a register to use to load the address from which
7787 to load the fpscr. Currently it always returns r1 or r7, but when we are
7788 able to use pseudo registers after combine, or have a better mechanism
7789 for choosing a register, it should be done here. */
7790 /* REGS_LIVE is the liveness information for the point for which we
7791 need this allocation. In some bare-bones exit blocks, r1 is live at the
7792 start. We can even have all of r0..r3 being live:
7793 __complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
7794 The INSN before which the new insns are placed may clobber the register
7795 we return. If a basic block consists only of setting the return value
7796 register to a pseudo and using that register, the return value is not
7797 live before or after this block, yet we'll insert our insns right in
7798 the middle. */
7800 static rtx
7801 get_free_reg (HARD_REG_SET regs_live)
7803 if (! TEST_HARD_REG_BIT (regs_live, 1))
7804 return gen_rtx_REG (Pmode, 1);
7806 /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
7807 there shouldn't be anything but a jump before the function end. */
7808 if (! TEST_HARD_REG_BIT (regs_live, 7))
7809 return gen_rtx_REG (Pmode, 7);
7811 abort ();
7814 /* This function will set the fpscr from memory.
7815 MODE is the mode we are setting it to. */
7816 void
7817 fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
7819 enum attr_fp_mode fp_mode = mode;
7820 rtx addr_reg = get_free_reg (regs_live);
7822 if (fp_mode == (enum attr_fp_mode) ACTUAL_NORMAL_MODE (FP_MODE))
7823 emit_insn (gen_fpu_switch1 (addr_reg));
7824 else
7825 emit_insn (gen_fpu_switch0 (addr_reg));
7828 /* Is the given character a logical line separator for the assembler? */
7829 #ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
7830 #define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';')
7831 #endif
7833 int
7834 sh_insn_length_adjustment (rtx insn)
7836 /* Instructions with unfilled delay slots take up an extra two bytes for
7837 the nop in the delay slot. */
7838 if (((GET_CODE (insn) == INSN
7839 && GET_CODE (PATTERN (insn)) != USE
7840 && GET_CODE (PATTERN (insn)) != CLOBBER)
7841 || GET_CODE (insn) == CALL_INSN
7842 || (GET_CODE (insn) == JUMP_INSN
7843 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
7844 && GET_CODE (PATTERN (insn)) != ADDR_VEC))
7845 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
7846 && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
7847 return 2;
7849 /* SH2e has a bug that prevents the use of annulled branches, so if
7850 the delay slot is not filled, we'll have to put a NOP in it. */
7851 if (sh_cpu == CPU_SH2E
7852 && GET_CODE (insn) == JUMP_INSN
7853 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
7854 && GET_CODE (PATTERN (insn)) != ADDR_VEC
7855 && get_attr_type (insn) == TYPE_CBRANCH
7856 && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
7857 return 2;
7859 /* sh-dsp parallel processing insns take four bytes instead of two. */
7861 if (GET_CODE (insn) == INSN)
7863 int sum = 0;
7864 rtx body = PATTERN (insn);
7865 const char *template;
7866 char c;
7867 int maybe_label = 1;
7869 if (GET_CODE (body) == ASM_INPUT)
7870 template = XSTR (body, 0);
7871 else if (asm_noperands (body) >= 0)
7872 template
7873 = decode_asm_operands (body, NULL, NULL, NULL, NULL);
7874 else
7875 return 0;
7876 do
7877 {
7878 int ppi_adjust = 0;
7880 do
7881 c = *template++;
7882 while (c == ' ' || c == '\t');
7883 /* all sh-dsp parallel-processing insns start with p.
7884 The only non-ppi sh insn starting with p is pref.
7885 The only ppi starting with pr is prnd. */
7886 if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
7887 ppi_adjust = 2;
7888 /* The repeat pseudo-insn expands to three insns, a total of
7889 six bytes in size. */
7890 else if ((c == 'r' || c == 'R')
7891 && ! strncasecmp ("epeat", template, 5))
7892 ppi_adjust = 4;
7893 while (c && c != '\n' && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c))
7895 /* If this is a label, it is obviously not a ppi insn. */
7896 if (c == ':' && maybe_label)
7898 ppi_adjust = 0;
7899 break;
7901 else if (c == '\'' || c == '"')
7902 maybe_label = 0;
7903 c = *template++;
7904 }
7905 sum += ppi_adjust;
7906 maybe_label = c != ':';
7907 }
7908 while (c);
7909 return sum;
7911 return 0;
7914 /* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
7915 isn't protected by a PIC unspec. */
7916 int
7917 nonpic_symbol_mentioned_p (rtx x)
7919 register const char *fmt;
7920 register int i;
7922 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
7923 || GET_CODE (x) == PC)
7924 return 1;
7926 /* We don't want to look into the possible MEM location of a
7927 CONST_DOUBLE, since we're not going to use it, in general. */
7928 if (GET_CODE (x) == CONST_DOUBLE)
7929 return 0;
7931 if (GET_CODE (x) == UNSPEC
7932 && (XINT (x, 1) == UNSPEC_PIC
7933 || XINT (x, 1) == UNSPEC_GOT
7934 || XINT (x, 1) == UNSPEC_GOTOFF
7935 || XINT (x, 1) == UNSPEC_GOTPLT
7936 || XINT (x, 1) == UNSPEC_GOTTPOFF
7937 || XINT (x, 1) == UNSPEC_DTPOFF
7938 || XINT (x, 1) == UNSPEC_PLT))
7939 return 0;
7941 fmt = GET_RTX_FORMAT (GET_CODE (x));
7942 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
7944 if (fmt[i] == 'E')
7946 register int j;
7948 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
7949 if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
7950 return 1;
7952 else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
7953 return 1;
7956 return 0;
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx reg)
{
  if (tls_symbolic_operand (orig, Pmode))
    return orig;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
    {
      if (reg == 0)
        reg = gen_reg_rtx (Pmode);

      emit_insn (gen_symGOTOFF2reg (reg, orig));
      return reg;
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == 0)
        reg = gen_reg_rtx (Pmode);

      emit_insn (gen_symGOT2reg (reg, orig));
      return reg;
    }
  return orig;
}
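
/* For example (illustrative): a label or a locally-binding symbol such
   as a static variable is loaded via gen_symGOTOFF2reg, i.e. as a
   GOT-base-relative @GOTOFF offset that needs no GOT entry of its own,
   whereas a possibly-overridable global symbol goes through
   gen_symGOT2reg, i.e. an indirection through its @GOT slot.  */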
/* Mark the use of a constant in the literal table.  If the constant
   has multiple labels, make it unique.  */
static rtx
mark_constant_pool_use (rtx x)
{
  rtx insn, lab, pattern;

  if (x == NULL)
    return x;

  switch (GET_CODE (x))
    {
    case LABEL_REF:
      x = XEXP (x, 0);
      /* Fall through.  */
    case CODE_LABEL:
      break;
    default:
      return x;
    }

  /* Get the first label in the list of labels for the same constant
     and delete the other labels in the list.  */
  lab = x;
  for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
    {
      if (GET_CODE (insn) != CODE_LABEL
          || LABEL_REFS (insn) != NEXT_INSN (insn))
        break;
      lab = insn;
    }

  for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
    INSN_DELETED_P (insn) = 1;

  /* Mark constants in a window.  */
  for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) != INSN)
        continue;

      pattern = PATTERN (insn);
      if (GET_CODE (pattern) != UNSPEC_VOLATILE)
        continue;

      switch (XINT (pattern, 1))
        {
        case UNSPECV_CONST2:
        case UNSPECV_CONST4:
        case UNSPECV_CONST8:
          XVECEXP (pattern, 0, 1) = const1_rtx;
          break;
        case UNSPECV_WINDOW_END:
          if (XVECEXP (pattern, 0, 0) == x)
            return lab;
          break;
        case UNSPECV_CONST_END:
          return lab;
        default:
          break;
        }
    }

  return lab;
}
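
/* Roughly (illustrative): if labels L1, L2 and L3 all tag the same pool
   constant, the loops above keep only the first label of the chain,
   mark the duplicates as deleted, and flag each UNSPECV_CONST* entry
   between the label and the enclosing UNSPECV_WINDOW_END or
   UNSPECV_CONST_END marker as used.  */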
/* Return true if it's possible to redirect BRANCH1 to the destination
   of an unconditional jump BRANCH2.  We only want to do this if the
   resulting branch will have a short displacement.  */
int
sh_can_redirect_branch (rtx branch1, rtx branch2)
{
  if (flag_expensive_optimizations && simplejump_p (branch2))
    {
      rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
      rtx insn;
      int distance;

      for (distance = 0, insn = NEXT_INSN (branch1);
           insn && distance < 256;
           insn = PREV_INSN (insn))
        {
          if (insn == dest)
            return 1;
          else
            distance += get_attr_length (insn);
        }
      for (distance = 0, insn = NEXT_INSN (branch1);
           insn && distance < 256;
           insn = NEXT_INSN (insn))
        {
          if (insn == dest)
            return 1;
          else
            distance += get_attr_length (insn);
        }
    }
  return 0;
}
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
int
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
                         unsigned int new_reg)
{
  /* Interrupt functions can only use registers that have already been
     saved by the prologue, even if they would normally be
     call-clobbered.  */
  if (sh_cfun_interrupt_handler_p () && !regs_ever_live[new_reg])
    return 0;

  return 1;
}
/* Function to update the integer COST
   based on the relationship between INSN that is dependent on
   DEP_INSN through the dependence LINK.  The default is to make no
   adjustment to COST.  This can be used for example to specify to
   the scheduler that an output- or anti-dependence does not incur
   the same cost as a data-dependence.  The return value should be
   the new value for COST.  */
static int
sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
{
  rtx reg, use_pat;

  if (TARGET_SHMEDIA)
    {
      /* On SHmedia, if the dependence is an anti-dependence or
         output-dependence, there is no cost.  */
      if (REG_NOTE_KIND (link) != 0)
        cost = 0;

      if (get_attr_is_mac_media (insn)
          && get_attr_is_mac_media (dep_insn))
        cost = 1;
    }
  else if (REG_NOTE_KIND (link) == 0)
    {
      enum attr_type dep_type, type;

      if (recog_memoized (insn) < 0
          || recog_memoized (dep_insn) < 0)
        return cost;

      dep_type = get_attr_type (dep_insn);
      if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
        cost--;
      if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
          && (type = get_attr_type (insn)) != TYPE_CALL
          && type != TYPE_SFUNC)
        cost--;

      /* The only input for a call that is timing-critical is the
         function's address.  */
      if (GET_CODE (insn) == CALL_INSN)
        {
          rtx call = PATTERN (insn);

          if (GET_CODE (call) == PARALLEL)
            call = XVECEXP (call, 0, 0);
          if (GET_CODE (call) == SET)
            call = SET_SRC (call);
          if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
              && ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn))
            cost = 0;
        }
      /* Likewise, the most timing-critical input for an sfunc call
         is the function address.  However, sfuncs typically start
         using their arguments pretty quickly.
         Assume a four cycle delay before they are needed.  */
      /* All sfunc calls are parallels with at least four components.
         Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
               && XVECLEN (PATTERN (insn), 0) >= 4
               && (reg = sfunc_uses_reg (insn)))
        {
          if (! reg_set_p (reg, dep_insn))
            cost -= 4;
        }
      /* When the preceding instruction loads the shift amount of
         the following SHAD/SHLD, the latency of the load is increased
         by 1 cycle.  */
      else if (TARGET_SH4
               && get_attr_type (insn) == TYPE_DYN_SHIFT
               && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
               && reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)),
                                           XEXP (SET_SRC (single_set (insn)),
                                                 1)))
        cost++;
      /* When an LS group instruction with a latency of less than
         3 cycles is followed by a double-precision floating-point
         instruction, FIPR, or FTRV, the latency of the first
         instruction is increased to 3 cycles.  */
      else if (cost < 3
               && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
               && get_attr_dfp_comp (insn) == DFP_COMP_YES)
        cost = 3;
      /* The lsw register of a double-precision computation is ready one
         cycle earlier.  */
      else if (reload_completed
               && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
               && (use_pat = single_set (insn))
               && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
                                  SET_SRC (use_pat)))
        cost -= 1;

      if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
          && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
        cost -= 1;
    }
  /* An anti-dependence penalty of two applies if the first insn is a double
     precision fadd / fsub / fmul.  */
  else if (REG_NOTE_KIND (link) == REG_DEP_ANTI
           && recog_memoized (dep_insn) >= 0
           && get_attr_type (dep_insn) == TYPE_DFP_ARITH
           /* A lot of alleged anti-flow dependences are fake,
              so check this one is real.  */
           && flow_dependent_p (dep_insn, insn))
    cost = 2;

  return cost;
}
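
/* Worked example (illustrative): on SH4, in the sequence

       mov.l  @r4,r1      ! dep_insn: any_int_load
       shad   r1,r2       ! insn: dyn_shift, shift amount in r1

   the loaded register feeds the shift amount of the dynamic shift, so
   the TYPE_DYN_SHIFT case above adds one cycle to the load's cost.  */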
/* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
   if DEP_INSN is anti-flow dependent on INSN.  */
static int
flow_dependent_p (rtx insn, rtx dep_insn)
{
  rtx tmp = PATTERN (insn);

  note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
  return tmp == NULL_RTX;
}

/* A helper function for flow_dependent_p called through note_stores.  */
static void
flow_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
{
  rtx * pinsn = (rtx *) data;

  if (*pinsn && reg_referenced_p (x, *pinsn))
    *pinsn = NULL_RTX;
}
/* For use by ALLOCATE_INITIAL_VALUE.  Note that sh.md contains some
   'special function' patterns (type sfunc) that clobber pr, but that
   do not look like function calls to leaf_function_p.  Hence we must
   do this extra check.  */
int
sh_pr_n_sets (void)
{
  return REG_N_SETS (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
}

/* This function returns nonzero if the DFA based scheduler interface
   is to be used.  At present this is supported for the SH4 only.  */
static int
sh_use_dfa_interface (void)
{
  if (TARGET_HARD_SH4)
    return 1;
  else
    return 0;
}

/* This function returns "2" to indicate dual issue for the SH4
   processor.  To be used by the DFA pipeline description.  */
static int
sh_issue_rate (void)
{
  if (TARGET_SUPERSCALAR)
    return 2;
  else
    return 1;
}

/* SHmedia requires registers for branches, so we can't generate new
   branches past reload.  */
static bool
sh_cannot_modify_jumps_p (void)
{
  return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
}

static int
sh_target_reg_class (void)
{
  return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
}

static bool
sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
{
  return (shmedia_space_reserved_for_target_registers
          && (! after_prologue_epilogue_gen || TARGET_SAVE_ALL_TARGET_REGS));
}

static bool
sh_ms_bitfield_layout_p (tree record_type ATTRIBUTE_UNUSED)
{
  return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
}
/* On the SH1..SH4, the trampoline looks like
   2 0002 D202     	mov.l	l2,r2
   1 0000 D301     	mov.l	l1,r3
   3 0004 422B     	jmp	@r2
   4 0006 0009     	nop
   5 0008 00000000 	l1:	.long	area
   6 000c 00000000 	l2:	.long	function

   SH5 (compact) uses r1 instead of r3 for the static chain.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */
void
sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
{
  if (TARGET_SHMEDIA64)
    {
      rtx tramp_templ;
      int fixed_len;

      rtx movi1 = GEN_INT (0xcc000010);
      rtx shori1 = GEN_INT (0xc8000010);
      rtx src, dst;

      /* The following trampoline works within a +- 128 KB range for cxt:
         ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
         shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
         gettr tr1,r1; blink tr0,r63  */
      /* Address rounding makes it hard to compute the exact bounds of the
         offset for this trampoline, but we have a rather generous offset
         range, so frame_offset should do fine as an upper bound.  */
      if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
        {
          /* ??? could optimize this trampoline initialization
             by writing DImode words with two insns each.  */
          rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
          rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
          insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10 - 2));
          insn = gen_rtx_AND (DImode, insn, mask);
          /* Or in the ptb/u .,tr1 pattern.  */
          insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
          insn = force_operand (insn, NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, tramp), insn);
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)), insn);
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)), insn);
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
                          insn);
          insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
          insn = gen_rtx_AND (DImode, insn, mask);
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
          insn = gen_lowpart (SImode, insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 16)),
                          insn);
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 20)),
                          GEN_INT (0x6bf10600));
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 24)),
                          GEN_INT (0x4415fc10));
          emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 28)),
                          GEN_INT (0x4401fff0));
          emit_insn (gen_ic_invalidate_line (tramp));
          return;
        }
      tramp_templ = gen_rtx_SYMBOL_REF (Pmode, "__GCC_nested_trampoline");
      fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);

      tramp_templ = gen_datalabel_ref (tramp_templ);
      dst = gen_rtx_MEM (BLKmode, tramp);
      src = gen_rtx_MEM (BLKmode, tramp_templ);
      set_mem_align (dst, 256);
      set_mem_align (src, 64);
      emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);

      emit_move_insn (gen_rtx_MEM (Pmode, plus_constant (tramp, fixed_len)),
                      fnaddr);
      emit_move_insn (gen_rtx_MEM (Pmode,
                                   plus_constant (tramp,
                                                  fixed_len
                                                  + GET_MODE_SIZE (Pmode))),
                      cxt);
      emit_insn (gen_ic_invalidate_line (tramp));
      return;
    }
  else if (TARGET_SHMEDIA)
    {
      /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
         movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63  */
      rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
      rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
      /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010 concatenated,
         rotated 10 right, and higher 16 bit of every 32 selected.  */
      rtx movishori
        = force_reg (V2HImode, (simplify_gen_subreg
                                (V2HImode, GEN_INT (0x4330432), SImode, 0)));
      rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
      rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));

      tramp = force_reg (Pmode, tramp);
      fnaddr = force_reg (SImode, fnaddr);
      cxt = force_reg (SImode, cxt);
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
                                 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
                                 movishori));
      emit_insn (gen_rotrdi3_mextr (quad0, quad0,
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
      emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
      emit_move_insn (gen_rtx_MEM (DImode, tramp), quad0);
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
                                 gen_rtx_SUBREG (V2HImode, cxt, 0),
                                 movishori));
      emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
      emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
      if (TARGET_LITTLE_ENDIAN)
        {
          emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
          emit_insn (gen_mextr4 (quad2, cxtload, blink));
        }
      else
        {
          emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
          emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
        }
      emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 8)), quad1);
      emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), quad2);
      emit_insn (gen_ic_invalidate_line (tramp));
      return;
    }
  else if (TARGET_SHCOMPACT)
    {
      emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
      return;
    }
  emit_move_insn (gen_rtx_MEM (SImode, tramp),
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
                                SImode));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
                                SImode));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
                  cxt);
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
                  fnaddr);
  if (TARGET_HARVARD)
    {
      if (TARGET_USERMODE)
        emit_library_call (function_symbol ("__ic_invalidate"),
                           0, VOIDmode, 1, tramp, SImode);
      else
        emit_insn (gen_ic_invalidate_line (tramp));
    }
}
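
/* Illustrative memory image of the SH1..SH4 trampoline built above:

       tramp+0:  the two mov.l insns (0xd202 / 0xd301, packed per endianness)
       tramp+4:  jmp @r2 / nop (0x422b / 0x0009)
       tramp+8:  cxt     -- static chain, loaded into r3 (r1 on SH5 compact)
       tramp+12: fnaddr  -- function address, loaded into r2 and jumped to  */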
/* FIXME: This is overly conservative.  A SHcompact function that
   receives arguments ``by reference'' will have them stored in its
   own stack frame, so it must not pass pointers or references to
   these arguments to other functions by means of sibling calls.  */
static bool
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
          && (! TARGET_SHCOMPACT
              || current_function_args_info.stack_regs == 0)
          && ! sh_cfun_interrupt_handler_p ());
}
/* Machine specific built-in functions.  */

struct builtin_description
{
  const enum insn_code icode;
  const char *const name;
  int signature;
};

/* Describe the number and signedness of arguments; arg[0] == result
   (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument).  */
static const char signature_args[][4] =
{
#define SH_BLTIN_V2SI2 0
  { 4, 4 },
#define SH_BLTIN_V4HI2 1
  { 4, 4 },
#define SH_BLTIN_V2SI3 2
  { 4, 4, 4 },
#define SH_BLTIN_V4HI3 3
  { 4, 4, 4 },
#define SH_BLTIN_V8QI3 4
  { 4, 4, 4 },
#define SH_BLTIN_MAC_HISI 5
  { 1, 4, 4, 1 },
#define SH_BLTIN_SH_HI 6
  { 4, 4, 1 },
#define SH_BLTIN_SH_SI 7
  { 4, 4, 1 },
#define SH_BLTIN_V4HI2V2SI 8
  { 4, 4, 4 },
#define SH_BLTIN_V4HI2V8QI 9
  { 4, 4, 4 },
#define SH_BLTIN_SISF 10
  { 4, 2 },
#define SH_BLTIN_LDUA_L 11
  { 2, 8 },
#define SH_BLTIN_LDUA_Q 12
  { 1, 8 },
#define SH_BLTIN_STUA_L 13
  { 0, 8, 2 },
#define SH_BLTIN_STUA_Q 14
  { 0, 8, 1 },
#define SH_BLTIN_UDI 15
  { 0, 8, 1 },
#define SH_BLTIN_NUM_SHARED_SIGNATURES 16
#define SH_BLTIN_2 16
#define SH_BLTIN_SU 16
  { 1, 2 },
#define SH_BLTIN_3 17
#define SH_BLTIN_SUS 17
  { 2, 2, 1 },
#define SH_BLTIN_PSSV 18
  { 0, 8, 2, 2 },
#define SH_BLTIN_XXUU 19
#define SH_BLTIN_UUUU 19
  { 1, 1, 1, 1 },
#define SH_BLTIN_PV 20
  { 0, 8 },
};
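
/* For example, SH_BLTIN_MAC_HISI is { 1, 4, 4, 1 }: an unsigned result,
   two "don't care" arguments (built below as the signed type of the
   corresponding operand mode), and a final unsigned argument.
   SH_BLTIN_PV is { 0, 8 }: no result and a single pointer argument.  */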
/* mcmv: operands considered unsigned.  */
/* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
/* mperm: control value considered unsigned int.  */
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
/* mshards_q: returns signed short.  */
/* nsb: takes long long arg, returns unsigned char.  */
static const struct builtin_description bdesc[] =
{
  { CODE_FOR_absv2si2,  "__builtin_absv2si2", SH_BLTIN_V2SI2 },
  { CODE_FOR_absv4hi2,  "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
  { CODE_FOR_addv2si3,  "__builtin_addv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_addv4hi3,  "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_ssaddv2si3, "__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_usaddv8qi3, "__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
  { CODE_FOR_ssaddv4hi3, "__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
#if 0
  { CODE_FOR_alloco32,  "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
  { CODE_FOR_alloco64,  "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
#endif
  { CODE_FOR_negcmpeqv8qi, "__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
  { CODE_FOR_negcmpeqv2si, "__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_negcmpeqv4hi, "__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_negcmpgtuv8qi, "__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
  { CODE_FOR_negcmpgtv2si, "__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_negcmpgtv4hi, "__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mcmv,      "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
  { CODE_FOR_mcnvs_lw,  "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
  { CODE_FOR_mcnvs_wb,  "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
  { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
  { CODE_FOR_mextr1,    "__builtin_sh_media_MEXTR1", SH_BLTIN_UDI },
  { CODE_FOR_mextr2,    "__builtin_sh_media_MEXTR2", SH_BLTIN_UDI },
  { CODE_FOR_mextr3,    "__builtin_sh_media_MEXTR3", SH_BLTIN_UDI },
  { CODE_FOR_mextr4,    "__builtin_sh_media_MEXTR4", SH_BLTIN_UDI },
  { CODE_FOR_mextr5,    "__builtin_sh_media_MEXTR5", SH_BLTIN_UDI },
  { CODE_FOR_mextr6,    "__builtin_sh_media_MEXTR6", SH_BLTIN_UDI },
  { CODE_FOR_mextr7,    "__builtin_sh_media_MEXTR7", SH_BLTIN_UDI },
  { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
  { CODE_FOR_mmacnfx_wl, "__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
  { CODE_FOR_mulv2si3,  "__builtin_mulv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_mulv4hi3,  "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_mmulfx_l,  "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_mmulfx_w,  "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mmulfxrp_w, "__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
  { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
  { CODE_FOR_mmulsum_wq, "__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
  { CODE_FOR_mperm_w,   "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
  { CODE_FOR_msad_ubq,  "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
  { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
  { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
  { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
  { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
  { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
  { CODE_FOR_mshfhi_b,  "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
  { CODE_FOR_mshfhi_l,  "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_mshfhi_w,  "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_mshflo_b,  "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
  { CODE_FOR_mshflo_l,  "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
  { CODE_FOR_mshflo_w,  "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
  { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
  { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
  { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
  { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
  { CODE_FOR_subv2si3,  "__builtin_subv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_subv4hi3,  "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_sssubv2si3, "__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
  { CODE_FOR_ussubv8qi3, "__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
  { CODE_FOR_sssubv4hi3, "__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
  { CODE_FOR_fcosa_s,   "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
  { CODE_FOR_fsina_s,   "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
  { CODE_FOR_fipr,      "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
  { CODE_FOR_ftrv,      "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
  { CODE_FOR_fsrra_s,   "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
#if 0
  { CODE_FOR_ldhi_l,    "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
  { CODE_FOR_ldhi_q,    "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
  { CODE_FOR_ldlo_l,    "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
  { CODE_FOR_ldlo_q,    "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
  { CODE_FOR_sthi_l,    "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
  { CODE_FOR_sthi_q,    "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
  { CODE_FOR_stlo_l,    "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
  { CODE_FOR_stlo_q,    "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
  { CODE_FOR_ldhi_l64,  "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
  { CODE_FOR_ldhi_q64,  "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
  { CODE_FOR_ldlo_l64,  "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
  { CODE_FOR_ldlo_q64,  "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
  { CODE_FOR_sthi_l64,  "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
  { CODE_FOR_sthi_q64,  "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
  { CODE_FOR_stlo_l64,  "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
  { CODE_FOR_stlo_q64,  "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
#endif
  { CODE_FOR_nsb,       "__builtin_sh_media_NSB", SH_BLTIN_SU },
  { CODE_FOR_byterev,   "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
#if 0
  { CODE_FOR_prefetch32, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
  { CODE_FOR_prefetch64, "__builtin_sh_media_PREFO", SH_BLTIN_PSSV }
#endif
};
static void
sh_media_init_builtins (void)
{
  tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
  const struct builtin_description *d;

  memset (shared, 0, sizeof shared);
  for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
    {
      tree type, arg_type;
      int signature = d->signature;
      int i;

      if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
        type = shared[signature];
      else
        {
          int has_result = signature_args[signature][0] != 0;

          if (signature_args[signature][1] == 8
              && (insn_data[d->icode].operand[has_result].mode != Pmode))
            continue;
          if (! TARGET_FPU_ANY
              && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
            continue;
          type = void_list_node;
          for (i = 3; ; i--)
            {
              int arg = signature_args[signature][i];
              int opno = i - 1 + has_result;

              if (arg == 8)
                arg_type = ptr_type_node;
              else if (arg)
                arg_type = ((*lang_hooks.types.type_for_mode)
                            (insn_data[d->icode].operand[opno].mode,
                             (arg & 1)));
              else if (i)
                continue;
              else
                arg_type = void_type_node;
              if (i == 0)
                break;
              type = tree_cons (NULL_TREE, arg_type, type);
            }
          type = build_function_type (arg_type, type);
          if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
            shared[signature] = type;
        }
      builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
                        NULL, NULL_TREE);
    }
}

static void
sh_init_builtins (void)
{
  if (TARGET_SHMEDIA)
    sh_media_init_builtins ();
}
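
/* For instance, the bdesc entry for CODE_FOR_addv2si3 above registers
   __builtin_addv2si3 with a prototype derived from the insn's operand
   modes via signature SH_BLTIN_V2SI3, sharing the resulting type node
   with every other builtin of the same signature.  */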
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */
static rtx
sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
{
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  tree arglist = TREE_OPERAND (exp, 1);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d = &bdesc[fcode];
  enum insn_code icode = d->icode;
  int signature = d->signature;
  enum machine_mode tmode = VOIDmode;
  int nop = 0, i;
  rtx op[4];
  rtx pat;

  if (signature_args[signature][0])
    {
      if (ignore)
        return 0;

      tmode = insn_data[icode].operand[0].mode;
      if (! target
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      op[nop++] = target;
    }
  else
    target = 0;

  for (i = 1; i <= 3; i++, nop++)
    {
      tree arg;
      enum machine_mode opmode, argmode;

      if (! signature_args[signature][i])
        break;
      arg = TREE_VALUE (arglist);
      if (arg == error_mark_node)
        return const0_rtx;
      arglist = TREE_CHAIN (arglist);
      opmode = insn_data[icode].operand[nop].mode;
      argmode = TYPE_MODE (TREE_TYPE (arg));
      if (argmode != opmode)
        arg = build1 (NOP_EXPR,
                      (*lang_hooks.types.type_for_mode) (opmode, 0), arg);
      op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
      if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
        op[nop] = copy_to_mode_reg (opmode, op[nop]);
    }

  switch (nop)
    {
    case 1:
      pat = (*insn_data[d->icode].genfun) (op[0]);
      break;
    case 2:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
      break;
    case 3:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
      break;
    case 4:
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
      break;
    default:
      abort ();
    }
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}
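
/* Hypothetical user-level sketch (SHmedia): how one of the builtins
   registered above would be used from C; not part of this file.  */
#if 0
typedef int v2si __attribute__ ((vector_size (8)));

v2si
add_pairs (v2si a, v2si b)
{
  /* Expanded by sh_expand_builtin into the addv2si3 insn.  */
  return __builtin_addv2si3 (a, b);
}
#endif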
void
sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
{
  rtx sel0 = const0_rtx;
  rtx sel1 = const1_rtx;
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
  rtx op = gen_rtx_fmt_e (code, SFmode, op1);

  emit_insn ((*fn) (op0, op1, op, sel0, sel0));
  emit_insn ((*fn) (op0, op1, op, sel1, sel1));
}

void
sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
{
  rtx sel0 = const0_rtx;
  rtx sel1 = const1_rtx;
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx)
    = gen_binary_sf_op;
  rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);

  emit_insn ((*fn) (op0, op1, op2, op, sel0, sel0, sel0, sel1));
  emit_insn ((*fn) (op0, op1, op2, op, sel1, sel1, sel1, sel0));
}
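
/* In both expanders above, the V2SF operation is decomposed into two
   scalar SFmode operations, one per vector lane, with the lane picked
   by the constant selectors sel0 / sel1.  */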
/* Return true if a mode change from FROM to TO is invalid for registers
   in class CLASS.  */
bool
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
                             enum reg_class class)
{
  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
    {
      if (TARGET_LITTLE_ENDIAN)
        {
          if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
            return reg_classes_intersect_p (DF_REGS, class);
        }
      else
        {
          if (GET_MODE_SIZE (from) < 8)
            return reg_classes_intersect_p (DF_HI_REGS, class);
        }
    }
  return 0;
}
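
/* Example (illustrative): on a little-endian target, taking an SImode
   or SFmode subreg of a DFmode value is rejected for DF_REGS, since the
   halves of a double-precision register pair are not arranged the way
   little-endian memory order would imply.  */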
/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
   that label is used.  */

void
sh_mark_label (rtx address, int nuses)
{
  if (GOTOFF_P (address))
    {
      /* Extract the label or symbol.  */
      address = XEXP (address, 0);
      if (GET_CODE (address) == PLUS)
        address = XEXP (address, 0);
      address = XVECEXP (address, 0, 0);
    }
  if (GET_CODE (address) == LABEL_REF
      && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
    LABEL_NUSES (XEXP (address, 0)) += nuses;
}
/* Compute extra cost of moving data between one register class
   and another.  */

/* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
   uses this information.  Hence, the general register <-> floating point
   register information here is not used for SFmode.  */
int
sh_register_move_cost (enum machine_mode mode,
                       enum reg_class srcclass, enum reg_class dstclass)
{
  if (dstclass == T_REGS || dstclass == PR_REGS)
    return 10;

  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
    return 4;

  if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
      && REGCLASS_HAS_FP_REG (srcclass)
      && REGCLASS_HAS_FP_REG (dstclass))
    return 4;

  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
    return 9;

  if ((REGCLASS_HAS_FP_REG (dstclass)
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
          && REGCLASS_HAS_FP_REG (srcclass)))
    return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
            * ((GET_MODE_SIZE (mode) + 7) / 8U));

  if ((dstclass == FPUL_REGS
       && REGCLASS_HAS_GENERAL_REG (srcclass))
      || (srcclass == FPUL_REGS
          && REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 5;

  if ((dstclass == FPUL_REGS
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
      || (srcclass == FPUL_REGS
          && (dstclass == PR_REGS || dstclass == MAC_REGS)))
    return 7;

  if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || (dstclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 20;

  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
    return 4;

  if (TARGET_SHMEDIA
      || (TARGET_FMOVD
          && ! REGCLASS_HAS_GENERAL_REG (srcclass)
          && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);

  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
}
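
/* Worked numbers (from the formulas above): a DFmode move between a
   general register and a floating point register costs
   12 * ((8 + 7) / 8) = 12 without TARGET_FMOVD and 8 with it, while the
   fall-through case prices an SImode move at 2 * ((4 + 3) / 4) = 2.  */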
/* Like register_operand, but take into account that SHMEDIA can use
   the constant zero like a general register.  */
int
sh_register_operand (rtx op, enum machine_mode mode)
{
  if (op == CONST0_RTX (mode) && TARGET_SHMEDIA)
    return 1;
  return register_operand (op, mode);
}

/* Accept either the T register or anything arith_operand accepts.  */
int
cmpsi_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == REG && REGNO (op) == T_REG
      && GET_MODE (op) == SImode)
    return 1;
  return arith_operand (op, mode);
}

static rtx emit_load_ptr (rtx, rtx);

/* Load a ptr_mode value from ADDR into REG, sign-extending it to Pmode
   when Pmode is wider than ptr_mode.  */
static rtx
emit_load_ptr (rtx reg, rtx addr)
{
  rtx mem = gen_rtx_MEM (ptr_mode, addr);

  if (Pmode != ptr_mode)
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
  return emit_move_insn (reg, mem);
}
void
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                    tree function)
{
  CUMULATIVE_ARGS cum;
  int structure_value_byref = 0;
  rtx this, this_value, sibcall, insns, funexp;
  tree funtype = TREE_TYPE (function);
  int simple_add = CONST_OK_FOR_ADD (delta);
  int did_load = 0;
  rtx scratch0, scratch1, scratch2;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  current_function_uses_only_leaf_regs = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  We have such a wide range of ABIs for the
     SH that it's best to do this completely machine independently.
     "this" is passed as first argument, unless a structure return pointer
     comes first, in which case "this" comes second.  */
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
#ifndef PCC_STATIC_STRUCT_RETURN
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    structure_value_byref = 1;
#endif /* not PCC_STATIC_STRUCT_RETURN */
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
    {
      tree ptype = build_pointer_type (TREE_TYPE (funtype));

      FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
    }
  this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);

  /* For SHcompact, we only have r0 for a scratch register: r1 is the
     static chain pointer (even if you can't have nested virtual functions
     right now, someone might implement them sometime), and the rest of the
     registers are used for argument passing, are callee-saved, or reserved.  */
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
  if (! TARGET_SH5)
    {
      scratch1 = gen_rtx_REG (ptr_mode, 1);
      /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
         that tells where to return struct values.  */
      scratch2 = gen_rtx_REG (Pmode, 3);
    }
  else if (TARGET_SHMEDIA)
    {
      scratch1 = gen_rtx_REG (ptr_mode, 21);
      scratch2 = gen_rtx_REG (Pmode, TR0_REG);
    }

  this_value = plus_constant (this, delta);
  if (vcall_offset
      && (simple_add || scratch0 != scratch1)
      && strict_memory_address_p (ptr_mode, this_value))
    {
      emit_load_ptr (scratch0, this_value);
      did_load = 1;
    }

  if (!delta)
    ; /* Do nothing.  */
  else if (simple_add)
    emit_move_insn (this, this_value);
  else
    {
      emit_move_insn (scratch1, GEN_INT (delta));
      emit_insn (gen_add2_insn (this, scratch1));
    }

  if (vcall_offset)
    {
      rtx offset_addr;

      if (!did_load)
        emit_load_ptr (scratch0, this);

      offset_addr = plus_constant (scratch0, vcall_offset);
      if (strict_memory_address_p (ptr_mode, offset_addr))
        ; /* Do nothing.  */
      else if (! TARGET_SH5)
        {
          /* scratch0 != scratch1, and we have indexed loads.  Get better
             schedule by loading the offset into r1 and using an indexed
             load - then the load of r1 can issue before the load from
             (this + delta) finishes.  */
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
          offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
        }
      else if (CONST_OK_FOR_ADD (vcall_offset))
        {
          emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
          offset_addr = scratch0;
        }
      else if (scratch0 != scratch1)
        {
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
          emit_insn (gen_add2_insn (scratch0, scratch1));
          offset_addr = scratch0;
        }
      else
        abort (); /* FIXME */
      emit_load_ptr (scratch0, offset_addr);

      if (Pmode != ptr_mode)
        scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
      emit_insn (gen_add2_insn (this, scratch0));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  emit_move_insn (scratch2, funexp);
  funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
  sibcall = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
  SIBLING_CALL_P (sibcall) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
  emit_barrier ();

  /* Run just enough of rest_of_compilation to do scheduling and get
     the insns emitted.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  insns = get_insns ();

  if (optimize > 0 && flag_schedule_insns_after_reload)
    {
      find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
      life_analysis (insns, rtl_dump_file, PROP_FINAL);

      split_all_insns (1);

      schedule_insns (rtl_dump_file);
    }

  sh_reorg ();

  if (optimize > 0 && flag_delayed_branch)
    dbr_schedule (insns, rtl_dump_file);
  shorten_branches (insns);
  final_start_function (insns, file, 1);
  final (insns, file, 1, 0);
  final_end_function ();

  if (optimize > 0 && flag_schedule_insns_after_reload)
    {
      /* Release all memory allocated by flow.  */
      free_basic_block_vars (0);

      /* Release all memory held by regsets now.  */
      regset_release_memory ();
    }

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}
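
/* Roughly, the emitted thunk computes (pseudo-C, for illustration):

       this += delta;
       if (vcall_offset)
         this += *(ptrdiff_t *) (*(char **) this + vcall_offset);
       goto *function;

   i.e. the vcall adjustment loads a pointer-sized value from
   *this + vcall_offset and adds it to "this" before the tail call.  */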
/* Return a SYMBOL_REF for NAME, marked as a function.  */
rtx
function_symbol (const char *name)
{
  rtx sym = gen_rtx_SYMBOL_REF (Pmode, name);
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
  return sym;
}

/* Find the number of a general purpose register in S.  */
static int
scavenge_reg (HARD_REG_SET *s)
{
  int r;
  for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
    if (TEST_HARD_REG_BIT (*s, r))
      return r;
  return -1;
}
rtx
sh_get_pr_initial_val (void)
{
  rtx val;

  /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
     PR register on SHcompact, because it might be clobbered by the prologue.
     We check first if that is known to be the case.  */
  if (TARGET_SHCOMPACT
      && ((current_function_args_info.call_cookie
           & ~ CALL_COOKIE_RET_TRAMP (1))
          || current_function_has_nonlocal_label))
    return gen_rtx_MEM (SImode, return_address_pointer_rtx);

  /* If we haven't finished rtl generation, there might be a nonlocal label
     that we haven't seen yet.
     ??? get_hard_reg_initial_val fails if it is called while no_new_pseudos
     is set, unless it has been called before for the same register.  And even
     then, we end up in trouble if we didn't use the register in the same
     basic block before.  So call get_hard_reg_initial_val now and wrap it
     in an unspec if we might need to replace it.  */
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
     combine can put the pseudo returned by get_hard_reg_initial_val into
     instructions that need a general purpose register, which will fail to
     be recognized when the pseudo becomes allocated to PR.  */
  val
    = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
  if (TARGET_SH1)
    return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
  return val;
}
/* Expand an scc (store T into a register) where sh_compare_op0 is the
   T register and sh_compare_op1 a constant; return nonzero on success.  */
int
sh_expand_t_scc (enum rtx_code code, rtx target)
{
  rtx result = target;
  HOST_WIDE_INT val;

  if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
      || GET_CODE (sh_compare_op1) != CONST_INT)
    return 0;
  if (GET_CODE (result) != REG)
    result = gen_reg_rtx (SImode);
  val = INTVAL (sh_compare_op1);
  if ((code == EQ && val == 1) || (code == NE && val == 0))
    emit_insn (gen_movt (result));
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
    {
      emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
      emit_insn (gen_subc (result, result, result));
      emit_insn (gen_addsi3 (result, result, const1_rtx));
    }
  else if (code == EQ || code == NE)
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
  else
    return 0;
  if (result != target)
    emit_move_insn (target, result);
  return 1;
}
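
/* E.g. for result = (T == 0), after a clobber of result the sequence
   emitted above is

       subc  result,result   ! result = result - result - T = -T
       add   #1,result       ! result = 1 - T

   while result = (T == 1) is just "movt result".  */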
/* INSN is an sfunc; return the rtx that describes the address used.  */
static rtx
extract_sfunc_addr (rtx insn)
{
  rtx pattern, part = NULL_RTX;
  int len, i;

  pattern = PATTERN (insn);
  len = XVECLEN (pattern, 0);
  for (i = 0; i < len; i++)
    {
      part = XVECEXP (pattern, 0, i);
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
          && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
        return XEXP (part, 0);
    }
  if (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE)
    return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
  abort ();
}

/* Verify that the register in use_sfunc_addr still agrees with the address
   used in the sfunc.  This prevents fill_slots_from_thread from changing
   use_sfunc_addr.
   INSN is the use_sfunc_addr instruction, and REG is the register it
   guards.  */
int
check_use_sfunc_addr (rtx insn, rtx reg)
{
  /* Search for the sfunc.  It should really come right after INSN.  */
  while ((insn = NEXT_INSN (insn)))
    {
      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
        break;
      if (! INSN_P (insn))
        continue;

      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
        insn = XVECEXP (PATTERN (insn), 0, 0);
      if (GET_CODE (PATTERN (insn)) != PARALLEL
          || get_attr_type (insn) != TYPE_SFUNC)
        continue;
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
    }
  abort ();
}

#include "gt-sh.h"