/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "target.h"

/* The number of insns to be scheduled in total.  */
static int target_n_insns;
/* The number of insns scheduled so far.  */
static int sched_n_insns;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list PARAMS ((struct ready_list *));
static int can_schedule_ready_p PARAMS ((rtx));
static int new_ready PARAMS ((rtx));
static int schedule_more_p PARAMS ((void));
static const char *ebb_print_insn PARAMS ((rtx, int));
static int rank PARAMS ((rtx, rtx));
static int contributes_to_priority PARAMS ((rtx, rtx));
static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
static basic_block schedule_ebb PARAMS ((rtx, rtx));
static basic_block fix_basic_block_boundaries PARAMS ((basic_block, basic_block, rtx, rtx));
static void add_missing_bbs PARAMS ((rtx, basic_block, basic_block));

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p ()
{
  return sched_n_insns < target_n_insns;
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */
static void
init_ready_list (ready)
     struct ready_list *ready;
{
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  target_n_insns = 0;
  sched_n_insns = 0;

#if 0
  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_dependencies ();
#endif

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (INSN_DEP_COUNT (insn) == 0)
        ready_add (ready, insn);
      target_n_insns++;
    }
}

/* Called after taking INSN from the ready list.  Returns nonzero if this
   insn can be scheduled, zero if we should silently discard it.  */

static int
can_schedule_ready_p (insn)
     rtx insn ATTRIBUTE_UNUSED;
{
  sched_n_insns++;
  return 1;
}

/* Called after INSN has all its dependencies resolved.  Return nonzero
   if it should be moved to the ready list or the queue, or zero if we
   should silently discard it.  */
static int
new_ready (next)
     rtx next ATTRIBUTE_UNUSED;
{
  return 1;
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (insn, aligned)
     rtx insn;
     int aligned ATTRIBUTE_UNUSED;
{
  static char tmp[80];

  sprintf (tmp, "%4d", INSN_UID (insn));
  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (insn1, insn2)
     rtx insn1, insn2;
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (next, insn)
     rtx next ATTRIBUTE_UNUSED, insn ATTRIBUTE_UNUSED;
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that must be considered
   to be set by this jump in SET.  */

static void
compute_jump_reg_dependencies (insn, set)
     rtx insn;
     regset set;
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  for (e = b->succ; e; e = e->succ_next)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      {
        bitmap_operation (set, set, e->dest->global_live_at_start,
                          BITMAP_IOR);
      }
}

/* Used in schedule_ebbs to initialize current_sched_info for scheduling
   extended basic blocks (or single basic blocks).  */

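/* Note: judging from the uses of current_sched_info in this file, the
   trailing NULL/0/1 initializers below appear to correspond to the
   prev_head/next_tail and head/tail insn boundaries (filled in before and
   after scheduling) and to the queue_must_finish_empty flag plus,
   presumably, the use_cselib flag of struct sched_info.  */
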
static struct sched_info ebb_sched_info =
{
  init_ready_list,
  can_schedule_ready_p,
  schedule_more_p,
  new_ready,
  rank,
  ebb_print_insn,
  contributes_to_priority,
  compute_jump_reg_dependencies,

  NULL, NULL,
  NULL, NULL,
  0, 1
};

/* It is possible that ebb scheduling eliminated some blocks.
   Place blocks from FIRST to LAST before BEFORE.  */

static void
add_missing_bbs (before, first, last)
     rtx before;
     basic_block first, last;
{
  for (; last != first->prev_bb; last = last->prev_bb)
    {
      before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before);
      NOTE_BASIC_BLOCK (before) = last;
      last->head = before;
      last->end = before;
      update_bb_for_insn (last);
    }
}

/* Fixup the CFG after EBB scheduling.  Re-recognize the basic
   block boundaries in between HEAD and TAIL and update basic block
   structures between BB and LAST.  */

static basic_block
fix_basic_block_boundaries (bb, last, head, tail)
     basic_block bb, last;
     rtx head, tail;
{
  rtx insn = head;
  rtx last_inside = bb->head;
  rtx aftertail = NEXT_INSN (tail);

  head = bb->head;

  for (; insn != aftertail; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CODE_LABEL)
        abort ();
      /* Create new basic blocks just before the first insn.  */
      if (inside_basic_block_p (insn))
        {
          if (!last_inside)
            {
              rtx note;

              /* Re-emit the basic block note for the newly found BB header.  */
              if (GET_CODE (insn) == CODE_LABEL)
                {
                  note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn);
                  head = insn;
                  last_inside = note;
                }
              else
                {
                  note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn);
                  head = note;
                  last_inside = insn;
                }
            }
          else
            last_inside = insn;
        }
      /* A control flow instruction terminates a basic block.  It is possible
         that we've eliminated some basic blocks (made them empty).
         Find the proper basic block using BLOCK_FOR_INSN and arrange things in
         a sensible way by inserting empty basic blocks as needed.  */
      if (control_flow_insn_p (insn) || (insn == tail && last_inside))
        {
          basic_block curr_bb = BLOCK_FOR_INSN (insn);
          rtx note;

          if (!control_flow_insn_p (insn))
            curr_bb = last;
          if (bb == last->next_bb)
            {
              edge f;
              rtx h;

              /* An obscure special case, where we do have a partially dead
                 instruction scheduled after the last control flow instruction.
                 In this case we can create a new basic block.  It is
                 always exactly one basic block, last in the sequence.  Handle
                 it by splitting the edge and repositioning the block.
                 This is somewhat hackish, but at least avoids cut&paste.

                 A safer solution would be to bring the code into sequence,
                 do the split and re-emit it back, in case this ever
                 triggers a problem.  */
              f = bb->prev_bb->succ;
              while (f && !(f->flags & EDGE_FALLTHRU))
                f = f->succ_next;

              if (f)
                {
                  last = curr_bb = split_edge (f);
                  h = curr_bb->head;
                  curr_bb->head = head;
                  curr_bb->end = insn;
                  /* Edge splitting created a misplaced BASIC_BLOCK note; kill
                     it.  */
                  delete_insn (h);
                }
              /* It may happen that code got moved past an unconditional jump
                 in case the code is completely dead.  Kill it.  */
              else
                {
                  rtx next = next_nonnote_insn (insn);
                  delete_insn_chain (head, insn);
                  /* We keep some notes in the way that may split the barrier
                     from the jump.  */
                  if (GET_CODE (next) == BARRIER)
                    {
                      emit_barrier_after (prev_nonnote_insn (head));
                      delete_insn (next);
                    }
                  insn = NULL;
                }
            }
          else
            {
              curr_bb->head = head;
              curr_bb->end = insn;
              add_missing_bbs (curr_bb->head, bb, curr_bb->prev_bb);
            }
          note = GET_CODE (head) == CODE_LABEL ? NEXT_INSN (head) : head;
          NOTE_BASIC_BLOCK (note) = curr_bb;
          update_bb_for_insn (curr_bb);
          bb = curr_bb->next_bb;
          last_inside = NULL;
          if (!insn)
            break;
        }
    }
  add_missing_bbs (last->next_bb->head, bb, last);
  return bb->prev_bb;
}

/* Schedule a single extended basic block, defined by the boundaries HEAD
   and TAIL.  */

static basic_block
schedule_ebb (head, tail)
     rtx head, tail;
{
  int n_insns;
  basic_block b;
  struct deps tmp_deps;
  basic_block first_bb = BLOCK_FOR_INSN (head);
  basic_block last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  init_deps_global ();

  /* Compute LOG_LINKS.  */
  init_deps (&tmp_deps);
  sched_analyze (&tmp_deps, head, tail);
  free_deps (&tmp_deps);

  /* Compute INSN_DEPEND.  */
  compute_forward_dependences (head, tail);

  if (targetm.sched.dependencies_evaluation_hook)
    targetm.sched.dependencies_evaluation_hook (head, tail);

  /* Set priorities.  */
  n_insns = set_priorities (head, tail);

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  if (write_symbols != NO_DEBUG)
    {
      save_line_notes (0, head, tail);
      rm_line_notes (head, tail);
    }

  /* rm_other_notes only removes notes which are _inside_ the
     block---that is, it won't remove notes before the first real insn
     or after the last real insn of the block.  So if the first insn
     has a REG_SAVE_NOTE which would otherwise be emitted before the
     insn, it is redundant with the note before the start of the
     block, and so we have to take it out.  */
  if (INSN_P (head))
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
          {
            remove_note (head, note);
            note = XEXP (note, 1);
            remove_note (head, note);
          }
    }

  /* Remove remaining note insns from the block, save them in
     note_list.  These notes are restored at the end of
     schedule_block ().  */
  rm_other_notes (head, tail);

  current_sched_info->queue_must_finish_empty = 1;

  schedule_block (-1, n_insns);

  /* Sanity check: verify that all region insns were scheduled.  */
  if (sched_n_insns != n_insns)
    abort ();
  head = current_sched_info->head;
  tail = current_sched_info->tail;

  if (write_symbols != NO_DEBUG)
    restore_line_notes (head, tail);
  b = fix_basic_block_boundaries (first_bb, last_bb, head, tail);

  finish_deps_global ();
  return b;
}

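/* In outline, schedule_ebb above drives the generic haifa scheduler over a
   single extended basic block: build the backward dependency lists
   (LOG_LINKS), derive the forward lists (INSN_DEPEND), assign priorities,
   strip and save the notes, run schedule_block over the whole region, and
   finally let fix_basic_block_boundaries rebuild the basic block structure
   around the reordered insn stream.  */
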
/* The one entry point in this file.  DUMP_FILE is the dump file for
   this pass.  */
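/* Note: schedule_ebbs is presumably invoked from rest_of_compilation in
   toplev.c, in place of the region scheduler, when superblock scheduling
   is requested for the post-reload scheduling pass.  */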
void
schedule_ebbs (dump_file)
     FILE *dump_file;
{
  basic_block bb;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return;

  sched_init (dump_file);

  current_sched_info = &ebb_sched_info;

  allocate_reg_life_data ();
  compute_bb_for_insn ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = bb->head;
      rtx tail;

      /* Grow the extended basic block: follow the fall-through edge for as
         long as the next block is entered only by falling through (it has
         no label) and falling through is at least as likely as branching
         away.  */
      for (;;)
        {
          edge e;
          tail = bb->end;
          if (bb->next_bb == EXIT_BLOCK_PTR
              || GET_CODE (bb->next_bb->head) == CODE_LABEL)
            break;
          for (e = bb->succ; e; e = e->succ_next)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability < REG_BR_PROB_BASE / 2)
            break;
          bb = bb->next_bb;
        }

      /* Blah.  We should fix the rest of the code not to get confused by
         a note or two.  */
      while (head != tail)
        {
          if (GET_CODE (head) == NOTE)
            head = NEXT_INSN (head);
          else if (GET_CODE (tail) == NOTE)
            tail = PREV_INSN (tail);
          else if (GET_CODE (head) == CODE_LABEL)
            head = NEXT_INSN (head);
          else
            break;
        }

      bb = schedule_ebb (head, tail);
    }

  /* Updating life info can be done by local propagation over the modified
     superblocks.  */

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  sched_finish ();
}