/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "target.h"
#include "output.h"

#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);

static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */
static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_rgn_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      edge_iterator ei;
      basic_block bb;

      /* An obscure special case: a partially dead instruction is
         scheduled after the last control flow instruction.  In this
         case we can create a new basic block.  It is always exactly
         one basic block, last in the sequence.  */
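
      /* Look for the fallthrough edge out of LAST_BB; E is left NULL
         when there is none, e.g. when the jump is followed by a
         barrier.  */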
      FOR_EACH_EDGE (e, ei, last_bb->succs)
        if (e->flags & EDGE_FALLTHRU)
          break;

#ifdef ENABLE_CHECKING
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
                  && !IS_SPECULATION_CHECK_P (insn)
                  && BB_HEAD (last_bb) != insn
                  && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        /* Create an empty unreachable block after the INSN.  */
        bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region to the end of BB.
         Hence, we need to shift NEXT_TAIL so that haifa-sched.c won't
         go past the end of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                             rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                                   regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         in the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that has
         conditionally overwritten that of the main codepath.  So we
         consider that it restores the value of the main codepath.  */
      bitmap_and (set, df_get_live_in (e->dest), cond_set);
    else
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  advance_target_bb,
  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */
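
/* For instance (purely illustrative): if insn1 sets a base register r,
   load_insn reads (mem (plus r c1)) and insn2 reads (mem (plus r c2)),
   then the two addresses differ by the constant c2 - c1, so any
   exception that moving load_insn could raise would already have been
   raised by insn2.  */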

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */
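
/* "Risky" insns are those that may trap (see haifa_classify_insn):
   moving one above a preceding jump would make the trap speculative.
   We prevent that by adding an anti dependence from the last jump
   seen so far to each risky insn that follows it.  */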

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        classification = haifa_classify_insn (insn);
        prev = last_jump;
        switch (classification)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similiar_load (last_block, insn);
                if (bb)
                  {
                    bb = (basic_block) bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking of PRISKY_CANDIDATEs,
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev))
              {
                dep_def _dep, *dep = &_dep;

                init_dep (dep, prev, insn, REG_DEP_ANTI);

                if (!(current_sched_info->flags & USE_DEPS_LIST))
                  {
                    enum DEPS_ADJUST_RESULT res;

                    res = sd_add_or_update_dep (dep, false);

                    /* We can't change an existing dependency with
                       DEP_ANTI.  */
                    gcc_assert (res != DEP_CHANGED);
                  }
                else
                  {
                    if ((current_sched_info->flags & DO_SPECULATION)
                        && (spec_info->mask & BEGIN_CONTROL))
                      DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                       MAX_DEP_WEAK);

                    sd_add_or_update_dep (dep, false);

                    /* Dep_status could have been changed.
                       No assertion here.  */
                  }
              }

            break;

          default:
            break;
          }
      }

  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.  */
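
/* The overall flow below: compute dependencies for the ebb (unless
   they are already known, as for recovery blocks), add the extra
   dependencies for risky insns, set insn priorities, and hand the
   region to the haifa scheduler via schedule_block ().  */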

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  schedule_block (&target_bb);
  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might have packed all instructions into fewer blocks,
     so some of them may now be empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}

/* The one entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;
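
  /* The TRACER_MIN_BRANCH_PROBABILITY params are expressed as
     percentages; scale the chosen cutoff to the REG_BR_PROB_BASE
     units in which edge probabilities are kept.  */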
  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);
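
      /* Grow the ebb: absorb the fallthrough successor for as long as
         it carries no label (so it cannot be entered by a jump) and
         the fallthrough edge is taken often enough to be worth
         scheduling across.  */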
      for (;;)
        {
          edge e;
          edge_iterator ei;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* Blah.  We should fix the rest of the code not to get confused by
         a note or two.  */
      while (head != tail)
        {
          if (NOTE_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bb = schedule_ebb (head, tail);
    }
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs; therefore they
     always form single block ebbs, and we can use rec->index to
     identify such ebbs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next nonempty block.  */
    {
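      /* A block is empty when its bb note is also its last insn.  */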
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#endif /* INSN_SCHEDULING */