/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "tm_p.h"
31 #include "hard-reg-set.h"
32 #include "regs.h"
33 #include "function.h"
34 #include "flags.h"
35 #include "insn-config.h"
36 #include "insn-attr.h"
37 #include "except.h"
38 #include "toplev.h"
39 #include "recog.h"
40 #include "cfglayout.h"
41 #include "params.h"
42 #include "sched-int.h"
43 #include "target.h"
44 #include "output.h"
/* The number of insns scheduled so far.  */
static int sched_n_insns;

/* The number of insns to be scheduled in total.  */
static int n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;
/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
static basic_block earliest_block_with_similar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);

static void add_remove_insn (rtx, int);
static void add_block1 (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void fix_recovery_cfg (int, int, int);
/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_n_insns < n_insns;
}
/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
           ";;   --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;;   --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}
/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */
static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == n_insns);
}
/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn, rtx last)
{
  sched_n_insns++;

  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      edge_iterator ei;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
         instruction scheduled after the last control flow instruction.
         In this case we can create a new basic block.  It is
         always exactly one basic block, last in the sequence.  */

      FOR_EACH_EDGE (e, ei, last_bb->succs)
        if (e->flags & EDGE_FALLTHRU)
          break;

#ifdef ENABLE_CHECKING
      gcc_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
                  && !IS_SPECULATION_CHECK_P (insn)
                  && BB_HEAD (last_bb) != insn
                  && BB_END (last_bb) == insn);

      {
        rtx x;

        x = NEXT_INSN (insn);
        if (e)
          gcc_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_assert (BARRIER_P (x));
      }
#endif

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        /* Create an empty unreachable block after the INSN.  */
        bb = create_basic_block (NEXT_INSN (insn), NULL_RTX, last_bb);

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      add_block (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
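/* Illustrative note (not from the original source): when the jump that
   ends the last block is scheduled ahead of other insns, the insns left
   after it need a block of their own.  If the jump has a fallthrough
   edge, splitting that edge supplies the block; otherwise an empty
   unreachable block is created after the jump.  Either way the
   scheduling region is extended to cover the new block.  */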
/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  sprintf (tmp, "%4d", INSN_UID (insn));
  return tmp;
}
/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;
  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;
  return 0;
}
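/* For example (illustrative): with profile feedback, an insn from the
   block with the larger execution count ranks ahead (rank () returns -1),
   so insns from the hot path of the ebb are preferred when the ready
   list must be ordered.  */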
/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                         rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}
/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

static void
compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
                               regset set)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if (e->flags & EDGE_FALLTHRU)
      /* The jump may be a by-product of a branch that has been merged
         in the main codepath after being conditionalized.  Therefore
         it may guard the fallthrough block from using a value that has
         conditionally overwritten that of the main codepath.  So we
         consider that it restores the value of the main codepath.  */
      bitmap_and (set, df_get_live_in (e->dest), cond_set);
    else
      bitmap_ior_into (used, df_get_live_in (e->dest));
}
/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  contributes_to_priority,
  compute_jump_reg_dependencies,

  NULL, NULL,
  NULL, NULL,
  0, 1, 0,

  add_remove_insn,
  begin_schedule_ready,
  add_block1,
  advance_target_bb,
  fix_recovery_cfg,
  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};
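/* Note (editorial, based on sched-int.h): the initializers above are
   positional, filling the fields of struct sched_info in declaration
   order; the last value is the flags word, where SCHED_EBB identifies
   this scheduler and NEW_BBS records that begin_schedule_ready () may
   add blocks to the region.  */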
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already-processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similar_load (basic_block last_block, rtx load_insn)
{
  dep_link_t back_link;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP_LINK (back_link, INSN_BACK_DEPS (load_insn))
    {
      rtx insn1 = DEP_LINK_PRO (back_link);

      if (DEP_LINK_KIND (back_link) == REG_DEP_TRUE)
        {
          /* Found a DEF-USE dependence (insn1, load_insn).  */
          dep_link_t fore_link;

          FOR_EACH_DEP_LINK (fore_link, INSN_FORW_DEPS (insn1))
            {
              rtx insn2 = DEP_LINK_CON (fore_link);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_LINK_KIND (fore_link) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}
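/* Sketch of the matched pattern (illustrative, not from the original
   source):

       insn1:      r1 = ...                   sets the common base register
       insn2:      r2 = MEM (r1 + 4)          "similar load", seen earlier
       load_insn:  r3 = MEM (r1 + 8)          candidate for speculation

   Both loads are PFREE_CANDIDATEs with a true dependence on the same
   INSN1, so their addresses differ by at most a constant: if moving
   LOAD_INSN up could fault, INSN2 would already have faulted.  */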
/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int class;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (control_flow_insn_p (insn))
      {
        bb = BLOCK_FOR_INSN (insn);
        bb->aux = last_block;
        last_block = bb;
        last_jump = insn;
      }
    else if (INSN_P (insn) && last_jump != NULL_RTX)
      {
        class = haifa_classify_insn (insn);
        prev = last_jump;
        switch (class)
          {
          case PFREE_CANDIDATE:
            if (flag_schedule_speculative_load)
              {
                bb = earliest_block_with_similar_load (last_block, insn);
                if (bb)
                  {
                    bb = bb->aux;
                    if (!bb)
                      break;
                    prev = BB_END (bb);
                  }
              }
            /* Fall through.  */
          case TRAP_RISKY:
          case IRISKY:
          case PRISKY_CANDIDATE:
            /* ??? We could implement better checking PRISKY_CANDIDATEs
               analogous to sched-rgn.c.  */
            /* We cannot change the mode of the backward
               dependency because REG_DEP_ANTI has the lowest
               rank.  */
            if (! sched_insns_conditions_mutex_p (insn, prev))
              {
                if (!(current_sched_info->flags & DO_SPECULATION))
                  {
                    enum DEPS_ADJUST_RESULT res;

                    res = add_or_update_back_dep (insn, prev,
                                                  REG_DEP_ANTI, DEP_ANTI);

                    if (res == DEP_CREATED)
                      add_forw_dep (DEPS_LIST_FIRST (INSN_BACK_DEPS (insn)));
                    else
                      gcc_assert (res != DEP_CHANGED);
                  }
                else
                  add_or_update_back_forw_dep (insn, prev, REG_DEP_ANTI,
                                               set_dep_weak (DEP_ANTI,
                                                             BEGIN_CONTROL,
                                                             MAX_DEP_WEAK));
              }

            break;

          default:
            break;
          }
      }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
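/* Effect, by example (illustrative, not from the original source): given

       jump:  if (cc) goto L;              <- last_jump
       load:  r0 = MEM (r1);               <- classified TRAP_RISKY

   an anti dependence from the jump to the load is recorded (or a weak
   BEGIN_CONTROL dependence when DO_SPECULATION is enabled), so the
   possibly-faulting load cannot be hoisted above the guarding jump.  */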
/* Schedule a single extended basic block, defined by the boundaries HEAD
   and TAIL.  */

static basic_block
schedule_ebb (rtx head, rtx tail)
{
  basic_block first_bb, target_bb;
  struct deps tmp_deps;

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute backward dependencies.  */
      init_deps (&tmp_deps);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      /* Compute forward dependencies.  */
      compute_forward_dependences (head, tail);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  /* rm_other_notes only removes notes which are _inside_ the
     block---that is, it won't remove notes before the first real insn
     or after the last real insn of the block.  So if the first insn
     has a REG_SAVE_NOTE which would otherwise be emitted before the
     insn, it is redundant with the note before the start of the
     block, and so we have to take it out.  */
  if (INSN_P (head))
    {
      rtx note;

      for (note = REG_NOTES (head); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
          remove_note (head, note);
    }

  /* Remove remaining note insns from the block, save them in
     note_list.  These notes are restored at the end of
     schedule_block ().  */
  rm_other_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  current_sched_info->queue_must_finish_empty = 1;

  target_bb = first_bb;
  schedule_block (&target_bb, n_insns);

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_n_insns == n_insns);
  head = current_sched_info->head;
  tail = current_sched_info->tail;

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return last_bb;
}
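/* In outline (editorial summary of the function above): schedule_ebb ()
   runs dependence analysis (unless already done, as for recovery blocks),
   assigns priorities, strips notes, calls schedule_block () on the
   region, and finally deletes a last block left unreachable by jump
   motion.  */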
/* The one entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
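  /* Worked example (illustrative): the parameter value is a percentage
     and REG_BR_PROB_BASE is 10000, so a parameter of 50 yields
     10000 / 100 * 50 = 5000; in the loop below, an ebb stops growing at
     a fallthrough edge whose probability is at most that cutoff.  */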
  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  /* We need current_sched_info in init_dependency_caches, which is
     invoked via sched_init.  */
  current_sched_info = &ebb_sched_info;

  df_set_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();
  df_analyze ();
  df_clear_flags (DF_LR_RUN_DCE);
  regstat_compute_calls_crossed ();
  sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      for (;;)
        {
          edge e;
          edge_iterator ei;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if ((e->flags & EDGE_FALLTHRU) != 0)
              break;
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          bb = bb->next_bb;
        }

      /* Blah.  We should fix the rest of the code not to get confused by
         a note or two.  */
      while (head != tail)
        {
          if (NOTE_P (head))
            head = NEXT_INSN (head);
          else if (NOTE_P (tail))
            tail = PREV_INSN (tail);
          else if (LABEL_P (head))
            head = NEXT_INSN (head);
          else
            break;
        }

      bb = schedule_ebb (head, tail);
    }
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  sched_finish ();
  regstat_free_calls_crossed ();
}
/* INSN has been added to/removed from current ebb.  */
static void
add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    n_insns++;
  else
    n_insns--;
}
/* BB was added to ebb after AFTER.  */
static void
add_block1 (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs, so they always form
     a single-block ebb, and we can use rec->index to identify such ebbs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}
/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next nonempty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}
/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}