/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"

#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (const_rtx, int);
static int rank (rtx, rtx);
static int ebb_contributes_to_priority (rtx, rtx);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static void debug_ebb_dependencies (rtx, rtx);
static void ebb_add_remove_insn (rtx, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *) p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */
static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}
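
/* This predicate is consulted by the haifa scheduler via
   current_sched_info->schedule_more_p; the main scheduling loop keeps
   issuing insns until all RGN_N_INSNS insns counted at initialization
   have been scheduled.  */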

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx head, rtx tail)
{
  fprintf (sched_dump,
           ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */
static void
init_ready_list (void)
{
  int n = 0;
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  rtx insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx insn, rtx last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially-dead
         instruction scheduled after the last control-flow instruction.
         In this case we can create a new basic block.  It is
         always exactly one basic block last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that
         this operation extends the scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */
static const char *
ebb_print_insn (const_rtx insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}
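
/* As an example, insn uid 42 prints as "+   42" when it starts a new
   cycle (the scheduler marks cycle starts by setting the insn's mode to
   TImode) and as "    42" otherwise.  */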

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */
static int
rank (rtx insn1, rtx insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

  if (bb1->count > bb2->count
      || bb1->frequency > bb2->frequency)
    return -1;

  if (bb1->count < bb2->count
      || bb1->frequency < bb2->frequency)
    return 1;

  return 0;
}
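
/* Consequently, an insn from a hotter block (higher profile count or
   estimated frequency) sorts ahead of an insn from a colder block when
   the ready list is ordered.  */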

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */
static int
ebb_contributes_to_priority (rtx next ATTRIBUTE_UNUSED,
                             rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */
void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}
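
/* A register live on entry to any non-fallthru successor is still needed
   on the taken path, so an insn setting such a register must not be moved
   above the jump; recording the register in USED makes the dependence
   machinery enforce this.  */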

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */
  NULL, NULL,
  NULL, NULL,
  1, 0,
  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,
  save_ebb_state,
  restore_ebb_state,
  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses a list (given by LAST_BLOCK) of already-processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */
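
/* For instance, if insn1 computes a base register and both LOAD_INSN and
   insn2 load at small constant offsets from it, hoisting LOAD_INSN into
   insn2's block cannot introduce a new fault: any fault would already
   have been raised by insn2.  */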

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */
static void
add_deps_for_risky_insns (rtx head, rtx tail)
{
  rtx insn, prev;
  int classification;
  rtx last_jump = NULL_RTX;
  rtx next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
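          /* Thread this block onto the chain of blocks already seen in
             this ebb (linked through bb->aux); the chain is walked by
             earliest_block_with_similiar_load and unthreaded again at
             the end of this function.  */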
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of
                 PRISKY_CANDIDATEs, analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
                                                           BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);
                        }

                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
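
/* The point of the dependencies added above: a risky insn (one that may
   trap) must not be hoisted above a preceding jump that may be guarding
   it, so each such insn is anchored to the last jump by an anti or
   control dependence, weakened to a speculative BEGIN_CONTROL dependence
   when control speculation is enabled.  */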

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behaviour depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return NULL.  */

basic_block
schedule_ebb (rtx head, rtx tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  struct deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));
  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);
  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);
  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, 0);
  bitmap_clear (&dont_calc_deps);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_clear (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */
void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && flag_branch_probabilities)
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
  else
    probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
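  /* The --param values are percentages; rescaling by REG_BR_PROB_BASE / 100
     puts the cutoff in the same units as e->probability, which it is
     compared against below.  */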

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB (bb)
    {
      rtx head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

      for (;;)
        {
          edge e;

          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs,
     therefore they always form a single-block ebb,
     and we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR)
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next non-empty block.  A block that contains nothing but
       its bb note (bb_note == BB_END) is empty and is skipped.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK (jump_bbi);
}

#endif /* INSN_SCHEDULING */