/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "diagnostic-core.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "tree-pass.h"

#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;

/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head[i] is the index in rgn_bb_table of the head basic block of
   the i'th ebb.  Currently we can get an ebb only through splitting of
   the currently scheduled block, therefore, we don't need an ebb_head
   array for every region; it is sufficient to hold it for the current
   one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;

static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static basic_block *bblst_table;
static int bblst_size, bblst_last;
/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef struct
{
  basic_block *first_member;	/* Pointer to the list start in bblst_table.  */
  int nr_members;		/* The number of members of the list.  */
}
bblst;

typedef struct
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
}
candidate;

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN) \
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )
/* The bb being currently scheduled.  */
static int target_bb;

/* List of edges.  */
typedef struct
{
  edge *first_member;		/* Pointer to the list start in edgelst_table.  */
  int nr_members;		/* The number of members of the list.  */
}
edgelst;

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);
/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);
/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg)                                 \
( TEST_BIT (dom[bb_src], bb_trg) )
/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;
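/* For example (assumed values): with REG_BR_PROB_BASE == 10000, a block
   reached from the region entry only along an edge taken half the time
   gets prob[bb] == 5000, i.e. a 50% chance relative to the entry.  */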
/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
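/* The two macros pair up through the edge's aux field.  A sketch of the
   per-region numbering they assume (illustrative; rgn_edges is filled
   during region setup):

     for (i = 0; i < rgn_nr_edges; i++)
       SET_EDGE_TO_BIT (rgn_edges[i], i);

   after which EDGE_TO_BIT (rgn_edges[i]) == i for every region edge.  */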
/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx, rtx);
static void compute_block_dependences (int);

static void schedule_region (int);
static rtx concat_INSN_LIST (rtx, rtx);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */

static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx insn;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_EACH_BB (b)
    FOR_BB_INSNS (b, insn)
      {
	rtx note, next, set, dest;

	/* If this function has a computed jump, then we consider the cfg
	   not well structured.  */
	if (JUMP_P (insn) && computed_jump_p (insn))
	  return 1;

	if (!INSN_P (insn))
	  continue;

	note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
	if (note == NULL_RTX)
	  continue;

	/* For that label not to be seen as a referred-to label, this
	   must be a single-set which is feeding a jump *only*.  This
	   could be a conditional jump with the label split off for
	   machine-specific reasons or a casesi/tablejump.  */
	next = next_nonnote_insn (insn);
	if (next == NULL_RTX
	    || !JUMP_P (next)
	    || (JUMP_LABEL (next) != XEXP (note, 0)
		&& find_reg_note (next, REG_LABEL_TARGET,
				  XEXP (note, 0)) == NULL_RTX)
	    || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
	  return 1;

	set = single_set (insn);
	if (set == NULL_RTX)
	  return 1;

	dest = SET_DEST (set);
	if (!REG_P (dest) || !dead_or_set_p (next, dest))
	  return 1;
      }

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  FOR_EACH_BB (b)
    {
      if (EDGE_COUNT (b->preds) == 0
	  || (single_pred_p (b)
	      && single_pred (b) == b))
	return 1;
    }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}
/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

static void
extract_edgelst (sbitmap set, edgelst *el)
{
  unsigned int i = 0;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;
  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each set bit in the bitmap.  */
  EXECUTE_IF_SET_IN_SBITMAP (set, 0, i, sbi)
    {
      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;
    }
}
/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_regions (void)
{
  int rgn, bb;

  fprintf (sched_dump, "\n;;   ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
	       rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
	 BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
	fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");
    }
}
/* Print the region's basic blocks.  */

DEBUG_FUNCTION void
debug_region (int rgn)
{
  int bb;

  fprintf (stderr, "\n;;   ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
	   rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    {
      debug_bb_n_slim (rgn_bb_table[current_blocks + bb]);
      fprintf (stderr, "\n");
    }

  fprintf (stderr, "\n");
}
/* True when a bb with index BB_INDEX is contained in region RGN.  */
static bool
bb_in_region_p (int bb_index, int rgn)
{
  int i;

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    if (rgn_bb_table[current_blocks + i] == bb_index)
      return true;

  return false;
}
/* Dump region RGN to file F using dot syntax.  */
void
dump_region_dot (FILE *f, int rgn)
{
  int i;

  fprintf (f, "digraph Region_%d {\n", rgn);

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    {
      edge e;
      edge_iterator ei;
      int src_bb_num = rgn_bb_table[current_blocks + i];
      struct basic_block_def *bb = BASIC_BLOCK (src_bb_num);

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (bb_in_region_p (e->dest->index, rgn))
	  fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
    }
  fprintf (f, "}\n");
}
/* The same, but first open a file specified by FNAME.  */
void
dump_region_dot_file (const char *fname, int rgn)
{
  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);
}
/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (bool ebbs_p)
{
  basic_block bb, ebb_start;
  int i = 0;

  nr_regions = 0;

  if (ebbs_p)
    {
      int probability_cutoff;
      if (profile_info && flag_branch_probabilities)
	probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
      else
	probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
      probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

      FOR_EACH_BB (ebb_start)
	{
	  RGN_NR_BLOCKS (nr_regions) = 0;
	  RGN_BLOCKS (nr_regions) = i;
	  RGN_DONT_CALC_DEPS (nr_regions) = 0;
	  RGN_HAS_REAL_EBB (nr_regions) = 0;

	  for (bb = ebb_start; ; bb = bb->next_bb)
	    {
	      edge e;

	      rgn_bb_table[i] = bb->index;
	      RGN_NR_BLOCKS (nr_regions)++;
	      CONTAINING_RGN (bb->index) = nr_regions;
	      BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
	      i++;

	      if (bb->next_bb == EXIT_BLOCK_PTR
		  || LABEL_P (BB_HEAD (bb->next_bb)))
		break;

	      e = find_fallthru_edge (bb->succs);
	      if (! e)
		break;
	      if (e->probability <= probability_cutoff)
		break;
	    }

	  ebb_start = bb;
	  nr_regions++;
	}
    }
  else
    FOR_EACH_BB (bb)
      {
	rgn_bb_table[nr_regions] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = nr_regions;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;

	CONTAINING_RGN (bb->index) = nr_regions;
	BLOCK_TO_BB (bb->index) = 0;
	nr_regions++;
      }
}
/* Estimate number of the insns in the BB.  */
static int
rgn_estimate_number_of_insns (basic_block bb)
{
  int count;

  count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));

  if (MAY_HAVE_DEBUG_INSNS)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
	if (DEBUG_INSN_P (insn))
	  count--;
    }

  return count;
}
/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
		   (BASIC_BLOCK (block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
	  || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)			\
{							\
  if (max_hdr[blk] == -1)				\
    max_hdr[blk] = hdr;					\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])		\
    RESET_BIT (inner, hdr);				\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])		\
    {							\
      RESET_BIT (inner,max_hdr[blk]);			\
      max_hdr[blk] = hdr;				\
    }							\
}
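/* A worked example of the macro above (values are made up): suppose
   max_hdr[blk] == h1 with dfs_nr[h1] == 5, and a new candidate header h2
   arrives with dfs_nr[h2] == 8.  Since dfs_nr[h1] < dfs_nr[h2], h2 was
   entered later in the DFS and is the more deeply nested header, so h1
   loses its "inner" bit and max_hdr[blk] becomes h2.  Had dfs_nr[h2]
   been smaller, h2 itself would have been rejected as an inner loop
   header instead.  */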
/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the following global (ick)
   variables: nr_regions, rgn_table, rgn_bb_table, block_to_bb, and
   containing_rgn.

   We use dominator relationships to avoid making regions out of
   non-reducible loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */
static void
haifa_find_rgns (void)
{
  int *max_hdr, *dfs_nr, *degree;
  char no_loops = 1;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;
  basic_block bb;

  /* Note if a block is a natural loop header.  */
  sbitmap header;

  /* Note if a block is a natural inner loop header.  */
  sbitmap inner;

  /* Note if a block is in the block queue.  */
  sbitmap in_queue;

  /* Note if a block is on the DFS stack.  */
  sbitmap in_stack;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block);
  dfs_nr = XCNEWVEC (int, last_basic_block);
  stack = XNEWVEC (edge_iterator, n_edges);

  inner = sbitmap_alloc (last_basic_block);
  sbitmap_ones (inner);

  header = sbitmap_alloc (last_basic_block);
  sbitmap_zero (header);

  in_queue = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_queue);

  in_stack = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_stack);

  for (i = 0; i < last_basic_block; i++)
    max_hdr[i] = -1;
#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
#define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))

  /* DFS traversal to find inner loops in the cfg.  */

  sp = -1;
  current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
  while (1)
    {
      if (EDGE_PASSED (current_edge))
	{
	  /* We have reached a leaf node or a node that was already
	     processed.  Pop edges off the stack until we find
	     an edge that has not yet been processed.  */
	  while (sp >= 0 && EDGE_PASSED (current_edge))
	    {
	      /* Pop entry off the stack.  */
	      current_edge = stack[sp--];
	      node = ei_edge (current_edge)->src->index;
	      gcc_assert (node != ENTRY_BLOCK);
	      child = ei_edge (current_edge)->dest->index;
	      gcc_assert (child != EXIT_BLOCK);
	      RESET_BIT (in_stack, child);
	      if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
		UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	      ei_next (&current_edge);
	    }

	  /* See if we have finished the DFS tree traversal.  */
	  if (sp < 0 && EDGE_PASSED (current_edge))
	    break;

	  /* Nope, continue the traversal with the popped node.  */
	  continue;
	}
      /* Process a node.  */
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      SET_BIT (in_stack, node);
      dfs_nr[node] = ++count;

      /* We don't traverse to the exit block.  */
      child = ei_edge (current_edge)->dest->index;
      if (child == EXIT_BLOCK)
	{
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}
      /* If the successor is in the stack, then we've found a loop.
	 Mark the loop, if it is not a natural loop, then it will
	 be rejected during the second traversal.  */
      if (TEST_BIT (in_stack, child))
	{
	  no_loops = 0;
	  SET_BIT (header, child);
	  UPDATE_LOOP_RELATIONS (node, child);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}
      /* If the child was already visited, then there is no need to visit
	 it again.  Just update the loop relationships and restart
	 with a new edge.  */
      if (dfs_nr[child])
	{
	  if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
	    UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}
      /* Push an entry on the stack and continue DFS traversal.  */
      stack[++sp] = current_edge;
      SET_EDGE_PASSED (current_edge);
      current_edge = ei_start (ei_edge (current_edge)->dest->succs);
    }

  /* Reset ->aux field used by EDGE_PASSED.  */
  FOR_ALL_BB (bb)
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB (bb)
    if (dfs_nr[bb->index] == 0)
      {
	unreachable = 1;
	break;
      }

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB (bb)
    degree[bb->index] = EDGE_COUNT (bb->preds);
  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)
    {
      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
	 there basic blocks, which are forced to be region heads.
	 This is done to try to assemble few smaller regions
	 from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;
      if (no_loops)
	SET_BIT (header, 0);

      /* Second traversal: find reducible inner loops and topologically sort
	 blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks);

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)
	{
	  degree1 = XNEWVEC (int, last_basic_block);
	  extended_rgn_header = sbitmap_alloc (last_basic_block);
	  sbitmap_zero (extended_rgn_header);
	}
      /* Find blocks which are inner loop headers.  We still have non-reducible
	 loops to consider at this point.  */
      FOR_EACH_BB (bb)
	{
	  if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
	    {
	      edge e;
	      edge_iterator ei;
	      basic_block jbb;

	      /* Now check that the loop is reducible.  We do this separate
		 from finding inner loops so that we do not find a reducible
		 loop which contains an inner non-reducible loop.

		 A simple way to find reducible/natural loops is to verify
		 that each block in the loop is dominated by the loop
		 header.

		 If there exists a block that is not dominated by the loop
		 header, then the block is reachable from outside the loop
		 and thus the loop is not a natural loop.  */
	      FOR_EACH_BB (jbb)
		{
		  /* First identify blocks in the loop, except for the loop
		     entry block.  */
		  if (bb->index == max_hdr[jbb->index] && bb != jbb)
		    {
		      /* Now verify that the block is dominated by the loop
			 header.  */
		      if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
			break;
		    }
		}
	      /* If we exited the loop early, then I is the header of
		 a non-reducible loop and we should quit processing it
		 now.  */
	      if (jbb != EXIT_BLOCK_PTR)
		continue;

	      /* I is a header of an inner loop, or block 0 in a subroutine
		 with no loops at all.  */
	      head = tail = -1;
	      too_large_failure = 0;
	      loop_head = max_hdr[bb->index];

	      if (extend_regions_p)
		/* We save degree in case when we meet a too_large region
		   and cancel it.  We need a correct degree later when
		   calling extend_rgns.  */
		memcpy (degree1, degree, last_basic_block * sizeof (int));
	      /* Decrease degree of all I's successors for topological
		 ordering.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (e->dest != EXIT_BLOCK_PTR)
		  --degree[e->dest->index];

	      /* Estimate # insns, and count # blocks in the region.  */
	      num_bbs = 1;
	      num_insns = common_sched_info->estimate_number_of_insns (bb);
	      /* Find all loop latches (blocks with back edges to the loop
		 header), or all the leaf blocks if the cfg has no loops.

		 Place those blocks into the queue.  */
	      if (no_loops)
		{
		  FOR_EACH_BB (jbb)
		    /* Leaf nodes have only a single successor which must
		       be EXIT_BLOCK.  */
		    if (single_succ_p (jbb)
			&& single_succ (jbb) == EXIT_BLOCK_PTR)
		      {
			queue[++tail] = jbb->index;
			SET_BIT (in_queue, jbb->index);

			if (too_large (jbb->index, &num_bbs, &num_insns))
			  {
			    too_large_failure = 1;
			    break;
			  }
		      }
		}
	      else
		{
, ei
, bb
->preds
)
895 if (e
->src
== ENTRY_BLOCK_PTR
)
898 node
= e
->src
->index
;
900 if (max_hdr
[node
] == loop_head
&& node
!= bb
->index
)
902 /* This is a loop latch. */
903 queue
[++tail
] = node
;
904 SET_BIT (in_queue
, node
);
906 if (too_large (node
, &num_bbs
, &num_insns
))
908 too_large_failure
= 1;
	      /* Now add all the blocks in the loop to the queue.

	     We know the loop is a natural loop; however the algorithm
	     above will not always mark certain blocks as being in the
	     loop.  Consider:
		node   children
		 a	  b,c
		 b	  c
		 c	  a,d
		 d	  b

	     The algorithm in the DFS traversal may not mark B & D as part
	     of the loop (i.e. they will not have max_hdr set to A).

	     We know they cannot be loop latches (else they would have
	     had max_hdr set since they'd have a backedge to a dominator
	     block).  So we don't need them on the initial queue.

	     We know they are part of the loop because they are dominated
	     by the loop header and can be reached by a backwards walk of
	     the edges starting with nodes on the initial queue.

	     It is safe and desirable to include those nodes in the
	     loop/scheduling region.  To do so we would need to decrease
	     the degree of a node if it is the target of a backedge
	     within the loop itself as the node is placed in the queue.

	     We do not do this because I'm not sure that the actual
	     scheduling code will properly handle this case. ?!? */
	      while (head < tail && !too_large_failure)
		{
		  edge e;
		  child = queue[++head];

		  FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
		    {
		      node = e->src->index;

		      /* See discussion above about nodes not marked as in
			 this loop during the initial DFS traversal.  */
		      if (e->src == ENTRY_BLOCK_PTR
			  || max_hdr[node] != loop_head)
			{
			  tail = -1;
			  break;
			}
		      else if (!TEST_BIT (in_queue, node) && node != bb->index)
			{
			  queue[++tail] = node;
			  SET_BIT (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
			    {
			      too_large_failure = 1;
			      break;
			    }
			}
		    }
		}
	      if (tail >= 0 && !too_large_failure)
		{
		  /* Place the loop header into list of region blocks.  */
		  degree[bb->index] = -1;
		  rgn_bb_table[idx] = bb->index;
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  RGN_BLOCKS (nr_regions) = idx++;
		  RGN_DONT_CALC_DEPS (nr_regions) = 0;
		  RGN_HAS_REAL_EBB (nr_regions) = 0;
		  CONTAINING_RGN (bb->index) = nr_regions;
		  BLOCK_TO_BB (bb->index) = count = 0;

		  /* Remove blocks from queue[] when their in degree
		     becomes zero.  Repeat until no blocks are left on the
		     list.  This produces a topological list of blocks in
		     the region.  */
		  while (tail >= 0)
		    {
		      if (head < 0)
			head = tail;
		      child = queue[head];
		      if (degree[child] == 0)
			{
			  edge e;

			  degree[child] = -1;
			  rgn_bb_table[idx++] = child;
			  BLOCK_TO_BB (child) = ++count;
			  CONTAINING_RGN (child) = nr_regions;
			  queue[head] = queue[tail--];

			  FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
			    if (e->dest != EXIT_BLOCK_PTR)
			      --degree[e->dest->index];
			}
		      else
			--head;
		    }
		  ++nr_regions;
		}
	      else if (extend_regions_p)
		{
		  /* Restore DEGREE.  */
		  int *t = degree;

		  degree = degree1;
		  degree1 = t;

		  /* And force successors of BB to be region heads.
		     This may provide several smaller regions instead
		     of one too_large region.  */
		  FOR_EACH_EDGE (e, ei, bb->succs)
		    if (e->dest != EXIT_BLOCK_PTR)
		      SET_BIT (extended_rgn_header, e->dest->index);
		}
	    }
	}
      free (queue);
      if (extend_regions_p)
	{
	  free (degree1);

	  sbitmap_a_or_b (header, header, extended_rgn_header);
	  sbitmap_free (extended_rgn_header);

	  extend_rgns (degree, &idx, header, max_hdr);
	}
    }
  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB (bb)
    if (degree[bb->index] >= 0)
      {
	rgn_bb_table[idx] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = idx++;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;
	CONTAINING_RGN (bb->index) = nr_regions++;
	BLOCK_TO_BB (bb->index) = 0;
      }

  free (max_hdr);
  free (dfs_nr);
  free (stack);
  sbitmap_free (header);
  sbitmap_free (inner);
  sbitmap_free (in_queue);
  sbitmap_free (in_stack);
}
/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */
static void
find_rgns (void)
{
  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();
}
static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */
static int
gather_region_statistics (int **rsp)
{
  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)
    {
      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)
	{
	  a = XRESIZEVEC (int, a, nr_blocks);
	  do
	    a[a_sz++] = 0;
	  while (a_sz != nr_blocks);
	}

      a[nr_blocks - 1]++;
    }

  *rsp = a;
  return a_sz;
}
/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
  int i;

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)
    {
      int n1, n2;

      n2 = s2[i];

      if (n2 == 0)
	continue;

      if (i >= s1_sz)
	n1 = 0;
      else
	n1 = s1[i];

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
	       "was %d + %d more\n", i + 1, n1, n2 - n1);
    }
}
/* Extend regions.
   DEGREE - Array of incoming edge count, considering only
   the edges that don't have their sources in formed regions yet.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */
void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block);

  order = XNEWVEC (int, last_basic_block);
  post_order_compute (order, false, false);

  for (i = nblocks - 1; i >= 0; i--)
    {
      int bbn = order[i];
      if (degree[bbn] >= 0)
	{
	  max_hdr[bbn] = bbn;
	  rescan = 1;
	}
      else
	/* This block already was processed in find_rgns.  */
	max_hdr[bbn] = -1;
    }
  /* The idea is to topologically walk through CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     CFG should be traversed until no further changes are made.  On each
     iteration the set of the region heads is extended (the set of those
     blocks that have max_hdr[bbi] == bbi).  This set is upper bounded by the
     set of all basic blocks, thus the algorithm is guaranteed to
     terminate.  */
&& iter
< max_iter
)
1192 for (i
= nblocks
- 1; i
>= 0; i
--)
1198 if (max_hdr
[bbn
] != -1 && !TEST_BIT (header
, bbn
))
1202 FOR_EACH_EDGE (e
, ei
, BASIC_BLOCK (bbn
)->preds
)
1204 int predn
= e
->src
->index
;
1206 if (predn
!= ENTRY_BLOCK
1207 /* If pred wasn't processed in find_rgns. */
1208 && max_hdr
[predn
] != -1
1209 /* And pred and bb reside in the same loop.
1210 (Or out of any loop). */
1211 && loop_hdr
[bbn
] == loop_hdr
[predn
])
1214 /* Then bb extends the containing region of pred. */
1215 hdr
= max_hdr
[predn
];
1216 else if (hdr
!= max_hdr
[predn
])
1217 /* Too bad, there are at least two predecessors
1218 that reside in different regions. Thus, BB should
1219 begin its own region. */
1226 /* BB starts its own region. */
1235 /* If BB start its own region,
1236 update set of headers with BB. */
1237 SET_BIT (header
, bbn
);
1241 gcc_assert (hdr
!= -1);
  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with max-sched-extend-regions-iters parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */
  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
	     rescan ? "... failed" : "");

  if (!rescan && iter != 0)
    {
      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
	s1_sz = gather_region_statistics (&s1);
      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)
	{
	  int bbn = order[i];

	  if (max_hdr[bbn] == bbn)
	    /* BBN is a region head.  */
	    {
	      edge e;
	      edge_iterator ei;
	      int num_bbs = 0, j, num_insns = 0, large;

	      large = too_large (bbn, &num_bbs, &num_insns);

	      degree[bbn] = -1;
	      rgn_bb_table[idx] = bbn;
	      RGN_BLOCKS (nr_regions) = idx++;
	      RGN_DONT_CALC_DEPS (nr_regions) = 0;
	      RGN_HAS_REAL_EBB (nr_regions) = 0;
	      CONTAINING_RGN (bbn) = nr_regions;
	      BLOCK_TO_BB (bbn) = 0;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
		if (e->dest != EXIT_BLOCK_PTR)
		  degree[e->dest->index]--;

	      if (!large)
		/* Here we check whether the region is too_large.  */
		for (j = i - 1; j >= 0; j--)
		  {
		    int succn = order[j];
		    if (max_hdr[succn] == bbn)
		      {
			if ((large = too_large (succn, &num_bbs, &num_insns)))
			  break;
		      }
		  }

	      if (large)
		/* If the region is too_large, then wrap every block of
		   the region into single block region.
		   Here we wrap region head only.  Other blocks are
		   processed in the below cycle.  */
		{
		  RGN_NR_BLOCKS (nr_regions) = 1;
		  nr_regions++;
		}

	      num_bbs = 1;

	      for (j = i - 1; j >= 0; j--)
		{
		  int succn = order[j];

		  if (max_hdr[succn] == bbn)
		    /* This cycle iterates over all basic blocks, that
		       are supposed to be in the region with head BBN,
		       and wraps them into that region (or in single
		       block region).  */
		    {
		      gcc_assert (degree[succn] == 0);

		      degree[succn] = -1;
		      rgn_bb_table[idx] = succn;
		      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
		      CONTAINING_RGN (succn) = nr_regions;

		      if (large)
			/* Wrap SUCCN into single block region.  */
			{
			  RGN_BLOCKS (nr_regions) = idx;
			  RGN_NR_BLOCKS (nr_regions) = 1;
			  RGN_DONT_CALC_DEPS (nr_regions) = 0;
			  RGN_HAS_REAL_EBB (nr_regions) = 0;
			  nr_regions++;
			}

		      idx++;

		      FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
			if (e->dest != EXIT_BLOCK_PTR)
			  degree[e->dest->index]--;
		    }
		}

	      if (!large)
		{
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  nr_regions++;
		}
	    }
	}

      if (sched_verbose >= 6)
	{
	  int *s2, s2_sz;

	  /* Get the new statistics and print the comparison with the
	     one before calling this function.  */
	  s2_sz = gather_region_statistics (&s2);
	  print_region_statistics (s1, s1_sz, s2, s2_sz);
	  free (s1);
	  free (s2);
	}
    }

  free (order);
  free (max_hdr);

  *idxp = idx;
}
/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

static void
compute_dom_prob_ps (int bb)
{
  edge_iterator in_ei;
  edge in_edge;

  /* We shouldn't have any real ebbs yet.  */
  gcc_assert (ebb_head[bb] == bb + current_blocks);

  if (IS_RGN_ENTRY (bb))
    {
      SET_BIT (dom[bb], 0);
      prob[bb] = REG_BR_PROB_BASE;
      return;
    }

  prob[bb] = 0;

  /* Initialize dom[bb] to '111..1'.  */
  sbitmap_ones (dom[bb]);

  FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
    {
      int pred_bb;
      edge out_edge;
      edge_iterator out_ei;

      if (in_edge->src == ENTRY_BLOCK_PTR)
	continue;

      pred_bb = BLOCK_TO_BB (in_edge->src->index);
      sbitmap_a_and_b (dom[bb], dom[bb], dom[pred_bb]);
      sbitmap_a_or_b (ancestor_edges[bb],
		      ancestor_edges[bb], ancestor_edges[pred_bb]);

      SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (in_edge));

      sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[pred_bb]);

      FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
	SET_BIT (pot_split[bb], EDGE_TO_BIT (out_edge));

      prob[bb] += ((prob[pred_bb] * in_edge->probability) / REG_BR_PROB_BASE);
    }

  SET_BIT (dom[bb], bb);
  sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";;  bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
	     (100 * prob[bb]) / REG_BR_PROB_BASE);
}
/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
   Note that bb_trg dominates bb_src.  */

static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
  sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
  sbitmap_copy (src, pot_split[bb_src]);

  sbitmap_difference (src, src, pot_split[bb_trg]);
  extract_edgelst (src, bl);
  sbitmap_free (src);
}
/* Find the valid candidate-source-blocks for the target block TRG, compute
   their probability, and check if they are speculative or not.
   For speculative sources, compute their update-blocks and split-blocks.  */

static void
compute_trg_info (int trg)
{
  candidate *sp;
  edgelst el = { NULL, 0 };
  int i, j, k, update_idx;
  basic_block block;
  sbitmap visited;
  edge_iterator ei;
  edge e;

  candidate_table = XNEWVEC (candidate, current_nr_blocks);

  bblst_last = 0;
  /* bblst_table holds split blocks and update blocks for each block after
     the current one in the region.  split blocks and update blocks are
     the TO blocks of region edges, so there can be at most rgn_nr_edges
     of them.  */
  bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
  bblst_table = XNEWVEC (basic_block, bblst_size);

  edgelst_last = 0;
  edgelst_table = XNEWVEC (edge, rgn_nr_edges);

  /* Define some of the fields for the target bb as well.  */
  sp = candidate_table + trg;
  sp->is_valid = 1;
  sp->is_speculative = 0;
  sp->src_prob = REG_BR_PROB_BASE;

  visited = sbitmap_alloc (last_basic_block);

  for (i = trg + 1; i < current_nr_blocks; i++)
    {
      sp = candidate_table + i;

      sp->is_valid = IS_DOMINATED (i, trg);
      if (sp->is_valid)
	{
	  int tf = prob[trg], cf = prob[i];

	  /* In CFGs with low probability edges TF can possibly be zero.  */
	  sp->src_prob = (tf ? ((cf * REG_BR_PROB_BASE) / tf) : 0);
	  sp->is_valid = (sp->src_prob >= min_spec_prob);
	}

      if (sp->is_valid)
	{
	  split_edges (i, trg, &el);
	  sp->is_speculative = (el.nr_members) ? 1 : 0;
	  if (sp->is_speculative && !flag_schedule_speculative)
	    sp->is_valid = 0;
	}

      if (sp->is_valid)
	{
	  /* Compute split blocks and store them in bblst_table.
	     The TO block of every split edge is a split block.  */
	  sp->split_bbs.first_member = &bblst_table[bblst_last];
	  sp->split_bbs.nr_members = el.nr_members;
	  for (j = 0; j < el.nr_members; bblst_last++, j++)
	    bblst_table[bblst_last] = el.first_member[j]->dest;
	  sp->update_bbs.first_member = &bblst_table[bblst_last];

	  /* Compute update blocks and store them in bblst_table.
	     For every split edge, look at the FROM block, and check
	     all out edges.  For each out edge that is not a split edge,
	     add the TO block to the update block list.  This list can end
	     up with a lot of duplicates.  We need to weed them out to avoid
	     overrunning the end of the bblst_table.  */

	  update_idx = 0;
	  sbitmap_zero (visited);
	  for (j = 0; j < el.nr_members; j++)
	    {
	      block = el.first_member[j]->src;
	      FOR_EACH_EDGE (e, ei, block->succs)
		{
		  if (!TEST_BIT (visited, e->dest->index))
		    {
		      for (k = 0; k < el.nr_members; k++)
			if (e == el.first_member[k])
			  break;

		      if (k >= el.nr_members)
			{
			  bblst_table[bblst_last++] = e->dest;
			  SET_BIT (visited, e->dest->index);
			  update_idx++;
			}
		    }
		}
	    }
	  sp->update_bbs.nr_members = update_idx;

	  /* Make sure we didn't overrun the end of bblst_table.  */
	  gcc_assert (bblst_last <= bblst_size);
	}
      else
	{
	  sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;

	  sp->is_speculative = 0;
	  sp->src_prob = 0;
	}
    }

  sbitmap_free (visited);
}
/* Free the computed target info.  */
static void
free_trg_info (void)
{
  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
}
/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidate (int i)
{
  if (!candidate_table[i].is_valid)
    return;

  if (candidate_table[i].is_speculative)
    {
      int j;
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].split_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].update_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");
    }
  else
    {
      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
    }
}
/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidates (int trg)
{
  int i;

  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
	   BB_TO_BLOCK (trg), trg);
  for (i = trg + 1; i < current_nr_blocks; i++)
    debug_candidate (i);
}
/* Functions for speculative scheduling.  */

static bitmap_head not_in_df;

/* Return 0 if x is a set of a register alive in the beginning of one
   of the split-blocks of src, otherwise return 1.  */
static int
check_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return 1;

  while (GET_CODE (reg) == SUBREG
	 || GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
	  if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
	    return 1;

      return 0;
    }

  if (!REG_P (reg))
    return 1;

  regno = REGNO (reg);

  if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
    {
      /* Global registers are assumed live.  */
      return 0;
    }
  else
    {
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* Check for hard registers.  */
	  int j = hard_regno_nregs[regno][GET_MODE (reg)];
	  while (--j >= 0)
	    {
	      for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
		{
		  basic_block b = candidate_table[src].split_bbs.first_member[i];
		  int t = bitmap_bit_p (&not_in_df, b->index);

		  /* We can have split blocks, that were recently generated.
		     Such blocks are always outside current region.  */
		  gcc_assert (!t || (CONTAINING_RGN (b->index)
				     != CONTAINING_RGN (BB_TO_BLOCK (src))));

		  if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
		    return 0;
		}
	    }
	}
      else
	{
	  /* Check for pseudo registers.  */
	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
	    {
	      basic_block b = candidate_table[src].split_bbs.first_member[i];
	      int t = bitmap_bit_p (&not_in_df, b->index);

	      gcc_assert (!t || (CONTAINING_RGN (b->index)
				 != CONTAINING_RGN (BB_TO_BLOCK (src))));

	      if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
		return 0;
	    }
	}
    }

  return 1;
}
/* If x is a set of a register R, mark that R is alive in the beginning
   of every update-block of src.  */

static void
update_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG
	 || GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
	  update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));

      return;
    }

  if (!REG_P (reg))
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);

  if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
    {
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int j = hard_regno_nregs[regno][GET_MODE (reg)];
	  while (--j >= 0)
	    {
	      for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
		{
		  basic_block b = candidate_table[src].update_bbs.first_member[i];

		  SET_REGNO_REG_SET (df_get_live_in (b), regno + j);
		}
	    }
	}
      else
	{
	  for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
	    {
	      basic_block b = candidate_table[src].update_bbs.first_member[i];

	      SET_REGNO_REG_SET (df_get_live_in (b), regno);
	    }
	}
    }
}
/* Return 1 if insn can be speculatively moved from block src to trg,
   otherwise return 0.  Called before first insertion of insn to
   ready-list or before the scheduling.  */

static int
check_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return check_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;

      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
	if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
	     || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
	    && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
	  return 0;

      return 1;
    }

  return 1;
}
1821 block src to trg. */
1824 update_live (rtx insn
, int src
)
1826 /* Find the registers set by instruction. */
1827 if (GET_CODE (PATTERN (insn
)) == SET
1828 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
1829 update_live_1 (src
, PATTERN (insn
));
1830 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
1833 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
1834 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == SET
1835 || GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == CLOBBER
)
1836 update_live_1 (src
, XVECEXP (PATTERN (insn
), 0, j
));
/* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
#define IS_REACHABLE(bb_from, bb_to)					\
  (bb_from == bb_to							\
   || IS_RGN_ENTRY (bb_from)						\
   || (TEST_BIT (ancestor_edges[bb_to],					\
	 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
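/* Reading the macro (illustrative indices): IS_REACHABLE (3, 5) holds
   when bb 3 equals bb 5, when bb 3 is the region entry, or when the
   single incoming edge of bb 3 appears among the ancestor edges of
   bb 5, i.e. some path from the region entry to bb 5 runs through
   bb 3.  */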
/* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */

static void
set_spec_fed (rtx load_insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
}
/* On the path from the insn to load_insn_bb, find a conditional
branch depending on insn, that guards the speculative load.  */

static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Iterate through DEF-USE forward dependences.  */
  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      rtx next = DEP_CON (dep);

      if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
	   CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
	  && IS_REACHABLE (INSN_BB (next), load_insn_bb)
	  && load_insn_bb != INSN_BB (next)
	  && DEP_TYPE (dep) == REG_DEP_TRUE
	  && (JUMP_P (next)
	      || find_conditional_protection (next, load_insn_bb)))
	return 1;
    }
  return 0;
}				/* find_conditional_protection */
/* Returns 1 if the same insn1 that participates in the computation
   of load_insn's address is feeding a conditional branch that is
   guarding on load_insn.  This is true if we find two DEF-USE
   dependences:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
   Locate the branch by following INSN_FORW_DEPS from insn1.  */

static int
is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx insn1 = DEP_PRO (dep);

      /* Must be a DEF-USE dependence upon non-branch.  */
      if (DEP_TYPE (dep) != REG_DEP_TRUE
	  || JUMP_P (insn1))
	continue;

      /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn.  */
      if (INSN_BB (insn1) == bb_src
	  || (CONTAINING_RGN (BLOCK_NUM (insn1))
	      != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
	  || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
	      && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
	continue;

      /* Now search for the conditional-branch.  */
      if (find_conditional_protection (insn1, bb_src))
	return 1;

      /* Recursive step: search another insn1, "above" current insn1.  */
      return is_conditionally_protected (insn1, bb_src, bb_trg);
    }

  /* The chain does not exist.  */
  return 0;
}				/* is_conditionally_protected */
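/* A schematic chain this pair of functions looks for (hypothetical
   insns):

     insn1:  r1 = r3 + 8            ; computes the load address
     branch: if (r1 == 0) jump L    ; conditional branch fed by insn1
     load:   r2 = [r1]              ; the speculative load

   is_conditionally_protected climbs back from the load to insn1, and
   find_conditional_protection then follows insn1's forward dependences
   looking for such a guarding branch on the path to the load's block.  */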
/* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
   load_insn can move speculatively from bb_src to bb_trg.  All the
   following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load1 have a def-use dependence upon
   the same insn 'insn1'.
   (3) either load2 is in bb_trg, or:
   - there's only one split-block, and
   - load1 is on the escape path, and

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.  */

static int
is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  candidate *candp = candidate_table + bb_src;

  if (candp->split_bbs.nr_members != 1)
    /* Must have exactly one escape block.  */
    return 0;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	/* Found a DEF-USE dependence (insn1, load_insn).  */
	{
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx insn2 = DEP_CON (fore_dep);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  if (INSN_BB (insn2) == bb_trg)
		    /* insn2 is the similar load, in the target block.  */
		    return 1;

		  if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
		    /* insn2 is a similar load, in a split-block.  */
		    return 1;
		}
	    }
	}
    }

  /* Couldn't find a similar load.  */
  return 0;
}				/* is_pfree */
/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
   a load moved speculatively, or if load_insn is protected by
   a compare on load_insn's address).  */

static int
is_prisky (rtx load_insn, int bb_src, int bb_trg)
{
  if (FED_BY_SPEC_LOAD (load_insn))
    return 1;

  if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
    /* Dependence may 'hide' out of the region.  */
    return 1;

  if (is_conditionally_protected (load_insn, bb_src, bb_trg))
    return 1;

  return 0;
}
/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
   Return 1 if insn is exception-free (and the motion is valid)
   and 0 otherwise.  */

static int
is_exception_free (rtx insn, int bb_src, int bb_trg)
{
  int insn_class = haifa_classify_insn (insn);

  /* Handle non-load insns.  */
  switch (insn_class)
    {
    case TRAP_FREE:
      return 1;
    case TRAP_RISKY:
      return 0;
    default:;
    }

  /* Handle loads.  */
  if (!flag_schedule_speculative_load)
    return 0;
  IS_LOAD_INSN (insn) = 1;
  switch (insn_class)
    {
    case IFREE:
      return 1;
    case IRISKY:
      return 0;
    case PFREE_CANDIDATE:
      if (is_pfree (insn, bb_src, bb_trg))
	return 1;
      /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate.  */
    case PRISKY_CANDIDATE:
      if (!flag_schedule_speculative_load_dangerous
	  || is_prisky (insn, bb_src, bb_trg))
	return 0;
      break;
    default:;
    }

  return flag_schedule_speculative_load_dangerous;
}
/* The number of insns from the current block scheduled so far.  */
static int sched_target_n_insns;
/* The number of insns from the current block to be scheduled in total.  */
static int target_n_insns;
/* The number of insns from the entire region scheduled so far.  */
static int sched_n_insns;
/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static int can_schedule_ready_p (rtx);
static void begin_schedule_ready (rtx, rtx);
static ds_t new_ready (rtx, ds_t);
static int schedule_more_p (void);
static const char *rgn_print_insn (const_rtx, int);
static int rgn_rank (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);

/* Functions for speculative scheduling.  */
static void rgn_add_remove_insn (rtx, int);
static void rgn_add_block (basic_block, basic_block);
static void rgn_fix_recovery_cfg (int, int, int);
static basic_block advance_target_bb (basic_block, rtx);
/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_target_n_insns < target_n_insns;
}
/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  rtx prev_head = current_sched_info->prev_head;
  rtx next_tail = current_sched_info->next_tail;
  int bb_src;
  rtx insn;

  target_n_insns = 0;
  sched_target_n_insns = 0;
  sched_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_rgn_dependencies (target_bb);

  /* Prepare current target block info.  */
  if (current_nr_blocks > 1)
    compute_trg_info (target_bb);

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      target_n_insns++;

      gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
    }

  /* Add to ready list all 'ready' insns in valid source blocks.
     For speculative insns, check-live, exception-free, and
     issue-delay.  */
  for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
    if (IS_VALID (bb_src))
      {
	rtx src_head;
	rtx src_next_tail;
	rtx tail, head;

	get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
			   &head, &tail);
	src_next_tail = NEXT_INSN (tail);
	src_head = head;

	for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
	  if (INSN_P (insn) && !BOUNDARY_DEBUG_INSN_P (insn))
	    try_ready (insn);
      }
}
/* Called after taking INSN from the ready list.  Returns nonzero if this
   insn can be scheduled, zero if we should silently discard it.  */

static int
can_schedule_ready_p (rtx insn)
{
  /* An interblock motion?  */
  if (INSN_BB (insn) != target_bb
      && IS_SPECULATIVE_INSN (insn)
      && !check_live (insn, INSN_BB (insn)))
    return 0;
  else
    return 1;
}
/* Updates counter and other information.  Split from can_schedule_ready_p ()
   because when we schedule insn speculatively then insn passed to
   can_schedule_ready_p () differs from the one passed to
   begin_schedule_ready ().  */
static void
begin_schedule_ready (rtx insn, rtx last ATTRIBUTE_UNUSED)
{
  /* An interblock motion?  */
  if (INSN_BB (insn) != target_bb)
    {
      if (IS_SPECULATIVE_INSN (insn))
	{
	  gcc_assert (check_live (insn, INSN_BB (insn)));

	  update_live (insn, INSN_BB (insn));

	  /* For speculative load, mark insns fed by it.  */
	  if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
	    set_spec_fed (insn);

	  nr_spec++;
	}
      nr_inter++;
    }
  else
    {
      /* In block motion.  */
      sched_target_n_insns++;
    }
  sched_n_insns++;
}
/* Called after INSN has all its hard dependencies resolved and the speculation
   of type TS is enough to overcome them all.
   Return nonzero if it should be moved to the ready list or the queue, or zero
   if we should silently discard it.  */
static ds_t
new_ready (rtx next, ds_t ts)
{
  if (INSN_BB (next) != target_bb)
    {
      int not_ex_free = 0;

      /* For speculative insns, before inserting to ready/queue,
	 check live, exception-free, and issue-delay.  */
      if (!IS_VALID (INSN_BB (next))
	  || CANT_MOVE (next)
	  || (IS_SPECULATIVE_INSN (next)
	      && ((recog_memoized (next) >= 0
		   && min_insn_conflict_delay (curr_state, next, next)
		      > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
		  || IS_SPECULATION_CHECK_P (next)
		  || !check_live (next, INSN_BB (next))
		  || (not_ex_free = !is_exception_free (next, INSN_BB (next),
							target_bb)))))
	{
	  if (not_ex_free
	      /* We are here because is_exception_free () == false.
		 But we possibly can handle that with control speculation.  */
	      && sched_deps_info->generate_spec_deps
	      && spec_info->mask & BEGIN_CONTROL)
	    {
	      ds_t new_ds;

	      /* Add control speculation to NEXT's dependency type.  */
	      new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);

	      /* Check if NEXT can be speculated with new dependency type.  */
	      if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
		/* Here we got new control-speculative instruction.  */
		ts = new_ds;
	      else
		/* NEXT isn't ready yet.  */
		ts = (ts & ~SPECULATIVE) | HARD_DEP;
	    }
	  else
	    /* NEXT isn't ready yet.  */
	    ts = (ts & ~SPECULATIVE) | HARD_DEP;
	}
    }

  return ts;
}
/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
rgn_print_insn (const_rtx insn, int aligned)
{
  static char tmp[80];

  if (aligned)
    sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
  else
    {
      if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
	sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
      else
	sprintf (tmp, "%d", INSN_UID (insn));
    }
  return tmp;
}
2269 /* Compare priority of two insns. Return a positive number if the second
2270 insn is to be preferred for scheduling, and a negative one if the first
2271 is to be preferred. Zero if they are equally good. */
static int
rgn_rank (rtx insn1, rtx insn2)
{
  /* Some comparisons make sense in interblock scheduling only.  */
  if (INSN_BB (insn1) != INSN_BB (insn2))
    {
      int spec_val, prob_val;

      /* Prefer an inblock motion over an interblock motion.  */
      if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
	return 1;
      if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
	return -1;

      /* Prefer a useful motion over a speculative one.  */
      spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
      if (spec_val)
	return spec_val;

      /* Prefer a more probable (speculative) insn.  */
      prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
      if (prob_val)
	return prob_val;
    }
  return 0;
}
/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */
static int
contributes_to_priority (rtx next, rtx insn)
{
  /* NEXT and INSN reside in one ebb.  */
  return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
}
/* INSN is a JUMP_INSN, COND_EXEC is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */
static void
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
			       regset cond_exec ATTRIBUTE_UNUSED,
			       regset used ATTRIBUTE_UNUSED,
			       regset set ATTRIBUTE_UNUSED)
{
  /* Nothing to do here, since we postprocess jumps in
     add_branch_dependences.  */
}
/* This variable holds common_sched_info hooks and data relevant to
   the interblock scheduler.  */
static struct common_sched_info_def rgn_common_sched_info;
/* This holds data for the dependence analysis relevant to
   the interblock scheduler.  */
static struct sched_deps_info_def rgn_sched_deps_info;

/* This holds constant data used for initializing the above structure
   for the Haifa scheduler.  */
static const struct sched_deps_info_def rgn_const_sched_deps_info =
  {
    compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* The trailing bit-fields default to zero.  */
  };
/* Same as above, but for the selective scheduler.  */
static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
  {
    compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* The trailing bit-fields default to zero.  */
  };
/* Return true if scheduling INSN will trigger finish of scheduling
   current block.  */
static bool
rgn_insn_finishes_block_p (rtx insn)
{
  if (INSN_BB (insn) == target_bb
      && sched_target_n_insns + 1 == target_n_insns)
    /* INSN is the last not-scheduled instruction in the current block.  */
    return true;

  return false;
}
/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */
static const struct haifa_sched_info rgn_const_sched_info =
{
  init_ready_list,
  can_schedule_ready_p,
  schedule_more_p,
  new_ready,
  rgn_rank,
  rgn_print_insn,
  contributes_to_priority,
  rgn_insn_finishes_block_p,

  NULL, NULL,
  NULL, NULL,
  0, 0,

  rgn_add_remove_insn,
  begin_schedule_ready,
  advance_target_bb,
  SCHED_RGN
};
/* This variable holds the data and hooks needed by the Haifa scheduler backend
   for the interblock scheduler frontend.  */
static struct haifa_sched_info rgn_sched_info;
/* Returns maximum priority that an insn was assigned to.  */
int
get_rgn_sched_max_insns_priority (void)
{
  return rgn_sched_info.sched_max_insns_priority;
}
static void sets_likely_spilled_1 (rtx, const_rtx, void *);

/* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register.  */
static bool
sets_likely_spilled (rtx pat)
{
  bool ret = false;
  note_stores (pat, sets_likely_spilled_1, &ret);
  return ret;
}
static void
sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
{
  bool *ret = (bool *) data;

  if (GET_CODE (pat) == SET
      && REG_P (x)
      && HARD_REGISTER_P (x)
      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
    *ret = true;
}
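/* note_stores calls the function above once for each destination
   stored by PAT, forwarding the result flag through DATA.  */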
/* A bitmap to note insns that participate in any dependency.  Used in
   add_branch_dependences.  */
static sbitmap insn_referenced;
/* Add dependences so that branches are scheduled to run last in their
   block.  */
static void
add_branch_dependences (rtx head, rtx tail)
{
  rtx insn, last;
  /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
     that can throw exceptions, force them to remain in order at the end of
     the block by adding dependencies and giving the last a high priority.
     There may be notes present, and prev_head may also be a note.

     Branches must obviously remain at the end.  Calls should remain at the
     end since moving them results in worse register allocation.  Uses remain
     at the end to ensure proper register allocation.

     cc0 setters remain at the end because they can't be moved away from
     their cc0 user.

     COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).

     Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
     values) are not moved before reload because we can wind up with register
     allocation failures.  */
  while (tail != head && DEBUG_INSN_P (tail))
    tail = PREV_INSN (tail);

  insn = tail;
  last = 0;
  while (CALL_P (insn)
	 || JUMP_P (insn)
	 || (NONJUMP_INSN_P (insn)
	     && (GET_CODE (PATTERN (insn)) == USE
		 || GET_CODE (PATTERN (insn)) == CLOBBER
		 || can_throw_internal (insn)
#ifdef HAVE_cc0
		 || sets_cc0_p (PATTERN (insn))
#endif
		 || (!reload_completed
		     && sets_likely_spilled (PATTERN (insn)))))
	 || NOTE_P (insn))
    {
      if (!NOTE_P (insn))
	{
	  if (last != 0
	      && sd_find_dep_between (insn, last, false) == NULL)
	    {
	      if (! sched_insns_conditions_mutex_p (last, insn))
		add_dependence (last, insn, REG_DEP_ANTI);
	      SET_BIT (insn_referenced, INSN_LUID (insn));
	    }

	  CANT_MOVE (insn) = 1;

	  last = insn;
	}

      /* Don't overrun the bounds of the basic block.  */
      if (insn == head)
	break;

      do
	insn = PREV_INSN (insn);
      while (insn != head && DEBUG_INSN_P (insn));
    }
  /* Make sure these insns are scheduled last in their block.  */
  insn = last;
  if (insn != 0)
    while (insn != head)
      {
	insn = prev_nonnote_insn (insn);

	if (TEST_BIT (insn_referenced, INSN_LUID (insn))
	    || DEBUG_INSN_P (insn))
	  continue;

	if (! sched_insns_conditions_mutex_p (last, insn))
	  add_dependence (last, insn, REG_DEP_ANTI);
      }

  if (!targetm.have_conditional_execution ())
    return;
  /* Finally, if the block ends in a jump, and we are doing intra-block
     scheduling, make sure that the branch depends on any COND_EXEC insns
     inside the block to avoid moving the COND_EXECs past the branch insn.

     We only have to do this after reload, because (1) before reload there
     are no COND_EXEC insns, and (2) the region scheduler is an intra-block
     scheduler after reload.

     FIXME: We could in some cases move COND_EXEC insns past the branch if
     this scheduler would be a little smarter.  Consider this code:

		T = [addr]
	C  ?	addr += 4
	!C ?	X += 12
	C  ?	T += 1
	C  ?	jump foo

     On a target with a one cycle stall on a memory access the optimal
     sequence would be:

		T = [addr]
	C  ?	addr += 4
	C  ?	T += 1
	C  ?	jump foo
	!C ?	X += 12

     We don't want to put the 'X += 12' before the branch because it just
     wastes a cycle of execution time when the branch is taken.

     Note that in the example "!C" will always be true.  That is another
     possible improvement for handling COND_EXECs in this scheduler: it
     could remove always-true predicates.  */
  if (!reload_completed || ! JUMP_P (tail))
    return;

  insn = tail;
  while (insn != head)
    {
      insn = PREV_INSN (insn);

      /* Note that we want to add this dependency even when
	 sched_insns_conditions_mutex_p returns true.  The whole point
	 is that we _want_ this dependency, even if these insns really
	 are independent.  */
      if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
	add_dependence (tail, insn, REG_DEP_ANTI);
    }
}
/* Data structures for the computation of data dependences in a region.  We
   keep one `deps' structure for every basic block.  Before analyzing the
   data dependences for a bb, its variables are initialized as a function of
   the variables of its predecessors.  When the analysis for a bb completes,
   we save the contents to the corresponding bb_deps[bb] variable.  */

static struct deps_desc *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD.  */
static rtx
concat_INSN_LIST (rtx copy, rtx old)
{
  rtx new_rtx = old;
  for (; copy; copy = XEXP (copy, 1))
    new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
  return new_rtx;
}
static void
concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
		      rtx *old_mems_p)
{
  rtx new_insns = *old_insns_p;
  rtx new_mems = *old_mems_p;

  while (copy_insns)
    {
      new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
      new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
      copy_insns = XEXP (copy_insns, 1);
      copy_mems = XEXP (copy_mems, 1);
    }

  *old_insns_p = new_insns;
  *old_mems_p = new_mems;
}
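/* The insn and mem lists are walked in lockstep: every entry of
   COPY_INSNS has a matching MEM in COPY_MEMS, so both input lists are
   expected to have the same length.  */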
/* Join PRED_DEPS to the SUCC_DEPS.  */
void
deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
{
  unsigned reg;
  reg_set_iterator rsi;

  /* The reg_last lists are inherited by successor.  */
  EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
    {
      struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
      struct deps_reg *succ_rl = &succ_deps->reg_last[reg];

      succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
      succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
      succ_rl->implicit_sets
	= concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
      succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
					    succ_rl->clobbers);
      succ_rl->uses_length += pred_rl->uses_length;
      succ_rl->clobbers_length += pred_rl->clobbers_length;
    }
  IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);

  /* Mem read/write lists are inherited by successor.  */
  concat_insn_mem_list (pred_deps->pending_read_insns,
			pred_deps->pending_read_mems,
			&succ_deps->pending_read_insns,
			&succ_deps->pending_read_mems);
  concat_insn_mem_list (pred_deps->pending_write_insns,
			pred_deps->pending_write_mems,
			&succ_deps->pending_write_insns,
			&succ_deps->pending_write_mems);

  succ_deps->last_pending_memory_flush
    = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
			succ_deps->last_pending_memory_flush);

  succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
  succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
  succ_deps->pending_flush_length += pred_deps->pending_flush_length;

  /* last_function_call is inherited by successor.  */
  succ_deps->last_function_call
    = concat_INSN_LIST (pred_deps->last_function_call,
			succ_deps->last_function_call);

  /* last_function_call_may_noreturn is inherited by successor.  */
  succ_deps->last_function_call_may_noreturn
    = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
			succ_deps->last_function_call_may_noreturn);

  /* sched_before_next_call is inherited by successor.  */
  succ_deps->sched_before_next_call
    = concat_INSN_LIST (pred_deps->sched_before_next_call,
			succ_deps->sched_before_next_call);
}
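/* For example: if a predecessor block ends with a pending store to
   [r1] and a successor block reads [r1], joining the predecessor's
   pending lists into the successor's makes sched_analyze () see the
   store when it scans the successor, so the interblock memory
   dependence is recorded.  (Illustrative sketch; the fields above
   carry the complete inherited state.)  */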
/* After computing the dependencies for block BB, propagate the dependencies
   found in TMP_DEPS to the successors of the block.  */
static void
propagate_deps (int bb, struct deps_desc *pred_deps)
{
  basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
  edge_iterator ei;
  edge e;

  /* bb's structures are inherited by its successors.  */
  FOR_EACH_EDGE (e, ei, block->succs)
    {
      /* Only bbs "below" bb, in the same region, are interesting.  */
      if (e->dest == EXIT_BLOCK_PTR
	  || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
	  || BLOCK_TO_BB (e->dest->index) <= bb)
	continue;

      deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
    }

  /* These lists should point to the right place, for correct
     freeing later.  */
  bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
  bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
  bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
  bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;

  /* Can't allow these to be freed twice.  */
  pred_deps->pending_read_insns = 0;
  pred_deps->pending_read_mems = 0;
  pred_deps->pending_write_insns = 0;
  pred_deps->pending_write_mems = 0;
}
/* Compute dependences inside bb.  In a multiple blocks region:
   (1) a bb is analyzed after its predecessors, and (2) the lists in
   effect at the end of bb (after analyzing for bb) are inherited by
   bb's successors.

   Specifically for reg-reg data dependences, the block insns are
   scanned by sched_analyze () top-to-bottom.  Three lists are
   maintained by sched_analyze (): reg_last[].sets for register DEFs,
   reg_last[].implicit_sets for implicit hard register DEFs, and
   reg_last[].uses for register USEs.

   When analysis is completed for bb, we update for its successors:
   ;  - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
   ;  - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
   ;  - USES[succ] = Union (USES [succ], USES [bb])

   The mechanism for computing mem-mem data dependence is very
   similar, and the result is interblock dependences in the region.  */
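/* A small worked example: if bb0 contains "r1 = r2" and its successor
   bb1 contains "r3 = r1", the set of r1 propagates from bb0 into
   DEFS[bb1], and scanning bb1 then records a true dependence from the
   set of r1 in bb0 to its use in bb1.  (Sketch only; the block names
   are illustrative.)  */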
static void
compute_block_dependences (int bb)
{
  rtx head, tail;
  struct deps_desc tmp_deps;

  tmp_deps = bb_deps[bb];

  /* Do the analysis for this block.  */
  gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
  get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

  sched_analyze (&tmp_deps, head, tail);

  /* Selective scheduling handles control dependencies by itself.  */
  if (!sel_sched_p ())
    add_branch_dependences (head, tail);

  if (current_nr_blocks > 1)
    propagate_deps (bb, &tmp_deps);

  /* Free up the INSN_LISTs.  */
  free_deps (&tmp_deps);

  if (targetm.sched.dependencies_evaluation_hook)
    targetm.sched.dependencies_evaluation_hook (head, tail);
}
/* Free dependencies of instructions inside BB.  */
static void
free_block_dependencies (int bb)
{
  rtx head;
  rtx tail;

  get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

  if (no_real_insns_p (head, tail))
    return;

  sched_free_deps (head, tail, true);
}
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */
static void
free_pending_lists (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
      free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
      free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
      free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
    }
}
/* Print dependences for debugging starting from FROM_BB.
   Callable from debugger.  */
void
debug_rgn_dependencies (int from_bb)
{
  int bb;

  fprintf (sched_dump,
	   ";;   --------------- forward dependences: ------------ \n");

  for (bb = from_bb; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
      fprintf (sched_dump, "\n;;   --- Region Dependences --- b %d bb %d \n",
	       BB_TO_BLOCK (bb), bb);

      debug_dependencies (head, tail);
    }
}
/* Print dependencies information for instructions between HEAD and TAIL.
   ??? This function would probably fit best in haifa-sched.c.  */
void debug_dependencies (rtx head, rtx tail)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
	   "insn", "code", "bb", "dep", "prio", "cost",
	   "reservation");
  fprintf (sched_dump, ";;   %7s%6s%6s%6s%6s%6s%14s\n",
	   "----", "----", "--", "---", "----", "----",
	   "-----------");

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	{
	  int n;
	  fprintf (sched_dump, ";;   %6d ", INSN_UID (insn));
	  if (NOTE_P (insn))
	    {
	      n = NOTE_KIND (insn);
	      fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
	    }
	  else
	    fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
	  continue;
	}

      fprintf (sched_dump,
	       ";;   %s%5d%6d%6d%6d%6d%6d   ",
	       (SCHED_GROUP_P (insn) ? "+" : " "),
	       INSN_UID (insn),
	       INSN_CODE (insn),
	       BLOCK_NUM (insn),
	       sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
	       (sel_sched_p () ? (sched_emulate_haifa_p ? -1
				  : INSN_PRIORITY (insn))
		: INSN_PRIORITY (insn)),
	       (sel_sched_p () ? (sched_emulate_haifa_p ? -1
				  : insn_cost (insn))
		: insn_cost (insn)));

      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);

      fprintf (sched_dump, "\t: ");
      {
	sd_iterator_def sd_it;
	dep_t dep;

	FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
	  fprintf (sched_dump, "%d ", INSN_UID (DEP_CON (dep)));
      }
      fprintf (sched_dump, "\n");
    }

  fprintf (sched_dump, "\n");
}
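/* The dump thus starts with a header line naming the columns (insn,
   code, bb, dep, prio, cost, reservation), followed by one line per
   insn listing its forward dependences after the colon.  */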
/* Returns true if all the basic blocks of the current region have
   NOTE_DISABLE_SCHED_OF_BLOCK which means not to schedule that region.  */
static bool
sched_is_disabled_for_current_region_p (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
      return false;

  return true;
}
/* Free all region dependencies saved in INSN_BACK_DEPS and
   INSN_RESOLVED_BACK_DEPS.  The Haifa scheduler does this on the fly
   when scheduling, so this function is supposed to be called from
   the selective scheduling only.  */
void
free_rgn_deps (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

      sched_free_deps (head, tail, false);
    }
}
static int rgn_n_insns;
/* Compute insn priority for the current region.  */
void
compute_priorities (void)
{
  int bb;

  current_sched_info->sched_max_insns_priority = 0;
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
      get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);

      if (no_real_insns_p (head, tail))
	continue;

      rgn_n_insns += set_priorities (head, tail);
    }
  current_sched_info->sched_max_insns_priority++;
}
/* Schedule a region.  A region is either an inner loop, a loop-free
   subroutine, or a single basic block.  Each bb in the region is
   scheduled after its flow predecessors.  */

static void
schedule_region (int rgn)
{
  int bb;
  int sched_rgn_n_insns = 0;

  rgn_n_insns = 0;

  rgn_setup_region (rgn);

  /* Don't schedule region that is marked by
     NOTE_DISABLE_SCHED_OF_BLOCK.  */
  if (sched_is_disabled_for_current_region_p ())
    return;

  sched_rgn_compute_dependencies (rgn);

  sched_rgn_local_init (rgn);

  /* Set priorities.  */
  compute_priorities ();

  sched_extend_ready_list (rgn_n_insns);

  if (sched_pressure_p)
    {
      sched_init_region_reg_pressure_info ();
      for (bb = 0; bb < current_nr_blocks; bb++)
	{
	  basic_block first_bb, last_bb;
	  rtx head, tail;

	  first_bb = EBB_FIRST_BB (bb);
	  last_bb = EBB_LAST_BB (bb);

	  get_ebb_head_tail (first_bb, last_bb, &head, &tail);

	  if (no_real_insns_p (head, tail))
	    {
	      gcc_assert (first_bb == last_bb);
	      continue;
	    }
	  sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
	}
    }

  /* Now we can schedule all blocks.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      basic_block first_bb, last_bb, curr_bb;
      rtx head, tail;

      first_bb = EBB_FIRST_BB (bb);
      last_bb = EBB_LAST_BB (bb);

      get_ebb_head_tail (first_bb, last_bb, &head, &tail);

      if (no_real_insns_p (head, tail))
	{
	  gcc_assert (first_bb == last_bb);
	  continue;
	}

      current_sched_info->prev_head = PREV_INSN (head);
      current_sched_info->next_tail = NEXT_INSN (tail);

      remove_notes (head, tail);

      unlink_bb_notes (first_bb, last_bb);

      target_bb = bb;

      gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
      current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;

      curr_bb = first_bb;
      if (dbg_cnt (sched_block))
	{
	  schedule_block (&curr_bb);
	  gcc_assert (EBB_FIRST_BB (bb) == first_bb);
	  sched_rgn_n_insns += sched_n_insns;
	}
      else
	sched_rgn_n_insns += rgn_n_insns;

      /* Clean up.  */
      if (current_nr_blocks > 1)
	free_trg_info ();
    }

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (sched_rgn_n_insns == rgn_n_insns);

  sched_finish_ready_list ();

  /* Done with this region.  */
  sched_rgn_local_finish ();

  /* Free dependencies.  */
  for (bb = 0; bb < current_nr_blocks; ++bb)
    free_block_dependencies (bb);

  gcc_assert (haifa_recovery_bb_ever_added_p || deps_pools_are_empty_p ());
}
/* Initialize data structures for region scheduling.  */
void
sched_rgn_init (bool single_blocks_p)
{
  min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
		   / 100);

  nr_inter = 0;
  nr_spec = 0;

  extend_regions ();

  CONTAINING_RGN (ENTRY_BLOCK) = -1;
  CONTAINING_RGN (EXIT_BLOCK) = -1;

  /* Compute regions for scheduling.  */
  if (single_blocks_p
      || n_basic_blocks == NUM_FIXED_BLOCKS + 1
      || !flag_schedule_interblock
      || is_cfg_nonregular ())
    {
      find_single_block_region (sel_sched_p ());
    }
  else
    {
      /* Compute the dominators and post dominators.  */
      if (!sel_sched_p ())
	calculate_dominance_info (CDI_DOMINATORS);

      /* Find regions.  */
      find_rgns ();

      if (sched_verbose >= 3)
	debug_regions ();

      /* For now.  This will move as more and more of haifa is converted
	 to using the cfg code.  */
      if (!sel_sched_p ())
	free_dominance_info (CDI_DOMINATORS);
    }

  gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);

  RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
			     RGN_NR_BLOCKS (nr_regions - 1));
}
/* Free data structures for region scheduling.  */
void
sched_rgn_finish (void)
{
  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  if (sched_verbose)
    {
      if (reload_completed == 0
	  && flag_schedule_interblock)
	{
	  fprintf (sched_dump,
		   "\n;; Procedure interblock/speculative motions == %d/%d \n",
		   nr_inter, nr_spec);
	}
      else
	gcc_assert (nr_inter <= 0);
      fprintf (sched_dump, "\n\n");
    }

  nr_regions = 0;

  free (rgn_table);
  rgn_table = NULL;

  free (rgn_bb_table);
  rgn_bb_table = NULL;

  free (block_to_bb);
  block_to_bb = NULL;

  free (containing_rgn);
  containing_rgn = NULL;

  free (ebb_head);
  ebb_head = NULL;
}
/* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
   point to the region RGN.  */
void
rgn_setup_region (int rgn)
{
  int bb;

  /* Set variables for the current region.  */
  current_nr_blocks = RGN_NR_BLOCKS (rgn);
  current_blocks = RGN_BLOCKS (rgn);

  /* EBB_HEAD is a region-scope structure.  But we realloc it for
     each region to save time/memory/something else.
     See comments in add_block1, for what reasons we allocate +1 element.  */
  ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
  for (bb = 0; bb <= current_nr_blocks; bb++)
    ebb_head[bb] = current_blocks + bb;
}
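/* For example, a region whose blocks start at rgn_bb_table index 10
   and which contains 3 blocks gets ebb_head = {10, 11, 12, 13}: one
   entry per block plus a sentinel marking the end of the last ebb.  */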
/* Compute instruction dependencies in region RGN.  */
void
sched_rgn_compute_dependencies (int rgn)
{
  if (!RGN_DONT_CALC_DEPS (rgn))
    {
      int bb;

      if (sel_sched_p ())
	sched_emulate_haifa_p = 1;

      init_deps_global ();

      /* Initializations for region data dependence analysis.  */
      bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
      for (bb = 0; bb < current_nr_blocks; bb++)
	init_deps (bb_deps + bb, false);

      /* Initialize bitmap used in add_branch_dependences.  */
      insn_referenced = sbitmap_alloc (sched_max_luid);
      sbitmap_zero (insn_referenced);

      /* Compute backward dependencies.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
	compute_block_dependences (bb);

      sbitmap_free (insn_referenced);
      free_pending_lists ();
      finish_deps_global ();
      free (bb_deps);

      /* We don't want to recalculate this twice.  */
      RGN_DONT_CALC_DEPS (rgn) = 1;

      if (sel_sched_p ())
	sched_emulate_haifa_p = 0;
    }
  else
    /* (This is a recovery block.  It is always a single block region.)
       OR (We use selective scheduling.)  */
    gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
}
/* Init region data structures.  */
void
sched_rgn_local_init (int rgn)
{
  int bb;

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      basic_block block;
      edge e;
      edge_iterator ei;
      int rgn_nr_edges;

      prob = XNEWVEC (int, current_nr_blocks);

      dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
      sbitmap_vector_zero (dom, current_nr_blocks);

      /* Use ->aux to implement EDGE_TO_BIT mapping.  */
      rgn_nr_edges = 0;
      FOR_EACH_BB (block)
	{
	  if (CONTAINING_RGN (block->index) != rgn)
	    continue;
	  FOR_EACH_EDGE (e, ei, block->succs)
	    SET_EDGE_TO_BIT (e, rgn_nr_edges++);
	}

      rgn_edges = XNEWVEC (edge, rgn_nr_edges);
      rgn_nr_edges = 0;
      FOR_EACH_BB (block)
	{
	  if (CONTAINING_RGN (block->index) != rgn)
	    continue;
	  FOR_EACH_EDGE (e, ei, block->succs)
	    rgn_edges[rgn_nr_edges++] = e;
	}

      /* Split edges.  */
      pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      sbitmap_vector_zero (pot_split, current_nr_blocks);
      ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      sbitmap_vector_zero (ancestor_edges, current_nr_blocks);

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
	compute_dom_prob_ps (bb);

      /* Cleanup ->aux used for EDGE_TO_BIT mapping.  */
      /* We don't need them anymore.  But we want to avoid duplication of
	 aux fields in the newly created edges.  */
      FOR_EACH_BB (block)
	{
	  if (CONTAINING_RGN (block->index) != rgn)
	    continue;
	  FOR_EACH_EDGE (e, ei, block->succs)
	    e->aux = NULL;
	}
    }
}
/* Free data computed for the finished region.  */
void
sched_rgn_local_free (void)
{
  free (prob);
  sbitmap_vector_free (dom);
  sbitmap_vector_free (pot_split);
  sbitmap_vector_free (ancestor_edges);
  free (rgn_edges);
}

/* Free data computed for the finished region.  */
void
sched_rgn_local_finish (void)
{
  if (current_nr_blocks > 1 && !sel_sched_p ())
    sched_rgn_local_free ();
}
/* Setup scheduler infos.  */
void
rgn_setup_common_sched_info (void)
{
  memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
	  sizeof (rgn_common_sched_info));

  rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
  rgn_common_sched_info.add_block = rgn_add_block;
  rgn_common_sched_info.estimate_number_of_insns
    = rgn_estimate_number_of_insns;
  rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;

  common_sched_info = &rgn_common_sched_info;
}
/* Setup all *_sched_info structures (for the Haifa frontend
   and for the dependence analysis) in the interblock scheduler.  */
void
rgn_setup_sched_infos (void)
{
  if (!sel_sched_p ())
    memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
	    sizeof (rgn_sched_deps_info));
  else
    memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
	    sizeof (rgn_sched_deps_info));

  sched_deps_info = &rgn_sched_deps_info;

  memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
  current_sched_info = &rgn_sched_info;
}
/* The one entry point in this file.  */
void
schedule_insns (void)
{
  int rgn;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  rgn_setup_common_sched_info ();
  rgn_setup_sched_infos ();

  haifa_sched_init ();
  sched_rgn_init (reload_completed);

  bitmap_initialize (&not_in_df, 0);
  bitmap_clear (&not_in_df);

  /* Schedule every region in the subroutine.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    if (dbg_cnt (sched_region))
      schedule_region (rgn);

  /* Clean up.  */
  sched_rgn_finish ();
  bitmap_clear (&not_in_df);

  haifa_sched_finish ();
}
/* INSN has been added to/removed from current region.  */
static void
rgn_add_remove_insn (rtx insn, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;

  if (INSN_BB (insn) == target_bb)
    {
      if (!remove_p)
	target_n_insns++;
      else
	target_n_insns--;
    }
}
/* Extend internal data structures.  */
void
extend_regions (void)
{
  rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
  rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
  block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
  containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
}
static void
rgn_make_new_region_out_of_new_block (basic_block bb)
{
  int i;

  i = RGN_BLOCKS (nr_regions);
  /* I - first free position in rgn_bb_table.  */

  rgn_bb_table[i] = bb->index;
  RGN_NR_BLOCKS (nr_regions) = 1;
  RGN_HAS_REAL_EBB (nr_regions) = 0;
  RGN_DONT_CALC_DEPS (nr_regions) = 0;
  CONTAINING_RGN (bb->index) = nr_regions;
  BLOCK_TO_BB (bb->index) = 0;

  nr_regions++;

  RGN_BLOCKS (nr_regions) = i + 1;
}
/* BB was added to ebb after AFTER.  */
static void
rgn_add_block (basic_block bb, basic_block after)
{
  extend_regions ();
  bitmap_set_bit (&not_in_df, bb->index);

  if (after == 0 || after == EXIT_BLOCK_PTR)
    {
      rgn_make_new_region_out_of_new_block (bb);
      RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
    }
  else
    {
      int i, pos;

      /* We need to fix rgn_table, block_to_bb, containing_rgn
	 and ebb_head.  */

      BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);

      /* We extend ebb_head to one more position to
	 easily find the last position of the last ebb in
	 the current region.  Thus, ebb_head[BLOCK_TO_BB (after) + 1]
	 is _always_ valid for access.  */

      i = BLOCK_TO_BB (after->index) + 1;
      pos = ebb_head[i] - 1;
      /* Now POS is the index of the last block in the region.  */

      /* Find index of basic block AFTER.  */
      for (; rgn_bb_table[pos] != after->index; pos--);

      pos++;
      gcc_assert (pos > ebb_head[i - 1]);

      /* i - ebb right after "AFTER".  */
      /* ebb_head[i] - VALID.  */

      /* Source position: ebb_head[i]
	 Destination position: ebb_head[i] + 1
	 Last position:
	   RGN_BLOCKS (nr_regions) - 1
	 Number of elements to copy: (last_position) - (source_position) + 1
       */

      memmove (rgn_bb_table + pos + 1,
	       rgn_bb_table + pos,
	       ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
	       * sizeof (*rgn_bb_table));

      rgn_bb_table[pos] = bb->index;

      for (; i <= current_nr_blocks; i++)
	ebb_head[i]++;

      i = CONTAINING_RGN (after->index);
      CONTAINING_RGN (bb->index) = i;

      RGN_HAS_REAL_EBB (i) = 1;

      for (++i; i <= nr_regions; i++)
	RGN_BLOCKS (i)++;
    }
}
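/* For instance, inserting BB right after AFTER in
   rgn_bb_table = {..., AFTER, X, ...} shifts the tail right by one,
   giving {..., AFTER, BB, X, ...}; the ebb_head entries of the later
   ebbs and the RGN_BLOCKS offsets of the following regions all move
   up by one to match.  */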
/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
{
  int old_pos, new_pos, i;

  BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);

  for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
       rgn_bb_table[old_pos] != check_bb_nexti;
       old_pos--);
  gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);

  for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
       rgn_bb_table[new_pos] != bbi;
       new_pos--);
  new_pos++;
  gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);

  gcc_assert (new_pos < old_pos);

  memmove (rgn_bb_table + new_pos + 1,
	   rgn_bb_table + new_pos,
	   (old_pos - new_pos) * sizeof (*rgn_bb_table));

  rgn_bb_table[new_pos] = check_bb_nexti;

  for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
    ebb_head[i]++;
}
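/* Sketch: with rgn_bb_table = {..., BBI, x, y, CHECK_BB_NEXTI, ...},
   the memmove above yields {..., BBI, CHECK_BB_NEXTI, x, y, ...}: the
   block holding the moved jump ends up right after BBI, and the
   ebb_head entries in between are bumped by one.  */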
/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx insn)
{
  if (insn)
    return 0;

  gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
	      && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
  return bb->next_bb;
}
static bool
gate_handle_sched (void)
{
#ifdef INSN_SCHEDULING
  return flag_schedule_insns && dbg_cnt (sched_func);
#else
  return 0;
#endif
}
/* Run instruction scheduler.  */
static unsigned int
rest_of_handle_sched (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    schedule_insns ();
#endif
  return 0;
}
static bool
gate_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
  return optimize > 0 && flag_schedule_insns_after_reload
    && dbg_cnt (sched2_func);
#else
  return 0;
#endif
}
/* Run second scheduling pass after reload.  */
static unsigned int
rest_of_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling2
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    {
      /* Do control and data sched analysis again,
	 and write some more of the results to dump file.  */
      if (flag_sched2_use_superblocks)
	schedule_ebbs ();
      else
	schedule_insns ();
    }
#endif
  return 0;
}
struct rtl_opt_pass pass_sched =
{
 {
  RTL_PASS,
  "sched1",                             /* name */
  gate_handle_sched,                    /* gate */
  rest_of_handle_sched,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_SCHED,                             /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow |
  TODO_ggc_collect                      /* todo_flags_finish */
 }
};
struct rtl_opt_pass pass_sched2 =
{
 {
  RTL_PASS,
  "sched2",                             /* name */
  gate_handle_sched2,                   /* gate */
  rest_of_handle_sched2,                /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_SCHED2,                            /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_df_finish | TODO_verify_rtl_sharing |
  TODO_dump_func |
  TODO_verify_flow |
  TODO_ggc_collect                      /* todo_flags_finish */
 }
};