PR rtl-optimization/49095
gcc/sched-rgn.c
/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   The interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "cfglayout.h"
#include "params.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "target.h"
#include "timevar.h"
#include "tree-pass.h"
#include "dbgcnt.h"

#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;
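
/* For illustration (example values, not from this file): if a region's
   slice of rgn_bb_table is {5, 8, 6} with block 5 the region entry, then
   block_to_bb[5] == 0, block_to_bb[8] == 1 and block_to_bb[6] == 2, so
   every intra-region path visits strictly increasing bb numbers.  */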
/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get an ebb only through splitting of the currently
   scheduling block, therefore, we don't need an ebb_head array for every
   region; it's sufficient to hold it for the current one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;
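
/* min_spec_prob is initialized elsewhere (in the scheduler set-up code)
   from the min-spec-prob parameter, scaled to the REG_BR_PROB_BASE range.
   E.g. (assumed, illustrative numbers): with REG_BR_PROB_BASE == 10000 a
   parameter value of 40 yields 4000, i.e. a source block must be reached
   on at least 40% of the paths from the region entry to be considered;
   see params.def for the authoritative default.  */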
static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
int current_blocks;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static basic_block *bblst_table;
static int bblst_size, bblst_last;

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef struct
{
  basic_block *first_member;
  int nr_members;
}
bblst;

typedef struct
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
}
candidate;

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN)			\
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
int target_bb;

/* List of edges.  */
typedef struct
{
  edge *first_member;
  int nr_members;
}
edgelst;

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);

/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg)		\
  ( TEST_BIT (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;

/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
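
/* The region-local edge numbering itself is assigned elsewhere, when
   rgn_edges is filled in; these two macros only stash the index in the
   otherwise unused edge->aux field, so that, e.g., after
   SET_EDGE_TO_BIT (e, 3) the lookup EDGE_TO_BIT (e) yields 3 again.  */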
/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))

/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx, rtx);
static void compute_block_dependences (int);

static void schedule_region (int);
static rtx concat_INSN_LIST (rtx, rtx);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);

/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */
static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx insn;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_EACH_BB (b)
    FOR_BB_INSNS (b, insn)
      {
        rtx note, next, set, dest;

        /* If this function has a computed jump, then we consider the cfg
           not well structured.  */
        if (JUMP_P (insn) && computed_jump_p (insn))
          return 1;

        if (!INSN_P (insn))
          continue;

        note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
        if (note == NULL_RTX)
          continue;

        /* For that label not to be seen as a referred-to label, this
           must be a single-set which is feeding a jump *only*.  This
           could be a conditional jump with the label split off for
           machine-specific reasons or a casesi/tablejump.  */
        next = next_nonnote_insn (insn);
        if (next == NULL_RTX
            || !JUMP_P (next)
            || (JUMP_LABEL (next) != XEXP (note, 0)
                && find_reg_note (next, REG_LABEL_TARGET,
                                  XEXP (note, 0)) == NULL_RTX)
            || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
          return 1;

        set = single_set (insn);
        if (set == NULL_RTX)
          return 1;

        dest = SET_DEST (set);
        if (!REG_P (dest) || !dead_or_set_p (next, dest))
          return 1;
      }

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  FOR_EACH_BB (b)
    {
      if (EDGE_COUNT (b->preds) == 0
          || (single_pred_p (b)
              && single_pred (b) == b))
        return 1;
    }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}
/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

static void
extract_edgelst (sbitmap set, edgelst *el)
{
  unsigned int i = 0;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;

  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each set bit in the bitmap.  */
  EXECUTE_IF_SET_IN_SBITMAP (set, 0, i, sbi)
    {
      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;
    }
}
/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_regions (void)
{
  int rgn, bb;

  fprintf (sched_dump, "\n;;   ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
               rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
         BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
        fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");
    }
}

/* Print the region's basic blocks.  */

DEBUG_FUNCTION void
debug_region (int rgn)
{
  int bb;

  fprintf (stderr, "\n;;   ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
           rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    {
      debug_bb_n_slim (rgn_bb_table[current_blocks + bb]);
      fprintf (stderr, "\n");
    }

  fprintf (stderr, "\n");
}

/* True when a bb with index BB_INDEX is contained in region RGN.  */
static bool
bb_in_region_p (int bb_index, int rgn)
{
  int i;

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    if (rgn_bb_table[current_blocks + i] == bb_index)
      return true;

  return false;
}
/* Dump region RGN to file F using dot syntax.  */
void
dump_region_dot (FILE *f, int rgn)
{
  int i;

  fprintf (f, "digraph Region_%d {\n", rgn);

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    {
      edge e;
      edge_iterator ei;
      int src_bb_num = rgn_bb_table[current_blocks + i];
      struct basic_block_def *bb = BASIC_BLOCK (src_bb_num);

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (bb_in_region_p (e->dest->index, rgn))
          fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
    }
  fprintf (f, "}\n");
}

/* The same, but first open a file specified by FNAME.  */
void
dump_region_dot_file (const char *fname, int rgn)
{
  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);
}

/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (bool ebbs_p)
{
  basic_block bb, ebb_start;
  int i = 0;

  nr_regions = 0;

  if (ebbs_p) {
    int probability_cutoff;
    if (profile_info && flag_branch_probabilities)
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
    else
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
    probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
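
    /* Worked example (parameter value assumed for illustration):
       REG_BR_PROB_BASE is 10000, so a tracer-min-branch-probability of 50
       scales to 100 * 50 == 5000; the loop below then stops growing the
       ebb as soon as a fall-through edge is taken 50% of the time or
       less.  */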

    FOR_EACH_BB (ebb_start)
      {
        RGN_NR_BLOCKS (nr_regions) = 0;
        RGN_BLOCKS (nr_regions) = i;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;

        for (bb = ebb_start; ; bb = bb->next_bb)
          {
            edge e;

            rgn_bb_table[i] = bb->index;
            RGN_NR_BLOCKS (nr_regions)++;
            CONTAINING_RGN (bb->index) = nr_regions;
            BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
            i++;

            if (bb->next_bb == EXIT_BLOCK_PTR
                || LABEL_P (BB_HEAD (bb->next_bb)))
              break;

            e = find_fallthru_edge (bb->succs);
            if (! e)
              break;
            if (e->probability <= probability_cutoff)
              break;
          }

        ebb_start = bb;
        nr_regions++;
      }
  }
  else
    FOR_EACH_BB (bb)
      {
        rgn_bb_table[nr_regions] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = nr_regions;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;

        CONTAINING_RGN (bb->index) = nr_regions;
        BLOCK_TO_BB (bb->index) = 0;
        nr_regions++;
      }
}

/* Estimate number of the insns in the BB.  */
static int
rgn_estimate_number_of_insns (basic_block bb)
{
  int count;

  count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));

  if (MAY_HAVE_DEBUG_INSNS)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
        if (DEBUG_INSN_P (insn))
          count--;
    }

  return count;
}

/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
                   (BASIC_BLOCK (block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
          || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
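
/* Both limits come from the max-sched-region-blocks and
   max-sched-region-insns parameters and are tunable with --param, so any
   concrete numbers (10 blocks / 100 insns in typical builds) are only
   indicative defaults, not hard-wired constants.  */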

/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the most inner
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)			\
{							\
  if (max_hdr[blk] == -1)				\
    max_hdr[blk] = hdr;					\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])		\
    RESET_BIT (inner, hdr);				\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])		\
    {							\
      RESET_BIT (inner,max_hdr[blk]);			\
      max_hdr[blk] = hdr;				\
    }							\
}
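
/* To illustrate the macro above: dfs_nr grows as the DFS descends, so of
   two candidate headers the one with the larger dfs_nr is the more deeply
   nested one.  If blk's recorded header is deeper than HDR, then HDR
   encloses it and cannot be an inner loop, so HDR's bit in INNER is
   cleared; otherwise HDR becomes blk's new innermost header and the
   previously recorded header loses its inner-loop status.  */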

/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the following global (ick) variables

     * nr_regions
     * rgn_table
     * rgn_bb_table
     * block_to_bb
     * containing_rgn

   We use dominator relationships to avoid making regions out of non-reducible
   loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */
static void
haifa_find_rgns (void)
{
  int *max_hdr, *dfs_nr, *degree;
  char no_loops = 1;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;
  basic_block bb;

  /* Note if a block is a natural loop header.  */
  sbitmap header;

  /* Note if a block is a natural inner loop header.  */
  sbitmap inner;

  /* Note if a block is in the block queue.  */
  sbitmap in_queue;

  /* Note if a block is on the DFS stack.  */
  sbitmap in_stack;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block);
  dfs_nr = XCNEWVEC (int, last_basic_block);
  stack = XNEWVEC (edge_iterator, n_edges);

  inner = sbitmap_alloc (last_basic_block);
  sbitmap_ones (inner);

  header = sbitmap_alloc (last_basic_block);
  sbitmap_zero (header);

  in_queue = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_queue);

  in_stack = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_stack);

  for (i = 0; i < last_basic_block; i++)
    max_hdr[i] = -1;

#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
#define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))

  /* DFS traversal to find inner loops in the cfg.  */

  current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
  sp = -1;

  while (1)
    {
      if (EDGE_PASSED (current_edge))
        {
          /* We have reached a leaf node or a node that was already
             processed.  Pop edges off the stack until we find
             an edge that has not yet been processed.  */
          while (sp >= 0 && EDGE_PASSED (current_edge))
            {
              /* Pop entry off the stack.  */
              current_edge = stack[sp--];
              node = ei_edge (current_edge)->src->index;
              gcc_assert (node != ENTRY_BLOCK);
              child = ei_edge (current_edge)->dest->index;
              gcc_assert (child != EXIT_BLOCK);
              RESET_BIT (in_stack, child);
              if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
                UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
              ei_next (&current_edge);
            }

          /* See if we have finished the DFS tree traversal.  */
          if (sp < 0 && EDGE_PASSED (current_edge))
            break;

          /* Nope, continue the traversal with the popped node.  */
          continue;
        }

      /* Process a node.  */
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      SET_BIT (in_stack, node);
      dfs_nr[node] = ++count;

      /* We don't traverse to the exit block.  */
      child = ei_edge (current_edge)->dest->index;
      if (child == EXIT_BLOCK)
        {
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;
        }

      /* If the successor is in the stack, then we've found a loop.
         Mark the loop, if it is not a natural loop, then it will
         be rejected during the second traversal.  */
      if (TEST_BIT (in_stack, child))
        {
          no_loops = 0;
          SET_BIT (header, child);
          UPDATE_LOOP_RELATIONS (node, child);
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;
        }

      /* If the child was already visited, then there is no need to visit
         it again.  Just update the loop relationships and restart
         with a new edge.  */
      if (dfs_nr[child])
        {
          if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
            UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
          SET_EDGE_PASSED (current_edge);
          ei_next (&current_edge);
          continue;
        }

      /* Push an entry on the stack and continue DFS traversal.  */
      stack[++sp] = current_edge;
      SET_EDGE_PASSED (current_edge);
      current_edge = ei_start (ei_edge (current_edge)->dest->succs);
    }

  /* Reset ->aux field used by EDGE_PASSED.  */
  FOR_ALL_BB (bb)
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb->succs)
        e->aux = NULL;
    }

  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB (bb)
    if (dfs_nr[bb->index] == 0)
      {
        unreachable = 1;
        break;
      }

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB (bb)
    degree[bb->index] = EDGE_COUNT (bb->preds);

  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)
    {
      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
         there basic blocks which are forced to be region heads.
         This is done to try to assemble a few smaller regions
         from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;

      if (no_loops)
        SET_BIT (header, 0);

      /* Second traversal: find reducible inner loops and topologically sort
         the blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks);

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)
        {
          degree1 = XNEWVEC (int, last_basic_block);
          extended_rgn_header = sbitmap_alloc (last_basic_block);
          sbitmap_zero (extended_rgn_header);
        }

      /* Find blocks which are inner loop headers.  We still have non-reducible
         loops to consider at this point.  */
      FOR_EACH_BB (bb)
        {
          if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
            {
              edge e;
              edge_iterator ei;
              basic_block jbb;

              /* Now check that the loop is reducible.  We do this separately
                 from finding inner loops so that we do not find a reducible
                 loop which contains an inner non-reducible loop.

                 A simple way to find reducible/natural loops is to verify
                 that each block in the loop is dominated by the loop
                 header.

                 If there exists a block that is not dominated by the loop
                 header, then the block is reachable from outside the loop
                 and thus the loop is not a natural loop.  */
              FOR_EACH_BB (jbb)
                {
                  /* First identify blocks in the loop, except for the loop
                     entry block.  */
                  if (bb->index == max_hdr[jbb->index] && bb != jbb)
                    {
                      /* Now verify that the block is dominated by the loop
                         header.  */
                      if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
                        break;
                    }
                }

              /* If we exited the loop early, then I is the header of
                 a non-reducible loop and we should quit processing it
                 now.  */
              if (jbb != EXIT_BLOCK_PTR)
                continue;

              /* I is a header of an inner loop, or block 0 in a subroutine
                 with no loops at all.  */
              head = tail = -1;
              too_large_failure = 0;
              loop_head = max_hdr[bb->index];

              if (extend_regions_p)
                /* We save degree in case when we meet a too_large region
                   and cancel it.  We need a correct degree later when
                   calling extend_rgns.  */
                memcpy (degree1, degree, last_basic_block * sizeof (int));

              /* Decrease degree of all I's successors for topological
                 ordering.  */
              FOR_EACH_EDGE (e, ei, bb->succs)
                if (e->dest != EXIT_BLOCK_PTR)
                  --degree[e->dest->index];

              /* Estimate # insns, and count # blocks in the region.  */
              num_bbs = 1;
              num_insns = common_sched_info->estimate_number_of_insns (bb);

              /* Find all loop latches (blocks with back edges to the loop
                 header), or all the leaf blocks if the cfg has no loops.

                 Place those blocks into the queue.  */
              if (no_loops)
                {
                  FOR_EACH_BB (jbb)
                    /* Leaf nodes have only a single successor which must
                       be EXIT_BLOCK.  */
                    if (single_succ_p (jbb)
                        && single_succ (jbb) == EXIT_BLOCK_PTR)
                      {
                        queue[++tail] = jbb->index;
                        SET_BIT (in_queue, jbb->index);

                        if (too_large (jbb->index, &num_bbs, &num_insns))
                          {
                            too_large_failure = 1;
                            break;
                          }
                      }
                }
              else
                {
                  edge e;

                  FOR_EACH_EDGE (e, ei, bb->preds)
                    {
                      if (e->src == ENTRY_BLOCK_PTR)
                        continue;

                      node = e->src->index;

                      if (max_hdr[node] == loop_head && node != bb->index)
                        {
                          /* This is a loop latch.  */
                          queue[++tail] = node;
                          SET_BIT (in_queue, node);

                          if (too_large (node, &num_bbs, &num_insns))
                            {
                              too_large_failure = 1;
                              break;
                            }
                        }
                    }
                }

              /* Now add all the blocks in the loop to the queue.

                 We know the loop is a natural loop; however the algorithm
                 above will not always mark certain blocks as being in the
                 loop.  Consider:
                    node    children
                     a        b,c
                     b        c
                     c        a,d
                     d        b

                 The algorithm in the DFS traversal may not mark B & D as part
                 of the loop (i.e. they will not have max_hdr set to A).

                 We know they cannot be loop latches (else they would have
                 had max_hdr set since they'd have a backedge to a dominator
                 block).  So we don't need them on the initial queue.

                 We know they are part of the loop because they are dominated
                 by the loop header and can be reached by a backwards walk of
                 the edges starting with nodes on the initial queue.

                 It is safe and desirable to include those nodes in the
                 loop/scheduling region.  To do so we would need to decrease
                 the degree of a node if it is the target of a backedge
                 within the loop itself as the node is placed in the queue.

                 We do not do this because I'm not sure that the actual
                 scheduling code will properly handle this case. ?!? */

              while (head < tail && !too_large_failure)
                {
                  edge e;
                  child = queue[++head];

                  FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
                    {
                      node = e->src->index;

                      /* See discussion above about nodes not marked as in
                         this loop during the initial DFS traversal.  */
                      if (e->src == ENTRY_BLOCK_PTR
                          || max_hdr[node] != loop_head)
                        {
                          tail = -1;
                          break;
                        }
                      else if (!TEST_BIT (in_queue, node) && node != bb->index)
                        {
                          queue[++tail] = node;
                          SET_BIT (in_queue, node);

                          if (too_large (node, &num_bbs, &num_insns))
                            {
                              too_large_failure = 1;
                              break;
                            }
                        }
                    }
                }

              if (tail >= 0 && !too_large_failure)
                {
                  /* Place the loop header into list of region blocks.  */
                  degree[bb->index] = -1;
                  rgn_bb_table[idx] = bb->index;
                  RGN_NR_BLOCKS (nr_regions) = num_bbs;
                  RGN_BLOCKS (nr_regions) = idx++;
                  RGN_DONT_CALC_DEPS (nr_regions) = 0;
                  RGN_HAS_REAL_EBB (nr_regions) = 0;
                  CONTAINING_RGN (bb->index) = nr_regions;
                  BLOCK_TO_BB (bb->index) = count = 0;

                  /* Remove blocks from queue[] when their in degree
                     becomes zero.  Repeat until no blocks are left on the
                     list.  This produces a topological list of blocks in
                     the region.  */
                  while (tail >= 0)
                    {
                      if (head < 0)
                        head = tail;
                      child = queue[head];
                      if (degree[child] == 0)
                        {
                          edge e;

                          degree[child] = -1;
                          rgn_bb_table[idx++] = child;
                          BLOCK_TO_BB (child) = ++count;
                          CONTAINING_RGN (child) = nr_regions;
                          queue[head] = queue[tail--];

                          FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
                            if (e->dest != EXIT_BLOCK_PTR)
                              --degree[e->dest->index];
                        }
                      else
                        --head;
                    }
                  ++nr_regions;
                }
              else if (extend_regions_p)
                {
                  /* Restore DEGREE.  */
                  int *t = degree;

                  degree = degree1;
                  degree1 = t;

                  /* And force successors of BB to be region heads.
                     This may provide several smaller regions instead
                     of one too_large region.  */
                  FOR_EACH_EDGE (e, ei, bb->succs)
                    if (e->dest != EXIT_BLOCK_PTR)
                      SET_BIT (extended_rgn_header, e->dest->index);
                }
            }
        }
      free (queue);

      if (extend_regions_p)
        {
          free (degree1);

          sbitmap_a_or_b (header, header, extended_rgn_header);
          sbitmap_free (extended_rgn_header);

          extend_rgns (degree, &idx, header, max_hdr);
        }
    }

  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB (bb)
    if (degree[bb->index] >= 0)
      {
        rgn_bb_table[idx] = bb->index;
        RGN_NR_BLOCKS (nr_regions) = 1;
        RGN_BLOCKS (nr_regions) = idx++;
        RGN_DONT_CALC_DEPS (nr_regions) = 0;
        RGN_HAS_REAL_EBB (nr_regions) = 0;
        CONTAINING_RGN (bb->index) = nr_regions++;
        BLOCK_TO_BB (bb->index) = 0;
      }

  free (max_hdr);
  free (degree);
  free (stack);
  sbitmap_free (header);
  sbitmap_free (inner);
  sbitmap_free (in_queue);
  sbitmap_free (in_stack);
}

/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */
static void
find_rgns (void)
{
  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();
}

static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */
static int
gather_region_statistics (int **rsp)
{
  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)
    {
      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)
        {
          a = XRESIZEVEC (int, a, nr_blocks);
          do
            a[a_sz++] = 0;
          while (a_sz != nr_blocks);
        }

      a[nr_blocks - 1]++;
    }

  *rsp = a;
  return a_sz;
}

/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
  int i;

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)
    {
      int n1, n2;

      n2 = s2[i];

      if (n2 == 0)
        continue;

      if (i >= s1_sz)
        n1 = 0;
      else
        n1 = s1[i];

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
               "was %d + %d more\n", i + 1, n1, n2 - n1);
    }
}

/* Extend regions.
   DEGREE - Array of incoming edge counts, considering only
   the edges that don't have their sources in formed regions yet.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */
void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block);

  order = XNEWVEC (int, last_basic_block);
  post_order_compute (order, false, false);

  for (i = nblocks - 1; i >= 0; i--)
    {
      int bbn = order[i];
      if (degree[bbn] >= 0)
        {
          max_hdr[bbn] = bbn;
          rescan = 1;
        }
      else
        /* This block already was processed in find_rgns.  */
        max_hdr[bbn] = -1;
    }

  /* The idea is to topologically walk through CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     CFG should be traversed until no further changes are made.  On each
     iteration the set of the region heads is extended (the set of those
     blocks that have max_hdr[bbi] == bbi).  This set is upper bounded by the
     set of all basic blocks, thus the algorithm is guaranteed to
     terminate.  */
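
  /* One step of the walk, on assumed example blocks: if B1 -> B3 and
     B2 -> B3 with max_hdr[B1] == max_hdr[B2] == H, and all three share a
     loop header, then B3 gets max_hdr[B3] = H and joins H's region.  Had
     B1 and B2 disagreed, B3 would have been made a region head itself.  */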

  while (rescan && iter < max_iter)
    {
      rescan = 0;

      for (i = nblocks - 1; i >= 0; i--)
        {
          edge e;
          edge_iterator ei;
          int bbn = order[i];

          if (max_hdr[bbn] != -1 && !TEST_BIT (header, bbn))
            {
              int hdr = -1;

              FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->preds)
                {
                  int predn = e->src->index;

                  if (predn != ENTRY_BLOCK
                      /* If pred wasn't processed in find_rgns.  */
                      && max_hdr[predn] != -1
                      /* And pred and bb reside in the same loop
                         (or out of any loop).  */
                      && loop_hdr[bbn] == loop_hdr[predn])
                    {
                      if (hdr == -1)
                        /* Then bb extends the containing region of pred.  */
                        hdr = max_hdr[predn];
                      else if (hdr != max_hdr[predn])
                        /* Too bad, there are at least two predecessors
                           that reside in different regions.  Thus, BB should
                           begin its own region.  */
                        {
                          hdr = bbn;
                          break;
                        }
                    }
                  else
                    /* BB starts its own region.  */
                    {
                      hdr = bbn;
                      break;
                    }
                }

              if (hdr == bbn)
                {
                  /* If BB starts its own region,
                     update set of headers with BB.  */
                  SET_BIT (header, bbn);
                  rescan = 1;
                }
              else
                gcc_assert (hdr != -1);

              max_hdr[bbn] = hdr;
            }
        }

      iter++;
    }

  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with max-sched-extend-regions-iters parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */

  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
             rescan ? "... failed" : "");

  if (!rescan && iter != 0)
    {
      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
        s1_sz = gather_region_statistics (&s1);

      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)
        {
          int bbn = order[i];

          if (max_hdr[bbn] == bbn)
            /* BBN is a region head.  */
            {
              edge e;
              edge_iterator ei;
              int num_bbs = 0, j, num_insns = 0, large;

              large = too_large (bbn, &num_bbs, &num_insns);

              degree[bbn] = -1;
              rgn_bb_table[idx] = bbn;
              RGN_BLOCKS (nr_regions) = idx++;
              RGN_DONT_CALC_DEPS (nr_regions) = 0;
              RGN_HAS_REAL_EBB (nr_regions) = 0;
              CONTAINING_RGN (bbn) = nr_regions;
              BLOCK_TO_BB (bbn) = 0;

              FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
                if (e->dest != EXIT_BLOCK_PTR)
                  degree[e->dest->index]--;

              if (!large)
                /* Here we check whether the region is too_large.  */
                for (j = i - 1; j >= 0; j--)
                  {
                    int succn = order[j];
                    if (max_hdr[succn] == bbn)
                      {
                        if ((large = too_large (succn, &num_bbs, &num_insns)))
                          break;
                      }
                  }

              if (large)
                /* If the region is too_large, then wrap every block of
                   the region into single block region.
                   Here we wrap region head only.  Other blocks are
                   processed in the below cycle.  */
                {
                  RGN_NR_BLOCKS (nr_regions) = 1;
                  nr_regions++;
                }

              num_bbs = 1;

              for (j = i - 1; j >= 0; j--)
                {
                  int succn = order[j];

                  if (max_hdr[succn] == bbn)
                    /* This cycle iterates over all basic blocks, that
                       are supposed to be in the region with head BBN,
                       and wraps them into that region (or in single
                       block region).  */
                    {
                      gcc_assert (degree[succn] == 0);

                      degree[succn] = -1;
                      rgn_bb_table[idx] = succn;
                      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
                      CONTAINING_RGN (succn) = nr_regions;

                      if (large)
                        /* Wrap SUCCN into single block region.  */
                        {
                          RGN_BLOCKS (nr_regions) = idx;
                          RGN_NR_BLOCKS (nr_regions) = 1;
                          RGN_DONT_CALC_DEPS (nr_regions) = 0;
                          RGN_HAS_REAL_EBB (nr_regions) = 0;
                          nr_regions++;
                        }

                      idx++;

                      FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
                        if (e->dest != EXIT_BLOCK_PTR)
                          degree[e->dest->index]--;
                    }
                }

              if (!large)
                {
                  RGN_NR_BLOCKS (nr_regions) = num_bbs;
                  nr_regions++;
                }
            }
        }

      if (sched_verbose >= 6)
        {
          int *s2, s2_sz;

          /* Get the new statistics and print the comparison with the
             one before calling this function.  */
          s2_sz = gather_region_statistics (&s2);
          print_region_statistics (s1, s1_sz, s2, s2_sz);
          free (s1);
          free (s2);
        }
    }

  free (order);
  free (max_hdr);

  *idxp = idx;
}

/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

static void
compute_dom_prob_ps (int bb)
{
  edge_iterator in_ei;
  edge in_edge;

  /* We shouldn't have any real ebbs yet.  */
  gcc_assert (ebb_head [bb] == bb + current_blocks);

  if (IS_RGN_ENTRY (bb))
    {
      SET_BIT (dom[bb], 0);
      prob[bb] = REG_BR_PROB_BASE;
      return;
    }

  prob[bb] = 0;

  /* Initialize dom[bb] to '111..1'.  */
  sbitmap_ones (dom[bb]);

  FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
    {
      int pred_bb;
      edge out_edge;
      edge_iterator out_ei;

      if (in_edge->src == ENTRY_BLOCK_PTR)
        continue;

      pred_bb = BLOCK_TO_BB (in_edge->src->index);
      sbitmap_a_and_b (dom[bb], dom[bb], dom[pred_bb]);
      sbitmap_a_or_b (ancestor_edges[bb],
                      ancestor_edges[bb], ancestor_edges[pred_bb]);

      SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (in_edge));

      sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[pred_bb]);

      FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
        SET_BIT (pot_split[bb], EDGE_TO_BIT (out_edge));

      prob[bb] += ((prob[pred_bb] * in_edge->probability) / REG_BR_PROB_BASE);
    }

  SET_BIT (dom[bb], bb);
  sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";;  bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
             (100 * prob[bb]) / REG_BR_PROB_BASE);
}
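
/* A worked instance of the accumulation above (example numbers): if bb has
   predecessors p1 and p2 with prob[p1] == 8000 and prob[p2] == 2000, and
   each incoming edge is taken with probability 5000 out of
   REG_BR_PROB_BASE == 10000, then
   prob[bb] = 8000*5000/10000 + 2000*5000/10000 = 5000, i.e. bb is reached
   on half of the paths from the region entry.  */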

/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
   Note that bb_trg dominates bb_src.  */

static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
  sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
  sbitmap_copy (src, pot_split[bb_src]);

  sbitmap_difference (src, src, pot_split[bb_trg]);
  extract_edgelst (src, bl);
  sbitmap_free (src);
}

/* Find the valid candidate-source-blocks for the target block TRG, compute
   their probability, and check if they are speculative or not.
   For speculative sources, compute their update-blocks and split-blocks.  */

static void
compute_trg_info (int trg)
{
  candidate *sp;
  edgelst el = { NULL, 0 };
  int i, j, k, update_idx;
  basic_block block;
  sbitmap visited;
  edge_iterator ei;
  edge e;

  candidate_table = XNEWVEC (candidate, current_nr_blocks);

  bblst_last = 0;
  /* bblst_table holds split blocks and update blocks for each block after
     the current one in the region.  split blocks and update blocks are
     the TO blocks of region edges, so there can be at most rgn_nr_edges
     of them.  */
  bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
  bblst_table = XNEWVEC (basic_block, bblst_size);

  edgelst_last = 0;
  edgelst_table = XNEWVEC (edge, rgn_nr_edges);

  /* Define some of the fields for the target bb as well.  */
  sp = candidate_table + trg;
  sp->is_valid = 1;
  sp->is_speculative = 0;
  sp->src_prob = REG_BR_PROB_BASE;

  visited = sbitmap_alloc (last_basic_block);

  for (i = trg + 1; i < current_nr_blocks; i++)
    {
      sp = candidate_table + i;

      sp->is_valid = IS_DOMINATED (i, trg);
      if (sp->is_valid)
        {
          int tf = prob[trg], cf = prob[i];

          /* In CFGs with low probability edges TF can possibly be zero.  */
          sp->src_prob = (tf ? ((cf * REG_BR_PROB_BASE) / tf) : 0);
          sp->is_valid = (sp->src_prob >= min_spec_prob);
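
          /* E.g. (example numbers): with prob[trg] == 5000 and
             prob[i] == 2000, src_prob is 2000 * 10000 / 5000 == 4000:
             the source is reached on 40% of the paths that reach the
             target, and it stays valid only if that beats
             min_spec_prob.  */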
        }

      if (sp->is_valid)
        {
          split_edges (i, trg, &el);
          sp->is_speculative = (el.nr_members) ? 1 : 0;
          if (sp->is_speculative && !flag_schedule_speculative)
            sp->is_valid = 0;
        }

      if (sp->is_valid)
        {
          /* Compute split blocks and store them in bblst_table.
             The TO block of every split edge is a split block.  */
          sp->split_bbs.first_member = &bblst_table[bblst_last];
          sp->split_bbs.nr_members = el.nr_members;
          for (j = 0; j < el.nr_members; bblst_last++, j++)
            bblst_table[bblst_last] = el.first_member[j]->dest;
          sp->update_bbs.first_member = &bblst_table[bblst_last];

          /* Compute update blocks and store them in bblst_table.
             For every split edge, look at the FROM block, and check
             all out edges.  For each out edge that is not a split edge,
             add the TO block to the update block list.  This list can end
             up with a lot of duplicates.  We need to weed them out to avoid
             overrunning the end of the bblst_table.  */

          update_idx = 0;
          sbitmap_zero (visited);
          for (j = 0; j < el.nr_members; j++)
            {
              block = el.first_member[j]->src;
              FOR_EACH_EDGE (e, ei, block->succs)
                {
                  if (!TEST_BIT (visited, e->dest->index))
                    {
                      for (k = 0; k < el.nr_members; k++)
                        if (e == el.first_member[k])
                          break;

                      if (k >= el.nr_members)
                        {
                          bblst_table[bblst_last++] = e->dest;
                          SET_BIT (visited, e->dest->index);
                          update_idx++;
                        }
                    }
                }
            }
          sp->update_bbs.nr_members = update_idx;

          /* Make sure we didn't overrun the end of bblst_table.  */
          gcc_assert (bblst_last <= bblst_size);
        }
      else
        {
          sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;

          sp->is_speculative = 0;
          sp->src_prob = 0;
        }
    }

  sbitmap_free (visited);
}

/* Free the computed target info.  */
static void
free_trg_info (void)
{
  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
}

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidate (int i)
{
  if (!candidate_table[i].is_valid)
    return;

  if (candidate_table[i].is_speculative)
    {
      int j;
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
        {
          int b = candidate_table[i].split_bbs.first_member[j]->index;

          fprintf (sched_dump, " %d ", b);
        }
      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
        {
          int b = candidate_table[i].update_bbs.first_member[j]->index;

          fprintf (sched_dump, " %d ", b);
        }
      fprintf (sched_dump, "\n");
    }
  else
    {
      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
    }
}

/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidates (int trg)
{
  int i;

  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
           BB_TO_BLOCK (trg), trg);
  for (i = trg + 1; i < current_nr_blocks; i++)
    debug_candidate (i);
}

/* Functions for speculative scheduling.  */

static bitmap_head not_in_df;

/* Return 0 if x is a set of a register alive in the beginning of one
   of the split-blocks of src, otherwise return 1.  */

static int
check_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return 1;

  while (GET_CODE (reg) == SUBREG
         || GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
          if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
            return 1;

      return 0;
    }

  if (!REG_P (reg))
    return 1;

  regno = REGNO (reg);

  if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
    {
      /* Global registers are assumed live.  */
      return 0;
    }
  else
    {
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* Check for hard registers.  */
          int j = hard_regno_nregs[regno][GET_MODE (reg)];
          while (--j >= 0)
            {
              for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
                {
                  basic_block b = candidate_table[src].split_bbs.first_member[i];
                  int t = bitmap_bit_p (&not_in_df, b->index);

                  /* We can have split blocks, that were recently generated.
                     Such blocks are always outside current region.  */
                  gcc_assert (!t || (CONTAINING_RGN (b->index)
                                     != CONTAINING_RGN (BB_TO_BLOCK (src))));

                  if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
                    return 0;
                }
            }
        }
      else
        {
          /* Check for pseudo registers.  */
          for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
            {
              basic_block b = candidate_table[src].split_bbs.first_member[i];
              int t = bitmap_bit_p (&not_in_df, b->index);

              gcc_assert (!t || (CONTAINING_RGN (b->index)
                                 != CONTAINING_RGN (BB_TO_BLOCK (src))));

              if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
                return 0;
            }
        }
    }

  return 1;
}

/* If x is a set of a register R, mark that R is alive in the beginning
   of every update-block of src.  */

static void
update_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG
         || GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
          update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));

      return;
    }

  if (!REG_P (reg))
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);

  if (! HARD_REGISTER_NUM_P (regno)
      || !global_regs[regno])
    {
      for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
        {
          basic_block b = candidate_table[src].update_bbs.first_member[i];

          if (HARD_REGISTER_NUM_P (regno))
            bitmap_set_range (df_get_live_in (b), regno,
                              hard_regno_nregs[regno][GET_MODE (reg)]);
          else
            bitmap_set_bit (df_get_live_in (b), regno);
        }
    }
}

/* Return 1 if insn can be speculatively moved from block src to trg,
   otherwise return 0.  Called before first insertion of insn to
   ready-list or before the scheduling.  */

static int
check_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return check_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;

      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
             || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
            && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
          return 0;

      return 1;
    }

  return 1;
}

/* Update the live registers info after insn was moved speculatively from
   block src to trg.  */

static void
update_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    update_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;

      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
            || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
          update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
    }
}

/* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
#define IS_REACHABLE(bb_from, bb_to)					\
  (bb_from == bb_to							\
   || IS_RGN_ENTRY (bb_from)						\
   || (TEST_BIT (ancestor_edges[bb_to],					\
	 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
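
/* Reading of the macro above (editorial gloss): ancestor_edges[bb_to]
   records every region edge lying on some path from the region entry to
   bb_to.  Assuming bb_from has a single predecessor edge inside the
   region (as the use of single_pred_edge implies), bb_to is reachable
   from bb_from exactly when that edge is one of bb_to's ancestors.  */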

/* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */

static void
set_spec_fed (rtx load_insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
}

/* On the path from the insn to load_insn_bb, find a conditional
   branch depending on insn, that guards the speculative load.  */

static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Iterate through DEF-USE forward dependences.  */
  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      rtx next = DEP_CON (dep);

      if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
           CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
          && IS_REACHABLE (INSN_BB (next), load_insn_bb)
          && load_insn_bb != INSN_BB (next)
          && DEP_TYPE (dep) == REG_DEP_TRUE
          && (JUMP_P (next)
              || find_conditional_protection (next, load_insn_bb)))
        return 1;
    }
  return 0;
}				/* find_conditional_protection */

/* Returns 1 if the same insn1 that participates in the computation
   of load_insn's address is feeding a conditional branch that is
   guarding load_insn.  This is true if we find two DEF-USE
   chains:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
   Locate the branch by following INSN_FORW_DEPS from insn1.  */

static int
is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx insn1 = DEP_PRO (dep);

      /* Must be a DEF-USE dependence upon non-branch.  */
      if (DEP_TYPE (dep) != REG_DEP_TRUE
          || JUMP_P (insn1))
        continue;

      /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn.  */
      if (INSN_BB (insn1) == bb_src
          || (CONTAINING_RGN (BLOCK_NUM (insn1))
              != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
          || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
              && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
        continue;

      /* Now search for the conditional-branch.  */
      if (find_conditional_protection (insn1, bb_src))
        return 1;

      /* Recursive step: search another insn1, "above" current insn1.  */
      return is_conditionally_protected (insn1, bb_src, bb_trg);
    }

  /* The chain does not exist.  */
  return 0;
}				/* is_conditionally_protected */

/* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
   load_insn can move speculatively from bb_src to bb_trg.  All the
   following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and insn2 have a def-use dependence upon
       the same insn 'insn1'.
   (3) either insn2 is in bb_trg, or:
       - there's only one split-block, and
       - insn2 is on the escape path.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   insn2 anyhow.  */

static int
is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  candidate *candp = candidate_table + bb_src;

  if (candp->split_bbs.nr_members != 1)
    /* Must have exactly one escape block.  */
    return 0;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx insn2 = DEP_CON (fore_dep);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  if (INSN_BB (insn2) == bb_trg)
                    /* insn2 is the similar load, in the target block.  */
                    return 1;

                  if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
                    /* insn2 is a similar load, in a split-block.  */
                    return 1;
                }
            }
        }
    }

  /* Couldn't find a similar load.  */
  return 0;
}				/* is_pfree */

/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
   a load moved speculatively, or if load_insn is protected by
   a compare on load_insn's address).  */

static int
is_prisky (rtx load_insn, int bb_src, int bb_trg)
{
  if (FED_BY_SPEC_LOAD (load_insn))
    return 1;

  if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
    /* Dependence may 'hide' out of the region.  */
    return 1;

  if (is_conditionally_protected (load_insn, bb_src, bb_trg))
    return 1;

  return 0;
}

/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
   Return 1 if insn is exception-free (and the motion is valid)
   and 0 otherwise.  */

static int
is_exception_free (rtx insn, int bb_src, int bb_trg)
{
  int insn_class = haifa_classify_insn (insn);

  /* Handle non-load insns.  */
  switch (insn_class)
    {
    case TRAP_FREE:
      return 1;
    case TRAP_RISKY:
      return 0;
    default:;
    }

  /* Handle loads.  */
  if (!flag_schedule_speculative_load)
    return 0;
  IS_LOAD_INSN (insn) = 1;
  switch (insn_class)
    {
    case IFREE:
      return 1;
    case IRISKY:
      return 0;
    case PFREE_CANDIDATE:
      if (is_pfree (insn, bb_src, bb_trg))
        return 1;
      /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate.  */
    case PRISKY_CANDIDATE:
      if (!flag_schedule_speculative_load_dangerous
          || is_prisky (insn, bb_src, bb_trg))
        return 0;
      break;
    default:;
    }

  return flag_schedule_speculative_load_dangerous;
}

/* The number of insns from the current block scheduled so far.  */
static int sched_target_n_insns;
/* The number of insns from the current block to be scheduled in total.  */
static int target_n_insns;
/* The number of insns from the entire region scheduled so far.  */
static int sched_n_insns;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static int can_schedule_ready_p (rtx);
static void begin_schedule_ready (rtx);
static ds_t new_ready (rtx, ds_t);
static int schedule_more_p (void);
static const char *rgn_print_insn (const_rtx, int);
static int rgn_rank (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);

/* Functions for speculative scheduling.  */
static void rgn_add_remove_insn (rtx, int);
static void rgn_add_block (basic_block, basic_block);
static void rgn_fix_recovery_cfg (int, int, int);
static basic_block advance_target_bb (basic_block, rtx);
2073 /* Return nonzero if there are more insns that should be scheduled. */
2075 static int
2076 schedule_more_p (void)
2078 return sched_target_n_insns < target_n_insns;
2081 /* Add all insns that are initially ready to the ready list.  Called
2082 once before scheduling a set of insns. */
2084 static void
2085 init_ready_list (void)
2087 rtx prev_head = current_sched_info->prev_head;
2088 rtx next_tail = current_sched_info->next_tail;
2089 int bb_src;
2090 rtx insn;
2092 target_n_insns = 0;
2093 sched_target_n_insns = 0;
2094 sched_n_insns = 0;
2096 /* Print debugging information. */
2097 if (sched_verbose >= 5)
2098 debug_rgn_dependencies (target_bb);
2100 /* Prepare current target block info. */
2101 if (current_nr_blocks > 1)
2102 compute_trg_info (target_bb);
2104 /* Initialize ready list with all 'ready' insns in target block.
2105 Count number of insns in the target block being scheduled. */
2106 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2108 try_ready (insn);
2109 target_n_insns++;
2111 gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2114 /* Add to ready list all 'ready' insns in valid source blocks.
2115    For speculative insns, perform the live, exception-free, and
2116    issue-delay checks.  */
2117 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2118 if (IS_VALID (bb_src))
2120 rtx src_head;
2121 rtx src_next_tail;
2122 rtx tail, head;
2124 get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2125 &head, &tail);
2126 src_next_tail = NEXT_INSN (tail);
2127 src_head = head;
2129 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2130 if (INSN_P (insn))
2131 try_ready (insn);
2135 /* Called after taking INSN from the ready list. Returns nonzero if this
2136    insn can be scheduled, zero if we should silently discard it.  */
2138 static int
2139 can_schedule_ready_p (rtx insn)
2141 /* An interblock motion? */
2142 if (INSN_BB (insn) != target_bb
2143 && IS_SPECULATIVE_INSN (insn)
2144 && !check_live (insn, INSN_BB (insn)))
2145 return 0;
2146 else
2147 return 1;
2150 /* Updates counter and other information. Split from can_schedule_ready_p ()
2151 because when we schedule insn speculatively then insn passed to
2152 can_schedule_ready_p () differs from the one passed to
2153 begin_schedule_ready (). */
2154 static void
2155 begin_schedule_ready (rtx insn)
2157 /* An interblock motion? */
2158 if (INSN_BB (insn) != target_bb)
2160 if (IS_SPECULATIVE_INSN (insn))
2162 gcc_assert (check_live (insn, INSN_BB (insn)));
2164 update_live (insn, INSN_BB (insn));
2166 /* For speculative load, mark insns fed by it. */
2167 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2168 set_spec_fed (insn);
2170 nr_spec++;
2172 nr_inter++;
2174 else
2176 /* In block motion. */
2177 sched_target_n_insns++;
2179 sched_n_insns++;
2182 /* Called after INSN has all its hard dependencies resolved and the speculation
2183 of type TS is enough to overcome them all.
2184 Return nonzero if it should be moved to the ready list or the queue, or zero
2185 if we should silently discard it. */
2186 static ds_t
2187 new_ready (rtx next, ds_t ts)
2189 if (INSN_BB (next) != target_bb)
2191 int not_ex_free = 0;
2193      /* For speculative insns, before inserting into the ready list or
2194         queue, perform the live, exception-free, and issue-delay checks.  */
2195 if (!IS_VALID (INSN_BB (next))
2196 || CANT_MOVE (next)
2197 || (IS_SPECULATIVE_INSN (next)
2198 && ((recog_memoized (next) >= 0
2199 && min_insn_conflict_delay (curr_state, next, next)
2200 > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2201 || IS_SPECULATION_CHECK_P (next)
2202 || !check_live (next, INSN_BB (next))
2203 || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2204 target_bb)))))
2206 if (not_ex_free
2207 /* We are here because is_exception_free () == false.
2208             But we can possibly handle that with control speculation.  */
2209 && sched_deps_info->generate_spec_deps
2210 && spec_info->mask & BEGIN_CONTROL)
2212 ds_t new_ds;
2214 /* Add control speculation to NEXT's dependency type. */
2215 new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2217 /* Check if NEXT can be speculated with new dependency type. */
2218 if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2219 /* Here we got new control-speculative instruction. */
2220 ts = new_ds;
2221 else
2222 /* NEXT isn't ready yet. */
2223 ts = (ts & ~SPECULATIVE) | HARD_DEP;
2225 else
2226 /* NEXT isn't ready yet. */
2227 ts = (ts & ~SPECULATIVE) | HARD_DEP;
2231 return ts;
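/* Example (illustrative): a load from a source block that fails
   is_exception_free () can still be issued as a control-speculative
   load, guarded by a later check, when the target supports
   BEGIN_CONTROL speculation; otherwise it is simply deferred with
   HARD_DEP until its own block is reached.  */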
2234 /* Return a string that contains the insn uid and optionally anything else
2235 necessary to identify this insn in an output. It's valid to use a
2236 static buffer for this. The ALIGNED parameter should cause the string
2237 to be formatted so that multiple output lines will line up nicely. */
2239 static const char *
2240 rgn_print_insn (const_rtx insn, int aligned)
2242 static char tmp[80];
2244 if (aligned)
2245 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2246 else
2248 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2249 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2250 else
2251 sprintf (tmp, "%d", INSN_UID (insn));
2253 return tmp;
2256 /* Compare priority of two insns. Return a positive number if the second
2257 insn is to be preferred for scheduling, and a negative one if the first
2258 is to be preferred. Zero if they are equally good. */
2260 static int
2261 rgn_rank (rtx insn1, rtx insn2)
2263   /* Some comparisons make sense in interblock scheduling only.  */
2264 if (INSN_BB (insn1) != INSN_BB (insn2))
2266 int spec_val, prob_val;
2268       /* Prefer an inblock motion over an interblock motion.  */
2269 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2270 return 1;
2271 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2272 return -1;
2274       /* Prefer a useful motion over a speculative one.  */
2275 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2276 if (spec_val)
2277 return spec_val;
2279 /* Prefer a more probable (speculative) insn. */
2280 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2281 if (prob_val)
2282 return prob_val;
2284 return 0;
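/* Worked example (illustrative): with INSN1 in target_bb and INSN2 in
   a source block, the first pair of tests returns -1, so INSN1 wins;
   for two speculative insns from different source blocks, the one with
   the higher INSN_PROBABILITY is preferred.  */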
2287 /* NEXT is an instruction that depends on INSN (a backward dependence);
2288 return nonzero if we should include this dependence in priority
2289 calculations. */
2291 static int
2292 contributes_to_priority (rtx next, rtx insn)
2294 /* NEXT and INSN reside in one ebb. */
2295 return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2298 /* INSN is a JUMP_INSN, COND_EXEC is the set of registers that are
2299 conditionally set before INSN. Store the set of registers that
2300 must be considered as used by this jump in USED and that of
2301 registers that must be considered as set in SET. */
2303 static void
2304 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2305 regset cond_exec ATTRIBUTE_UNUSED,
2306 regset used ATTRIBUTE_UNUSED,
2307 regset set ATTRIBUTE_UNUSED)
2309 /* Nothing to do here, since we postprocess jumps in
2310 add_branch_dependences. */
2313 /* This variable holds common_sched_info hooks and data relevant to
2314 the interblock scheduler. */
2315 static struct common_sched_info_def rgn_common_sched_info;
2318 /* This holds data for the dependence analysis relevant to
2319 the interblock scheduler. */
2320 static struct sched_deps_info_def rgn_sched_deps_info;
2322 /* This holds constant data used for initializing the above structure
2323 for the Haifa scheduler. */
2324 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2326 compute_jump_reg_dependencies,
2327 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2328 0, 0, 0
2331 /* Same as above, but for the selective scheduler. */
2332 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2334 compute_jump_reg_dependencies,
2335 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2336 0, 0, 0
2339 /* Return true if scheduling INSN will trigger finish of scheduling
2340 current block. */
2341 static bool
2342 rgn_insn_finishes_block_p (rtx insn)
2344 if (INSN_BB (insn) == target_bb
2345 && sched_target_n_insns + 1 == target_n_insns)
2346     /* INSN is the last not-yet-scheduled instruction in the current block.  */
2347 return true;
2349 return false;
2352 /* Used in schedule_insns to initialize current_sched_info for scheduling
2353 regions (or single basic blocks). */
2355 static const struct haifa_sched_info rgn_const_sched_info =
2357 init_ready_list,
2358 can_schedule_ready_p,
2359 schedule_more_p,
2360 new_ready,
2361 rgn_rank,
2362 rgn_print_insn,
2363 contributes_to_priority,
2364 rgn_insn_finishes_block_p,
2366 NULL, NULL,
2367 NULL, NULL,
2368 0, 0,
2370 rgn_add_remove_insn,
2371 begin_schedule_ready,
2372 NULL,
2373 advance_target_bb,
2374 SCHED_RGN
2377 /* This variable holds the data and hooks needed to the Haifa scheduler backend
2378 for the interblock scheduler frontend. */
2379 static struct haifa_sched_info rgn_sched_info;
2381 /* Returns maximum priority that an insn was assigned to. */
2383 int
2384 get_rgn_sched_max_insns_priority (void)
2386 return rgn_sched_info.sched_max_insns_priority;
2389 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
2391 static bool
2392 sets_likely_spilled (rtx pat)
2394 bool ret = false;
2395 note_stores (pat, sets_likely_spilled_1, &ret);
2396 return ret;
2399 static void
2400 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2402 bool *ret = (bool *) data;
2404 if (GET_CODE (pat) == SET
2405 && REG_P (x)
2406 && HARD_REGISTER_P (x)
2407 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2408 *ret = true;
2411 /* A bitmap to note insns that participate in any dependency. Used in
2412 add_branch_dependences. */
2413 static sbitmap insn_referenced;
2415 /* Add dependences so that branches are scheduled to run last in their
2416 block. */
2417 static void
2418 add_branch_dependences (rtx head, rtx tail)
2420 rtx insn, last;
2422 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2423 that can throw exceptions, force them to remain in order at the end of
2424 the block by adding dependencies and giving the last a high priority.
2425 There may be notes present, and prev_head may also be a note.
2427 Branches must obviously remain at the end. Calls should remain at the
2428 end since moving them results in worse register allocation. Uses remain
2429 at the end to ensure proper register allocation.
2431 cc0 setters remain at the end because they can't be moved away from
2432 their cc0 user.
2434 COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2436 Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2437 values) are not moved before reload because we can wind up with register
2438 allocation failures. */
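/* Illustrative example (hypothetical insns): for a block tail

     A: r0 = ...   (ordinary insn)
     B: use r0     (USE)
     C: jump

   the first loop below chains C after B with an anti-dependence, and
   the second loop anchors the group by making B depend on earlier
   insns that participate in no other dependence, so B and C stay at
   the end of the block.  */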
2440 while (tail != head && DEBUG_INSN_P (tail))
2441 tail = PREV_INSN (tail);
2443 insn = tail;
2444 last = 0;
2445 while (CALL_P (insn)
2446 || JUMP_P (insn)
2447 || (NONJUMP_INSN_P (insn)
2448 && (GET_CODE (PATTERN (insn)) == USE
2449 || GET_CODE (PATTERN (insn)) == CLOBBER
2450 || can_throw_internal (insn)
2451 #ifdef HAVE_cc0
2452 || sets_cc0_p (PATTERN (insn))
2453 #endif
2454 || (!reload_completed
2455 && sets_likely_spilled (PATTERN (insn)))))
2456 || NOTE_P (insn))
2458 if (!NOTE_P (insn))
2460 if (last != 0
2461 && sd_find_dep_between (insn, last, false) == NULL)
2463 if (! sched_insns_conditions_mutex_p (last, insn))
2464 add_dependence (last, insn, REG_DEP_ANTI);
2465 SET_BIT (insn_referenced, INSN_LUID (insn));
2468 CANT_MOVE (insn) = 1;
2470 last = insn;
2473 /* Don't overrun the bounds of the basic block. */
2474 if (insn == head)
2475 break;
2477      do
2478        insn = PREV_INSN (insn);
2479 while (insn != head && DEBUG_INSN_P (insn));
2482 /* Make sure these insns are scheduled last in their block. */
2483 insn = last;
2484 if (insn != 0)
2485 while (insn != head)
2487 insn = prev_nonnote_insn (insn);
2489 if (TEST_BIT (insn_referenced, INSN_LUID (insn))
2490 || DEBUG_INSN_P (insn))
2491 continue;
2493 if (! sched_insns_conditions_mutex_p (last, insn))
2494 add_dependence (last, insn, REG_DEP_ANTI);
2497 if (!targetm.have_conditional_execution ())
2498 return;
2500 /* Finally, if the block ends in a jump, and we are doing intra-block
2501 scheduling, make sure that the branch depends on any COND_EXEC insns
2502 inside the block to avoid moving the COND_EXECs past the branch insn.
2504 We only have to do this after reload, because (1) before reload there
2505 are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2506 scheduler after reload.
2508 FIXME: We could in some cases move COND_EXEC insns past the branch if
2509 this scheduler would be a little smarter. Consider this code:
2511 T = [addr]
2512 C ? addr += 4
2513 !C ? X += 12
2514 C ? T += 1
2515 C ? jump foo
2517 On a target with a one cycle stall on a memory access the optimal
2518 sequence would be:
2520 T = [addr]
2521 C ? addr += 4
2522 C ? T += 1
2523 C ? jump foo
2524 !C ? X += 12
2526 We don't want to put the 'X += 12' before the branch because it just
2527 wastes a cycle of execution time when the branch is taken.
2529 Note that in the example "!C" will always be true. That is another
2530 possible improvement for handling COND_EXECs in this scheduler: it
2531 could remove always-true predicates. */
2533 if (!reload_completed || ! JUMP_P (tail))
2534 return;
2536 insn = tail;
2537 while (insn != head)
2539 insn = PREV_INSN (insn);
2541 /* Note that we want to add this dependency even when
2542 sched_insns_conditions_mutex_p returns true. The whole point
2543 is that we _want_ this dependency, even if these insns really
2544 are independent. */
2545 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2546 add_dependence (tail, insn, REG_DEP_ANTI);
2550 /* Data structures for the computation of data dependences in a region.  We
2551 keep one `deps' structure for every basic block. Before analyzing the
2552 data dependences for a bb, its variables are initialized as a function of
2553 the variables of its predecessors. When the analysis for a bb completes,
2554 we save the contents to the corresponding bb_deps[bb] variable. */
2556 static struct deps_desc *bb_deps;
2558 /* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
2560 static rtx
2561 concat_INSN_LIST (rtx copy, rtx old)
2563 rtx new_rtx = old;
2564 for (; copy ; copy = XEXP (copy, 1))
2566 new_rtx = alloc_INSN_LIST (XEXP (copy, 0), new_rtx);
2567 PUT_REG_NOTE_KIND (new_rtx, REG_NOTE_KIND (copy));
2569 return new_rtx;
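/* Note (added): the elements of COPY end up in reverse order in front
   of OLD, e.g. COPY = (i1 i2) and OLD = (i3) yield (i2 i1 i3).  The
   dependence lists joined below are treated as unordered sets, so the
   order does not matter.  */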
2572 static void
2573 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
2574 rtx *old_mems_p)
2576 rtx new_insns = *old_insns_p;
2577 rtx new_mems = *old_mems_p;
2579 while (copy_insns)
2581 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2582 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2583 copy_insns = XEXP (copy_insns, 1);
2584 copy_mems = XEXP (copy_mems, 1);
2587 *old_insns_p = new_insns;
2588 *old_mems_p = new_mems;
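/* Note (added): COPY_INSNS and COPY_MEMS must be parallel lists of the
   same length; the loop above advances both in lock-step but only tests
   COPY_INSNS for termination.  */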
2591 /* Join PRED_DEPS to the SUCC_DEPS. */
2592 void
2593 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2595 unsigned reg;
2596 reg_set_iterator rsi;
2598 /* The reg_last lists are inherited by successor. */
2599 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2601 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2602 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2604 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2605 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2606 succ_rl->implicit_sets
2607 = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2608 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2609 succ_rl->clobbers);
2610 succ_rl->uses_length += pred_rl->uses_length;
2611 succ_rl->clobbers_length += pred_rl->clobbers_length;
2613 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2615 /* Mem read/write lists are inherited by successor. */
2616 concat_insn_mem_list (pred_deps->pending_read_insns,
2617 pred_deps->pending_read_mems,
2618 &succ_deps->pending_read_insns,
2619 &succ_deps->pending_read_mems);
2620 concat_insn_mem_list (pred_deps->pending_write_insns,
2621 pred_deps->pending_write_mems,
2622 &succ_deps->pending_write_insns,
2623 &succ_deps->pending_write_mems);
2625 succ_deps->last_pending_memory_flush
2626 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2627 succ_deps->last_pending_memory_flush);
2629 succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2630 succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2631 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2633 /* last_function_call is inherited by successor. */
2634 succ_deps->last_function_call
2635 = concat_INSN_LIST (pred_deps->last_function_call,
2636 succ_deps->last_function_call);
2638 /* last_function_call_may_noreturn is inherited by successor. */
2639 succ_deps->last_function_call_may_noreturn
2640 = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2641 succ_deps->last_function_call_may_noreturn);
2643 /* sched_before_next_call is inherited by successor. */
2644 succ_deps->sched_before_next_call
2645 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2646 succ_deps->sched_before_next_call);
2649 /* After computing the dependencies for block BB, propagate the dependencies
2650 found in TMP_DEPS to the successors of the block. */
2651 static void
2652 propagate_deps (int bb, struct deps_desc *pred_deps)
2654 basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
2655 edge_iterator ei;
2656 edge e;
2658 /* bb's structures are inherited by its successors. */
2659 FOR_EACH_EDGE (e, ei, block->succs)
2661 /* Only bbs "below" bb, in the same region, are interesting. */
2662 if (e->dest == EXIT_BLOCK_PTR
2663 || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2664 || BLOCK_TO_BB (e->dest->index) <= bb)
2665 continue;
2667 deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2670 /* These lists should point to the right place, for correct
2671 freeing later. */
2672 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2673 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2674 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2675 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2677 /* Can't allow these to be freed twice. */
2678 pred_deps->pending_read_insns = 0;
2679 pred_deps->pending_read_mems = 0;
2680 pred_deps->pending_write_insns = 0;
2681 pred_deps->pending_write_mems = 0;
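/* Worked example (hypothetical region of three blocks in topological
   order bb0, bb1, bb2 with edges bb0->bb1, bb0->bb2 and bb1->bb2):
   after bb0 is analyzed, deps_join folds its lists into bb_deps[1] and
   bb_deps[2]; after bb1, into bb_deps[2].  So the analysis of bb2
   starts from the union of the contexts of both predecessors.  */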
2684 /* Compute dependences inside bb.  In a multi-block region:
2685 (1) a bb is analyzed after its predecessors, and (2) the lists in
2686 effect at the end of bb (after analyzing for bb) are inherited by
2687 bb's successors.
2689 Specifically for reg-reg data dependences, the block insns are
2690 scanned by sched_analyze () top-to-bottom. Three lists are
2691 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2692 reg_last[].implicit_sets for implicit hard register DEFs, and
2693 reg_last[].uses for register USEs.
2695 When analysis is completed for bb, we update for its successors:
2696 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2697 ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2698    ;  - USES[succ] = Union (USES [succ], USES [bb])
2700 The mechanism for computing mem-mem data dependence is very
2701 similar, and the result is interblock dependences in the region. */
2703 static void
2704 compute_block_dependences (int bb)
2706 rtx head, tail;
2707 struct deps_desc tmp_deps;
2709 tmp_deps = bb_deps[bb];
2711 /* Do the analysis for this block. */
2712 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2713 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2715 sched_analyze (&tmp_deps, head, tail);
2717 /* Selective scheduling handles control dependencies by itself. */
2718 if (!sel_sched_p ())
2719 add_branch_dependences (head, tail);
2721 if (current_nr_blocks > 1)
2722 propagate_deps (bb, &tmp_deps);
2724 /* Free up the INSN_LISTs. */
2725 free_deps (&tmp_deps);
2727 if (targetm.sched.dependencies_evaluation_hook)
2728 targetm.sched.dependencies_evaluation_hook (head, tail);
2731 /* Free dependencies of instructions inside BB. */
2732 static void
2733 free_block_dependencies (int bb)
2735 rtx head;
2736 rtx tail;
2738 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2740 if (no_real_insns_p (head, tail))
2741 return;
2743 sched_free_deps (head, tail, true);
2746 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2747 them to the unused_*_list variables, so that they can be reused. */
2749 static void
2750 free_pending_lists (void)
2752 int bb;
2754 for (bb = 0; bb < current_nr_blocks; bb++)
2756 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2757 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2758 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2759 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2763 /* Print dependences for debugging starting from FROM_BB.
2764 Callable from debugger. */
2767 DEBUG_FUNCTION void
2768 debug_rgn_dependencies (int from_bb)
2770 int bb;
2772 fprintf (sched_dump,
2773 ";; --------------- forward dependences: ------------ \n");
2775 for (bb = from_bb; bb < current_nr_blocks; bb++)
2777 rtx head, tail;
2779 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2780 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2781 BB_TO_BLOCK (bb), bb);
2783 debug_dependencies (head, tail);
2787 /* Print dependencies information for instructions between HEAD and TAIL.
2788 ??? This function would probably fit best in haifa-sched.c. */
2789 void debug_dependencies (rtx head, rtx tail)
2791 rtx insn;
2792 rtx next_tail = NEXT_INSN (tail);
2794 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2795 "insn", "code", "bb", "dep", "prio", "cost",
2796 "reservation");
2797 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2798 "----", "----", "--", "---", "----", "----",
2799 "-----------");
2801 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2803 if (! INSN_P (insn))
2805 int n;
2806 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2807 if (NOTE_P (insn))
2809 n = NOTE_KIND (insn);
2810 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2812 else
2813 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2814 continue;
2817 fprintf (sched_dump,
2818 ";; %s%5d%6d%6d%6d%6d%6d ",
2819 (SCHED_GROUP_P (insn) ? "+" : " "),
2820 INSN_UID (insn),
2821 INSN_CODE (insn),
2822 BLOCK_NUM (insn),
2823 sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2824 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2825 : INSN_PRIORITY (insn))
2826 : INSN_PRIORITY (insn)),
2827 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2828 : insn_cost (insn))
2829 : insn_cost (insn)));
2831 if (recog_memoized (insn) < 0)
2832 fprintf (sched_dump, "nothing");
2833 else
2834 print_reservation (sched_dump, insn);
2836 fprintf (sched_dump, "\t: ");
2838 sd_iterator_def sd_it;
2839 dep_t dep;
2841 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2842 fprintf (sched_dump, "%d ", INSN_UID (DEP_CON (dep)));
2844 fprintf (sched_dump, "\n");
2847 fprintf (sched_dump, "\n");
2850 /* Returns true if all the basic blocks of the current region have
2851    the BB_DISABLE_SCHEDULE flag set, which means that region should not be scheduled.  */
2852 bool
2853 sched_is_disabled_for_current_region_p (void)
2855 int bb;
2857 for (bb = 0; bb < current_nr_blocks; bb++)
2858 if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2859 return false;
2861 return true;
2864 /* Free all region dependencies saved in INSN_BACK_DEPS and
2865 INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
2866 when scheduling, so this function is supposed to be called from
2867 the selective scheduling only. */
2868 void
2869 free_rgn_deps (void)
2871 int bb;
2873 for (bb = 0; bb < current_nr_blocks; bb++)
2875 rtx head, tail;
2877 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2878 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2880 sched_free_deps (head, tail, false);
2884 static int rgn_n_insns;
2886 /* Compute insn priority for a current region. */
2887 void
2888 compute_priorities (void)
2890 int bb;
2892 current_sched_info->sched_max_insns_priority = 0;
2893 for (bb = 0; bb < current_nr_blocks; bb++)
2895 rtx head, tail;
2897 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2898 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2900 if (no_real_insns_p (head, tail))
2901 continue;
2903 rgn_n_insns += set_priorities (head, tail);
2905 current_sched_info->sched_max_insns_priority++;
2908 /* Schedule a region. A region is either an inner loop, a loop-free
2909 subroutine, or a single basic block. Each bb in the region is
2910 scheduled after its flow predecessors. */
2912 static void
2913 schedule_region (int rgn)
2915 int bb;
2916 int sched_rgn_n_insns = 0;
2918 rgn_n_insns = 0;
2920 rgn_setup_region (rgn);
2922   /* Don't schedule a region whose blocks all have
2923      BB_DISABLE_SCHEDULE set.  */
2924 if (sched_is_disabled_for_current_region_p ())
2925 return;
2927 sched_rgn_compute_dependencies (rgn);
2929 sched_rgn_local_init (rgn);
2931 /* Set priorities. */
2932 compute_priorities ();
2934 sched_extend_ready_list (rgn_n_insns);
2936 if (sched_pressure_p)
2938 sched_init_region_reg_pressure_info ();
2939 for (bb = 0; bb < current_nr_blocks; bb++)
2941 basic_block first_bb, last_bb;
2942 rtx head, tail;
2944 first_bb = EBB_FIRST_BB (bb);
2945 last_bb = EBB_LAST_BB (bb);
2947 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
2949 if (no_real_insns_p (head, tail))
2951 gcc_assert (first_bb == last_bb);
2952 continue;
2954 sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
2958 /* Now we can schedule all blocks. */
2959 for (bb = 0; bb < current_nr_blocks; bb++)
2961 basic_block first_bb, last_bb, curr_bb;
2962 rtx head, tail;
2964 first_bb = EBB_FIRST_BB (bb);
2965 last_bb = EBB_LAST_BB (bb);
2967 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
2969 if (no_real_insns_p (head, tail))
2971 gcc_assert (first_bb == last_bb);
2972 continue;
2975 current_sched_info->prev_head = PREV_INSN (head);
2976 current_sched_info->next_tail = NEXT_INSN (tail);
2978 remove_notes (head, tail);
2980 unlink_bb_notes (first_bb, last_bb);
2982 target_bb = bb;
2984 gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
2985 current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
2987 curr_bb = first_bb;
2988 if (dbg_cnt (sched_block))
2990 schedule_block (&curr_bb);
2991 gcc_assert (EBB_FIRST_BB (bb) == first_bb);
2992 sched_rgn_n_insns += sched_n_insns;
2994 else
2996 sched_rgn_n_insns += rgn_n_insns;
2999 /* Clean up. */
3000 if (current_nr_blocks > 1)
3001 free_trg_info ();
3004 /* Sanity check: verify that all region insns were scheduled. */
3005 gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3007 sched_finish_ready_list ();
3009 /* Done with this region. */
3010 sched_rgn_local_finish ();
3012 /* Free dependencies. */
3013 for (bb = 0; bb < current_nr_blocks; ++bb)
3014 free_block_dependencies (bb);
3016 gcc_assert (haifa_recovery_bb_ever_added_p
3017 || deps_pools_are_empty_p ());
3020 /* Initialize data structures for region scheduling. */
3022 void
3023 sched_rgn_init (bool single_blocks_p)
3025 min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3026 / 100);
3028 nr_inter = 0;
3029 nr_spec = 0;
3031 extend_regions ();
3033 CONTAINING_RGN (ENTRY_BLOCK) = -1;
3034 CONTAINING_RGN (EXIT_BLOCK) = -1;
3036 /* Compute regions for scheduling. */
3037 if (single_blocks_p
3038 || n_basic_blocks == NUM_FIXED_BLOCKS + 1
3039 || !flag_schedule_interblock
3040 || is_cfg_nonregular ())
3042 find_single_block_region (sel_sched_p ());
3044 else
3046 /* Compute the dominators and post dominators. */
3047 if (!sel_sched_p ())
3048 calculate_dominance_info (CDI_DOMINATORS);
3050 /* Find regions. */
3051 find_rgns ();
3053 if (sched_verbose >= 3)
3054 debug_regions ();
3056 /* For now. This will move as more and more of haifa is converted
3057 to using the cfg code. */
3058 if (!sel_sched_p ())
3059 free_dominance_info (CDI_DOMINATORS);
3062 gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
3064 RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
3065 RGN_NR_BLOCKS (nr_regions - 1));
3068 /* Free data structures for region scheduling. */
3069 void
3070 sched_rgn_finish (void)
3072 /* Reposition the prologue and epilogue notes in case we moved the
3073 prologue/epilogue insns. */
3074 if (reload_completed)
3075 reposition_prologue_and_epilogue_notes ();
3077 if (sched_verbose)
3079 if (reload_completed == 0
3080 && flag_schedule_interblock)
3082 fprintf (sched_dump,
3083 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3084 nr_inter, nr_spec);
3086 else
3087 gcc_assert (nr_inter <= 0);
3088 fprintf (sched_dump, "\n\n");
3091 nr_regions = 0;
3093 free (rgn_table);
3094 rgn_table = NULL;
3096 free (rgn_bb_table);
3097 rgn_bb_table = NULL;
3099 free (block_to_bb);
3100 block_to_bb = NULL;
3102 free (containing_rgn);
3103 containing_rgn = NULL;
3105 free (ebb_head);
3106 ebb_head = NULL;
3109 /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCK to
3110 point to the region RGN. */
3111 void
3112 rgn_setup_region (int rgn)
3114 int bb;
3116 /* Set variables for the current region. */
3117 current_nr_blocks = RGN_NR_BLOCKS (rgn);
3118 current_blocks = RGN_BLOCKS (rgn);
3120   /* EBB_HEAD is a region-scope structure, but we realloc it for
3121      each region so that it is sized to the current region.
3122      See the comments in rgn_add_block for why we allocate +1 element.  */
3123 ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
3124 for (bb = 0; bb <= current_nr_blocks; bb++)
3125 ebb_head[bb] = current_blocks + bb;
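/* Example (illustrative): for a three-block region with
   RGN_BLOCKS (rgn) == 10 this yields ebb_head = {10, 11, 12, 13};
   the trailing 13 marks the end of the last ebb (see rgn_add_block).  */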
3128 /* Compute instruction dependencies in region RGN. */
3129 void
3130 sched_rgn_compute_dependencies (int rgn)
3132 if (!RGN_DONT_CALC_DEPS (rgn))
3134 int bb;
3136 if (sel_sched_p ())
3137 sched_emulate_haifa_p = 1;
3139 init_deps_global ();
3141 /* Initializations for region data dependence analysis. */
3142 bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
3143 for (bb = 0; bb < current_nr_blocks; bb++)
3144 init_deps (bb_deps + bb, false);
3146 /* Initialize bitmap used in add_branch_dependences. */
3147 insn_referenced = sbitmap_alloc (sched_max_luid);
3148 sbitmap_zero (insn_referenced);
3150 /* Compute backward dependencies. */
3151 for (bb = 0; bb < current_nr_blocks; bb++)
3152 compute_block_dependences (bb);
3154 sbitmap_free (insn_referenced);
3155 free_pending_lists ();
3156 finish_deps_global ();
3157 free (bb_deps);
3159 /* We don't want to recalculate this twice. */
3160 RGN_DONT_CALC_DEPS (rgn) = 1;
3162 if (sel_sched_p ())
3163 sched_emulate_haifa_p = 0;
3165 else
3166 /* (This is a recovery block. It is always a single block region.)
3167 OR (We use selective scheduling.) */
3168 gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
3171 /* Init data structures for region RGN: compute probabilities,
3172    dominators and split edges when the region has more than one block.  */
3173 void
3174 sched_rgn_local_init (int rgn)
3176 int bb;
3178 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
3179 if (current_nr_blocks > 1)
3181 basic_block block;
3182 edge e;
3183 edge_iterator ei;
3185 prob = XNEWVEC (int, current_nr_blocks);
3187 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
3188 sbitmap_vector_zero (dom, current_nr_blocks);
3190 /* Use ->aux to implement EDGE_TO_BIT mapping. */
3191 rgn_nr_edges = 0;
3192 FOR_EACH_BB (block)
3194 if (CONTAINING_RGN (block->index) != rgn)
3195 continue;
3196 FOR_EACH_EDGE (e, ei, block->succs)
3197 SET_EDGE_TO_BIT (e, rgn_nr_edges++);
3200 rgn_edges = XNEWVEC (edge, rgn_nr_edges);
3201 rgn_nr_edges = 0;
3202 FOR_EACH_BB (block)
3204 if (CONTAINING_RGN (block->index) != rgn)
3205 continue;
3206 FOR_EACH_EDGE (e, ei, block->succs)
3207 rgn_edges[rgn_nr_edges++] = e;
3210 /* Split edges. */
3211 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3212 sbitmap_vector_zero (pot_split, current_nr_blocks);
3213 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3214 sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
3216 /* Compute probabilities, dominators, split_edges. */
3217 for (bb = 0; bb < current_nr_blocks; bb++)
3218 compute_dom_prob_ps (bb);
3220 /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
3221 /* We don't need them anymore. But we want to avoid duplication of
3222 aux fields in the newly created edges. */
3223 FOR_EACH_BB (block)
3225 if (CONTAINING_RGN (block->index) != rgn)
3226 continue;
3227 FOR_EACH_EDGE (e, ei, block->succs)
3228 e->aux = NULL;
3233 /* Free data computed for the finished region. */
3234 void
3235 sched_rgn_local_free (void)
3237 free (prob);
3238 sbitmap_vector_free (dom);
3239 sbitmap_vector_free (pot_split);
3240 sbitmap_vector_free (ancestor_edges);
3241 free (rgn_edges);
3244 /* Free data computed for the finished region, unless it is still
     needed by the selective scheduler.  */
3245 void
3246 sched_rgn_local_finish (void)
3248 if (current_nr_blocks > 1 && !sel_sched_p ())
3250 sched_rgn_local_free ();
3254 /* Setup scheduler infos. */
3255 void
3256 rgn_setup_common_sched_info (void)
3258 memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
3259 sizeof (rgn_common_sched_info));
3261 rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
3262 rgn_common_sched_info.add_block = rgn_add_block;
3263 rgn_common_sched_info.estimate_number_of_insns
3264 = rgn_estimate_number_of_insns;
3265 rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
3267 common_sched_info = &rgn_common_sched_info;
3270 /* Setup all *_sched_info structures (for the Haifa frontend
3271 and for the dependence analysis) in the interblock scheduler. */
3272 void
3273 rgn_setup_sched_infos (void)
3275 if (!sel_sched_p ())
3276 memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
3277 sizeof (rgn_sched_deps_info));
3278 else
3279 memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
3280 sizeof (rgn_sched_deps_info));
3282 sched_deps_info = &rgn_sched_deps_info;
3284 memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
3285 current_sched_info = &rgn_sched_info;
3288 /* The one entry point in this file. */
3289 void
3290 schedule_insns (void)
3292 int rgn;
3294 /* Taking care of this degenerate case makes the rest of
3295 this code simpler. */
3296 if (n_basic_blocks == NUM_FIXED_BLOCKS)
3297 return;
3299 rgn_setup_common_sched_info ();
3300 rgn_setup_sched_infos ();
3302 haifa_sched_init ();
3303 sched_rgn_init (reload_completed);
3305 bitmap_initialize (&not_in_df, 0);
3306 bitmap_clear (&not_in_df);
3308 /* Schedule every region in the subroutine. */
3309 for (rgn = 0; rgn < nr_regions; rgn++)
3310 if (dbg_cnt (sched_region))
3311 schedule_region (rgn);
3313 /* Clean up. */
3314 sched_rgn_finish ();
3315 bitmap_clear (&not_in_df);
3317 haifa_sched_finish ();
3320 /* INSN has been added to/removed from current region. */
3321 static void
3322 rgn_add_remove_insn (rtx insn, int remove_p)
3324 if (!remove_p)
3325 rgn_n_insns++;
3326 else
3327 rgn_n_insns--;
3329 if (INSN_BB (insn) == target_bb)
3331 if (!remove_p)
3332 target_n_insns++;
3333 else
3334 target_n_insns--;
3338 /* Extend internal data structures. */
3339 void
3340 extend_regions (void)
3342 rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
3343 rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
3344 block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
3345 containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
3348 void
3349 rgn_make_new_region_out_of_new_block (basic_block bb)
3351 int i;
3353 i = RGN_BLOCKS (nr_regions);
3354   /* I is the first free position in rgn_bb_table.  */
3356 rgn_bb_table[i] = bb->index;
3357 RGN_NR_BLOCKS (nr_regions) = 1;
3358 RGN_HAS_REAL_EBB (nr_regions) = 0;
3359 RGN_DONT_CALC_DEPS (nr_regions) = 0;
3360 CONTAINING_RGN (bb->index) = nr_regions;
3361 BLOCK_TO_BB (bb->index) = 0;
3363 nr_regions++;
3365 RGN_BLOCKS (nr_regions) = i + 1;
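/* Example (illustrative): with nr_regions == 4 and RGN_BLOCKS (4) == 10,
   the new block lands in rgn_bb_table[10] and becomes the single member
   of region 4; nr_regions becomes 5 and RGN_BLOCKS (5) == 11 keeps the
   end sentinel valid.  */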
3368 /* BB was added to ebb after AFTER. */
3369 static void
3370 rgn_add_block (basic_block bb, basic_block after)
3372 extend_regions ();
3373 bitmap_set_bit (&not_in_df, bb->index);
3375 if (after == 0 || after == EXIT_BLOCK_PTR)
3377 rgn_make_new_region_out_of_new_block (bb);
3378 RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
3380 else
3382 int i, pos;
3384       /* We need to fix rgn_table, rgn_bb_table, block_to_bb, containing_rgn
3385 and ebb_head. */
3387 BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
3389 /* We extend ebb_head to one more position to
3390 easily find the last position of the last ebb in
3391 the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
3392 is _always_ valid for access. */
3394 i = BLOCK_TO_BB (after->index) + 1;
3395 pos = ebb_head[i] - 1;
3396       /* Now POS is the index of the last block in the ebb containing AFTER.  */
3398 /* Find index of basic block AFTER. */
3399 for (; rgn_bb_table[pos] != after->index; pos--);
3401 pos++;
3402 gcc_assert (pos > ebb_head[i - 1]);
3404       /* I is the index of the ebb right after AFTER; as noted above,
3405          ebb_head[i] is always valid to access.  */
3407 /* Source position: ebb_head[i]
3408 Destination position: ebb_head[i] + 1
3409 Last position:
3410 RGN_BLOCKS (nr_regions) - 1
3411         Number of elements to copy: (last_position) - (source_position) + 1
3412       */
3414 memmove (rgn_bb_table + pos + 1,
3415 rgn_bb_table + pos,
3416 ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
3417 * sizeof (*rgn_bb_table));
3419 rgn_bb_table[pos] = bb->index;
3421 for (; i <= current_nr_blocks; i++)
3422 ebb_head [i]++;
3424 i = CONTAINING_RGN (after->index);
3425 CONTAINING_RGN (bb->index) = i;
3427 RGN_HAS_REAL_EBB (i) = 1;
3429 for (++i; i <= nr_regions; i++)
3430 RGN_BLOCKS (i)++;
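/* Worked example (illustrative): suppose AFTER sits at rgn_bb_table[4]
   and RGN_BLOCKS (nr_regions) == 7.  Then POS becomes 5, the entries in
   slots 5..6 are shifted to 6..7 (two elements), BB is stored in slot 5,
   and every ebb_head and RGN_BLOCKS entry at or after the insertion
   point is incremented.  */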
3434 /* Fix internal data after interblock movement of a jump instruction.
3435 For parameter meaning please refer to
3436 sched-int.h: struct sched_info: fix_recovery_cfg. */
3437 static void
3438 rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
3440 int old_pos, new_pos, i;
3442 BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
3444 for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
3445 rgn_bb_table[old_pos] != check_bb_nexti;
3446 old_pos--);
3447 gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
3449 for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
3450 rgn_bb_table[new_pos] != bbi;
3451 new_pos--);
3452 new_pos++;
3453 gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
3455 gcc_assert (new_pos < old_pos);
3457 memmove (rgn_bb_table + new_pos + 1,
3458 rgn_bb_table + new_pos,
3459 (old_pos - new_pos) * sizeof (*rgn_bb_table));
3461 rgn_bb_table[new_pos] = check_bb_nexti;
3463 for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
3464 ebb_head[i]++;
3467 /* Return the next block in the ebb chain.  For parameter meaning please refer to
3468 sched-int.h: struct sched_info: advance_target_bb. */
3469 static basic_block
3470 advance_target_bb (basic_block bb, rtx insn)
3472 if (insn)
3473 return 0;
3475 gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
3476 && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
3477 return bb->next_bb;
3480 #endif
3482 static bool
3483 gate_handle_sched (void)
3485 #ifdef INSN_SCHEDULING
3486 return flag_schedule_insns && dbg_cnt (sched_func);
3487 #else
3488 return 0;
3489 #endif
3492 /* Run instruction scheduler. */
3493 static unsigned int
3494 rest_of_handle_sched (void)
3496 #ifdef INSN_SCHEDULING
3497 if (flag_selective_scheduling
3498 && ! maybe_skip_selective_scheduling ())
3499 run_selective_scheduling ();
3500 else
3501 schedule_insns ();
3502 #endif
3503 return 0;
3506 static bool
3507 gate_handle_sched2 (void)
3509 #ifdef INSN_SCHEDULING
3510 return optimize > 0 && flag_schedule_insns_after_reload
3511 && dbg_cnt (sched2_func);
3512 #else
3513 return 0;
3514 #endif
3517 /* Run second scheduling pass after reload. */
3518 static unsigned int
3519 rest_of_handle_sched2 (void)
3521 #ifdef INSN_SCHEDULING
3522 if (flag_selective_scheduling2
3523 && ! maybe_skip_selective_scheduling ())
3524 run_selective_scheduling ();
3525 else
3527 /* Do control and data sched analysis again,
3528 and write some more of the results to dump file. */
3529 if (flag_sched2_use_superblocks)
3530 schedule_ebbs ();
3531 else
3532 schedule_insns ();
3534 #endif
3535 return 0;
3538 struct rtl_opt_pass pass_sched =
3541 RTL_PASS,
3542 "sched1", /* name */
3543 gate_handle_sched, /* gate */
3544 rest_of_handle_sched, /* execute */
3545 NULL, /* sub */
3546 NULL, /* next */
3547 0, /* static_pass_number */
3548 TV_SCHED, /* tv_id */
3549 0, /* properties_required */
3550 0, /* properties_provided */
3551 0, /* properties_destroyed */
3552 0, /* todo_flags_start */
3553 TODO_df_finish | TODO_verify_rtl_sharing |
3554 TODO_dump_func |
3555 TODO_verify_flow |
3556 TODO_ggc_collect /* todo_flags_finish */
3560 struct rtl_opt_pass pass_sched2 =
3563 RTL_PASS,
3564 "sched2", /* name */
3565 gate_handle_sched2, /* gate */
3566 rest_of_handle_sched2, /* execute */
3567 NULL, /* sub */
3568 NULL, /* next */
3569 0, /* static_pass_number */
3570 TV_SCHED2, /* tv_id */
3571 0, /* properties_required */
3572 0, /* properties_provided */
3573 0, /* properties_destroyed */
3574 0, /* todo_flags_start */
3575 TODO_df_finish | TODO_verify_rtl_sharing |
3576 TODO_dump_func |
3577 TODO_verify_flow |
3578 TODO_ggc_collect /* todo_flags_finish */