gcc/sched-rgn.c
1 /* Instruction scheduling pass.
2 Copyright (C) 1992-2014 Free Software Foundation, Inc.
3 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* This pass implements list scheduling within basic blocks. It is
23 run twice: (1) after flow analysis, but before register allocation,
24 and (2) after register allocation.
26 The first run performs interblock scheduling, moving insns between
27 different blocks in the same "region", and the second runs only
28 basic block scheduling.
30 Interblock motions performed are useful motions and speculative
31 motions, including speculative loads. Motions requiring code
32 duplication are not supported. The identification of motion type
33 and the check for validity of speculative motions requires
34 construction and analysis of the function's control flow graph.
36 The main entry point for this pass is schedule_insns(), called for
37 each function. The work of the scheduler is organized in three
38 levels: (1) function level: insns are subject to splitting,
39 control-flow-graph is constructed, regions are computed (after
40 reload, each region is of one block), (2) region level: control
41 flow graph attributes required for interblock scheduling are
42 computed (dominators, reachability, etc.), data dependences and
43 priorities are computed, and (3) block level: insns in the block
44 are actually scheduled. */
46 #include "config.h"
47 #include "system.h"
48 #include "coretypes.h"
49 #include "tm.h"
50 #include "diagnostic-core.h"
51 #include "rtl.h"
52 #include "tm_p.h"
53 #include "hard-reg-set.h"
54 #include "regs.h"
55 #include "function.h"
56 #include "flags.h"
57 #include "insn-config.h"
58 #include "insn-attr.h"
59 #include "except.h"
60 #include "recog.h"
61 #include "params.h"
62 #include "sched-int.h"
63 #include "sel-sched.h"
64 #include "target.h"
65 #include "tree-pass.h"
66 #include "dbgcnt.h"
68 #ifdef INSN_SCHEDULING
70 /* Some accessor macros for h_i_d members only used within this file. */
71 #define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
72 #define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
74 /* nr_inter/spec counts interblock/speculative motion for the function. */
75 static int nr_inter, nr_spec;
77 static int is_cfg_nonregular (void);
79 /* Number of regions in the procedure. */
80 int nr_regions = 0;
82 /* Table of region descriptions. */
83 region *rgn_table = NULL;
85 /* Array of lists of regions' blocks. */
86 int *rgn_bb_table = NULL;
88 /* Topological order of blocks in the region (if b2 is reachable from
89 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
90 always referred to by either block or b, while its topological
91 order name (in the region) is referred to by bb. */
92 int *block_to_bb = NULL;
94 /* The number of the region containing a block. */
95 int *containing_rgn = NULL;
97 /* ebb_head [i] is the index in rgn_bb_table of the head basic block of the i'th ebb.
98 Currently we can get an ebb only through splitting of the currently
99 scheduled block; therefore we don't need an ebb_head array for every region,
100 so it is sufficient to hold it for the current one only. */
101 int *ebb_head = NULL;
103 /* The minimum probability of reaching a source block so that it will be
104 considered for speculative scheduling. */
105 static int min_spec_prob;
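/* min_spec_prob is on the same REG_BR_PROB_BASE scale as SRC_PROB below;
   see the comparison in compute_trg_info. */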
107 static void find_single_block_region (bool);
108 static void find_rgns (void);
109 static bool too_large (int, int *, int *);
111 /* Blocks of the current region being scheduled. */
112 int current_nr_blocks;
113 int current_blocks;
115 /* A speculative motion requires checking live information on the path
116 from 'source' to 'target'. The split blocks are those to be checked.
117 After a speculative motion, live information should be modified in
118 the 'update' blocks.
120 Lists of split and update blocks for each candidate of the current
121 target are in array bblst_table. */
122 static basic_block *bblst_table;
123 static int bblst_size, bblst_last;
125 /* Arrays that hold the DFA state at the end of a basic block, to re-use
126 as the initial state at the start of successor blocks. The BB_STATE
127 array holds the actual DFA state, and BB_STATE_ARRAY[I] is a pointer
128 into BB_STATE for basic block I. FIXME: This should be a vec. */
129 static char *bb_state_array = NULL;
130 static state_t *bb_state = NULL;
132 /* Target info declarations.
134 The block currently being scheduled is referred to as the "target" block,
135 while other blocks in the region from which insns can be moved to the
136 target are called "source" blocks. The candidate structure holds info
137 about such sources: are they valid? Speculative? Etc. */
138 typedef struct
140 basic_block *first_member;
141 int nr_members;
143 bblst;
145 typedef struct
147 char is_valid;
148 char is_speculative;
149 int src_prob;
150 bblst split_bbs;
151 bblst update_bbs;
153 candidate;
155 static candidate *candidate_table;
156 #define IS_VALID(src) (candidate_table[src].is_valid)
157 #define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
158 #define IS_SPECULATIVE_INSN(INSN) \
159 (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
160 #define SRC_PROB(src) ( candidate_table[src].src_prob )
162 /* The bb being currently scheduled. */
163 int target_bb;
165 /* List of edges. */
166 typedef struct
168 edge *first_member;
169 int nr_members;
171 edgelst;
173 static edge *edgelst_table;
174 static int edgelst_last;
176 static void extract_edgelst (sbitmap, edgelst *);
178 /* Target info functions. */
179 static void split_edges (int, int, edgelst *);
180 static void compute_trg_info (int);
181 void debug_candidate (int);
182 void debug_candidates (int);
184 /* Dominators array: dom[i] contains the sbitmap of dominators of
185 bb i in the region. */
186 static sbitmap *dom;
188 /* bb 0 is the only region entry. */
189 #define IS_RGN_ENTRY(bb) (!bb)
191 /* Is bb_src dominated by bb_trg. */
192 #define IS_DOMINATED(bb_src, bb_trg) \
193 ( bitmap_bit_p (dom[bb_src], bb_trg) )
195 /* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
196 the probability of bb i relative to the region entry. */
197 static int *prob;
199 /* Bit-set of edges, where bit i stands for edge i. */
200 typedef sbitmap edgeset;
202 /* Number of edges in the region. */
203 static int rgn_nr_edges;
205 /* Array of size rgn_nr_edges. */
206 static edge *rgn_edges;
208 /* Mapping from each edge in the graph to its number in the rgn. */
209 #define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
210 #define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
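/* Both macros use the edge's ->aux field to stash the region-relative
   edge number. */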
212 /* The split edges of a source bb are different for each target
213 bb. In order to compute this efficiently, the 'potential-split edges'
214 are computed for each bb prior to scheduling a region. This is actually
215 the split edges of each bb relative to the region entry.
217 pot_split[bb] is the set of potential split edges of bb. */
218 static edgeset *pot_split;
220 /* For every bb, a set of its ancestor edges. */
221 static edgeset *ancestor_edges;
223 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
225 /* Speculative scheduling functions. */
226 static int check_live_1 (int, rtx);
227 static void update_live_1 (int, rtx);
228 static int is_pfree (rtx, int, int);
229 static int find_conditional_protection (rtx, int);
230 static int is_conditionally_protected (rtx, int, int);
231 static int is_prisky (rtx, int, int);
232 static int is_exception_free (rtx, int, int);
234 static bool sets_likely_spilled (rtx);
235 static void sets_likely_spilled_1 (rtx, const_rtx, void *);
236 static void add_branch_dependences (rtx, rtx);
237 static void compute_block_dependences (int);
239 static void schedule_region (int);
240 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
241 static void propagate_deps (int, struct deps_desc *);
242 static void free_pending_lists (void);
244 /* Functions for construction of the control flow graph. */
246 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
248 We decide not to build the control flow graph if there is possibly more
249 than one entry to the function, if computed branches exist, if we
250 have nonlocal gotos, or if we have an unreachable loop. */
252 static int
253 is_cfg_nonregular (void)
255 basic_block b;
256 rtx insn;
258 /* If we have a label that could be the target of a nonlocal goto, then
259 the cfg is not well structured. */
260 if (nonlocal_goto_handler_labels)
261 return 1;
263 /* If we have any forced labels, then the cfg is not well structured. */
264 if (forced_labels)
265 return 1;
267 /* If we have exception handlers, then we consider the cfg not well
268 structured. ?!? We should be able to handle this now that we
269 compute an accurate cfg for EH. */
270 if (current_function_has_exception_handlers ())
271 return 1;
273 /* If we have insns which refer to labels as non-jumped-to operands,
274 then we consider the cfg not well structured. */
275 FOR_EACH_BB_FN (b, cfun)
276 FOR_BB_INSNS (b, insn)
278 rtx note, next, set, dest;
280 /* If this function has a computed jump, then we consider the cfg
281 not well structured. */
282 if (JUMP_P (insn) && computed_jump_p (insn))
283 return 1;
285 if (!INSN_P (insn))
286 continue;
288 note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
289 if (note == NULL_RTX)
290 continue;
292 /* For that label not to be seen as a referred-to label, this
293 must be a single-set which is feeding a jump *only*. This
294 could be a conditional jump with the label split off for
295 machine-specific reasons or a casesi/tablejump. */
296 next = next_nonnote_insn (insn);
297 if (next == NULL_RTX
298 || !JUMP_P (next)
299 || (JUMP_LABEL (next) != XEXP (note, 0)
300 && find_reg_note (next, REG_LABEL_TARGET,
301 XEXP (note, 0)) == NULL_RTX)
302 || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
303 return 1;
305 set = single_set (insn);
306 if (set == NULL_RTX)
307 return 1;
309 dest = SET_DEST (set);
310 if (!REG_P (dest) || !dead_or_set_p (next, dest))
311 return 1;
314 /* Unreachable loops with more than one basic block are detected
315 during the DFS traversal in find_rgns.
317 Unreachable loops with a single block are detected here. This
318 test is redundant with the one in find_rgns, but it's much
319 cheaper to go ahead and catch the trivial case here. */
320 FOR_EACH_BB_FN (b, cfun)
322 if (EDGE_COUNT (b->preds) == 0
323 || (single_pred_p (b)
324 && single_pred (b) == b))
325 return 1;
328 /* All the tests passed. Consider the cfg well structured. */
329 return 0;
332 /* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */
334 static void
335 extract_edgelst (sbitmap set, edgelst *el)
337 unsigned int i = 0;
338 sbitmap_iterator sbi;
340 /* edgelst table space is reused in each call to extract_edgelst. */
341 edgelst_last = 0;
343 el->first_member = &edgelst_table[edgelst_last];
344 el->nr_members = 0;
346 /* Iterate over each set bit in the bitmap. */
347 EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
349 edgelst_table[edgelst_last++] = rgn_edges[i];
350 el->nr_members++;
354 /* Functions for the construction of regions. */
356 /* Print the regions, for debugging purposes. Callable from debugger. */
358 DEBUG_FUNCTION void
359 debug_regions (void)
361 int rgn, bb;
363 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
364 for (rgn = 0; rgn < nr_regions; rgn++)
366 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
367 rgn_table[rgn].rgn_nr_blocks);
368 fprintf (sched_dump, ";;\tbb/block: ");
370 /* We don't have ebb_head initialized yet, so we can't use
371 BB_TO_BLOCK (). */
372 current_blocks = RGN_BLOCKS (rgn);
374 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
375 fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
377 fprintf (sched_dump, "\n\n");
381 /* Print the region's basic blocks. */
383 DEBUG_FUNCTION void
384 debug_region (int rgn)
386 int bb;
388 fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
389 fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
390 rgn_table[rgn].rgn_nr_blocks);
391 fprintf (stderr, ";;\tbb/block: ");
393 /* We don't have ebb_head initialized yet, so we can't use
394 BB_TO_BLOCK (). */
395 current_blocks = RGN_BLOCKS (rgn);
397 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
398 fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
400 fprintf (stderr, "\n\n");
402 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
404 dump_bb (stderr,
405 BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
406 0, TDF_SLIM | TDF_BLOCKS);
407 fprintf (stderr, "\n");
410 fprintf (stderr, "\n");
414 /* True when a bb with index BB_INDEX is contained in region RGN. */
415 static bool
416 bb_in_region_p (int bb_index, int rgn)
418 int i;
420 for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
421 if (rgn_bb_table[current_blocks + i] == bb_index)
422 return true;
424 return false;
427 /* Dump region RGN to file F using dot syntax. */
428 void
429 dump_region_dot (FILE *f, int rgn)
431 int i;
433 fprintf (f, "digraph Region_%d {\n", rgn);
435 /* We don't have ebb_head initialized yet, so we can't use
436 BB_TO_BLOCK (). */
437 current_blocks = RGN_BLOCKS (rgn);
439 for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
441 edge e;
442 edge_iterator ei;
443 int src_bb_num = rgn_bb_table[current_blocks + i];
444 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num);
446 FOR_EACH_EDGE (e, ei, bb->succs)
447 if (bb_in_region_p (e->dest->index, rgn))
448 fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
450 fprintf (f, "}\n");
453 /* The same, but first open a file specified by FNAME. */
454 void
455 dump_region_dot_file (const char *fname, int rgn)
457 FILE *f = fopen (fname, "wt");
458 dump_region_dot (f, rgn);
459 fclose (f);
462 /* Build a single block region for each basic block in the function.
463 This allows for using the same code for interblock and basic block
464 scheduling. */
466 static void
467 find_single_block_region (bool ebbs_p)
469 basic_block bb, ebb_start;
470 int i = 0;
472 nr_regions = 0;
474 if (ebbs_p) {
475 int probability_cutoff;
476 if (profile_info && flag_branch_probabilities)
477 probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
478 else
479 probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
480 probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;
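/* probability_cutoff is now on the REG_BR_PROB_BASE scale; an ebb is cut
   at the first fallthru edge whose probability does not exceed it (see
   the check on e->probability below). */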
482 FOR_EACH_BB_FN (ebb_start, cfun)
484 RGN_NR_BLOCKS (nr_regions) = 0;
485 RGN_BLOCKS (nr_regions) = i;
486 RGN_DONT_CALC_DEPS (nr_regions) = 0;
487 RGN_HAS_REAL_EBB (nr_regions) = 0;
489 for (bb = ebb_start; ; bb = bb->next_bb)
491 edge e;
493 rgn_bb_table[i] = bb->index;
494 RGN_NR_BLOCKS (nr_regions)++;
495 CONTAINING_RGN (bb->index) = nr_regions;
496 BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
497 i++;
499 if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
500 || LABEL_P (BB_HEAD (bb->next_bb)))
501 break;
503 e = find_fallthru_edge (bb->succs);
504 if (! e)
505 break;
506 if (e->probability <= probability_cutoff)
507 break;
510 ebb_start = bb;
511 nr_regions++;
514 else
515 FOR_EACH_BB_FN (bb, cfun)
517 rgn_bb_table[nr_regions] = bb->index;
518 RGN_NR_BLOCKS (nr_regions) = 1;
519 RGN_BLOCKS (nr_regions) = nr_regions;
520 RGN_DONT_CALC_DEPS (nr_regions) = 0;
521 RGN_HAS_REAL_EBB (nr_regions) = 0;
523 CONTAINING_RGN (bb->index) = nr_regions;
524 BLOCK_TO_BB (bb->index) = 0;
525 nr_regions++;
529 /* Estimate the number of insns in BB. */
530 static int
531 rgn_estimate_number_of_insns (basic_block bb)
533 int count;
535 count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));
537 if (MAY_HAVE_DEBUG_INSNS)
539 rtx insn;
541 FOR_BB_INSNS (bb, insn)
542 if (DEBUG_INSN_P (insn))
543 count--;
546 return count;
549 /* Update number of blocks and the estimate for number of insns
550 in the region. Return true if the region is "too large" for interblock
551 scheduling (compile time considerations). */
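/* These limits are controlled by the max-sched-region-blocks and
   max-sched-region-insns params. */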
553 static bool
554 too_large (int block, int *num_bbs, int *num_insns)
556 (*num_bbs)++;
557 (*num_insns) += (common_sched_info->estimate_number_of_insns
558 (BASIC_BLOCK_FOR_FN (cfun, block)));
560 return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
561 || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
564 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
565 is still an inner loop. Put in max_hdr[blk] the header of the innermost
566 loop containing blk. */
567 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
569 if (max_hdr[blk] == -1) \
570 max_hdr[blk] = hdr; \
571 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
572 bitmap_clear_bit (inner, hdr); \
573 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
575 bitmap_clear_bit (inner,max_hdr[blk]); \
576 max_hdr[blk] = hdr; \
580 /* Find regions for interblock scheduling.
582 A region for scheduling can be:
584 * A loop-free procedure, or
586 * A reducible inner loop, or
588 * A basic block not contained in any other region.
590 ?!? In theory we could build other regions based on extended basic
591 blocks or reverse extended basic blocks. Is it worth the trouble?
593 Loop blocks that form a region are put into the region's block list
594 in topological order.
596 This procedure stores its results into the following global (ick) variables
598 * nr_regions
599 * rgn_table
600 * rgn_bb_table
601 * block_to_bb
602 * containing_rgn
604 We use dominator relationships to avoid making regions out of non-reducible
605 loops.
607 This procedure needs to be converted to work on pred/succ lists instead
608 of edge tables. That would simplify it somewhat. */
610 static void
611 haifa_find_rgns (void)
613 int *max_hdr, *dfs_nr, *degree;
614 char no_loops = 1;
615 int node, child, loop_head, i, head, tail;
616 int count = 0, sp, idx = 0;
617 edge_iterator current_edge;
618 edge_iterator *stack;
619 int num_bbs, num_insns, unreachable;
620 int too_large_failure;
621 basic_block bb;
623 /* Note if a block is a natural loop header. */
624 sbitmap header;
626 /* Note if a block is a natural inner loop header. */
627 sbitmap inner;
629 /* Note if a block is in the block queue. */
630 sbitmap in_queue;
632 /* Note if a block is on the DFS traversal stack. */
633 sbitmap in_stack;
635 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
636 and a mapping from block to its loop header (if the block is contained
637 in a loop, else -1).
639 Store results in HEADER, INNER, and MAX_HDR respectively, these will
640 be used as inputs to the second traversal.
642 STACK, SP and DFS_NR are only used during the first traversal. */
644 /* Allocate and initialize variables for the first traversal. */
645 max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
646 dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun));
647 stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun));
649 inner = sbitmap_alloc (last_basic_block_for_fn (cfun));
650 bitmap_ones (inner);
652 header = sbitmap_alloc (last_basic_block_for_fn (cfun));
653 bitmap_clear (header);
655 in_queue = sbitmap_alloc (last_basic_block_for_fn (cfun));
656 bitmap_clear (in_queue);
658 in_stack = sbitmap_alloc (last_basic_block_for_fn (cfun));
659 bitmap_clear (in_stack);
661 for (i = 0; i < last_basic_block_for_fn (cfun); i++)
662 max_hdr[i] = -1;
664 #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
665 #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
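/* An edge is marked as passed by pointing its ->aux field at itself;
   the field is cleared again after the traversal (see the FOR_ALL_BB_FN
   loop below that resets ->aux). */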
667 /* DFS traversal to find inner loops in the cfg. */
669 current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs);
670 sp = -1;
672 while (1)
674 if (EDGE_PASSED (current_edge))
676 /* We have reached a leaf node or a node that was already
677 processed. Pop edges off the stack until we find
678 an edge that has not yet been processed. */
679 while (sp >= 0 && EDGE_PASSED (current_edge))
681 /* Pop entry off the stack. */
682 current_edge = stack[sp--];
683 node = ei_edge (current_edge)->src->index;
684 gcc_assert (node != ENTRY_BLOCK);
685 child = ei_edge (current_edge)->dest->index;
686 gcc_assert (child != EXIT_BLOCK);
687 bitmap_clear_bit (in_stack, child);
688 if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
689 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
690 ei_next (&current_edge);
693 /* See if we have finished the DFS tree traversal. */
694 if (sp < 0 && EDGE_PASSED (current_edge))
695 break;
697 /* Nope, continue the traversal with the popped node. */
698 continue;
701 /* Process a node. */
702 node = ei_edge (current_edge)->src->index;
703 gcc_assert (node != ENTRY_BLOCK);
704 bitmap_set_bit (in_stack, node);
705 dfs_nr[node] = ++count;
707 /* We don't traverse to the exit block. */
708 child = ei_edge (current_edge)->dest->index;
709 if (child == EXIT_BLOCK)
711 SET_EDGE_PASSED (current_edge);
712 ei_next (&current_edge);
713 continue;
716 /* If the successor is in the stack, then we've found a loop.
717 Mark the loop, if it is not a natural loop, then it will
718 be rejected during the second traversal. */
719 if (bitmap_bit_p (in_stack, child))
721 no_loops = 0;
722 bitmap_set_bit (header, child);
723 UPDATE_LOOP_RELATIONS (node, child);
724 SET_EDGE_PASSED (current_edge);
725 ei_next (&current_edge);
726 continue;
729 /* If the child was already visited, then there is no need to visit
730 it again. Just update the loop relationships and restart
731 with a new edge. */
732 if (dfs_nr[child])
734 if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
735 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
736 SET_EDGE_PASSED (current_edge);
737 ei_next (&current_edge);
738 continue;
741 /* Push an entry on the stack and continue DFS traversal. */
742 stack[++sp] = current_edge;
743 SET_EDGE_PASSED (current_edge);
744 current_edge = ei_start (ei_edge (current_edge)->dest->succs);
747 /* Reset ->aux field used by EDGE_PASSED. */
748 FOR_ALL_BB_FN (bb, cfun)
750 edge_iterator ei;
751 edge e;
752 FOR_EACH_EDGE (e, ei, bb->succs)
753 e->aux = NULL;
757 /* Another check for unreachable blocks. The earlier test in
758 is_cfg_nonregular only finds unreachable blocks that do not
759 form a loop.
761 The DFS traversal will mark every block that is reachable from
762 the entry node by placing a nonzero value in dfs_nr. Thus if
763 dfs_nr is zero for any block, then it must be unreachable. */
764 unreachable = 0;
765 FOR_EACH_BB_FN (bb, cfun)
766 if (dfs_nr[bb->index] == 0)
768 unreachable = 1;
769 break;
772 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
773 to hold degree counts. */
774 degree = dfs_nr;
776 FOR_EACH_BB_FN (bb, cfun)
777 degree[bb->index] = EDGE_COUNT (bb->preds);
779 /* Do not perform region scheduling if there are any unreachable
780 blocks. */
781 if (!unreachable)
783 int *queue, *degree1 = NULL;
784 /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
785 into it the basic blocks that are forced to be region heads.
786 This is done to try to assemble a few smaller regions
787 from a too_large region. */
788 sbitmap extended_rgn_header = NULL;
789 bool extend_regions_p;
791 if (no_loops)
792 bitmap_set_bit (header, 0);
794 /* Second traversal: find reducible inner loops and topologically sort
795 the blocks of each region. */
797 queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
799 extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
800 if (extend_regions_p)
802 degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun));
803 extended_rgn_header =
804 sbitmap_alloc (last_basic_block_for_fn (cfun));
805 bitmap_clear (extended_rgn_header);
808 /* Find blocks which are inner loop headers. We still have non-reducible
809 loops to consider at this point. */
810 FOR_EACH_BB_FN (bb, cfun)
812 if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
814 edge e;
815 edge_iterator ei;
816 basic_block jbb;
818 /* Now check that the loop is reducible. We do this separately
819 from finding inner loops so that we do not find a reducible
820 loop which contains an inner non-reducible loop.
822 A simple way to find reducible/natural loops is to verify
823 that each block in the loop is dominated by the loop
824 header.
826 If there exists a block that is not dominated by the loop
827 header, then the block is reachable from outside the loop
828 and thus the loop is not a natural loop. */
829 FOR_EACH_BB_FN (jbb, cfun)
831 /* First identify blocks in the loop, except for the loop
832 entry block. */
833 if (bb->index == max_hdr[jbb->index] && bb != jbb)
835 /* Now verify that the block is dominated by the loop
836 header. */
837 if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
838 break;
842 /* If we exited the loop early, then BB is the header of
843 a non-reducible loop and we should quit processing it
844 now. */
845 if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun))
846 continue;
848 /* BB is a header of an inner loop, or block 0 in a subroutine
849 with no loops at all. */
850 head = tail = -1;
851 too_large_failure = 0;
852 loop_head = max_hdr[bb->index];
854 if (extend_regions_p)
855 /* We save degree in case when we meet a too_large region
856 and cancel it. We need a correct degree later when
857 calling extend_rgns. */
858 memcpy (degree1, degree,
859 last_basic_block_for_fn (cfun) * sizeof (int));
861 /* Decrease the degree of all BB's successors for topological
862 ordering. */
863 FOR_EACH_EDGE (e, ei, bb->succs)
864 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
865 --degree[e->dest->index];
867 /* Estimate # insns, and count # blocks in the region. */
868 num_bbs = 1;
869 num_insns = common_sched_info->estimate_number_of_insns (bb);
871 /* Find all loop latches (blocks with back edges to the loop
872 header), or, if the cfg has no loops, all the leaf blocks.
874 Place those blocks into the queue. */
875 if (no_loops)
877 FOR_EACH_BB_FN (jbb, cfun)
878 /* Leaf nodes have only a single successor which must
879 be EXIT_BLOCK. */
880 if (single_succ_p (jbb)
881 && single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
883 queue[++tail] = jbb->index;
884 bitmap_set_bit (in_queue, jbb->index);
886 if (too_large (jbb->index, &num_bbs, &num_insns))
888 too_large_failure = 1;
889 break;
893 else
895 edge e;
897 FOR_EACH_EDGE (e, ei, bb->preds)
899 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
900 continue;
902 node = e->src->index;
904 if (max_hdr[node] == loop_head && node != bb->index)
906 /* This is a loop latch. */
907 queue[++tail] = node;
908 bitmap_set_bit (in_queue, node);
910 if (too_large (node, &num_bbs, &num_insns))
912 too_large_failure = 1;
913 break;
919 /* Now add all the blocks in the loop to the queue.
921 We know the loop is a natural loop; however the algorithm
922 above will not always mark certain blocks as being in the
923 loop. Consider:
924 node children
925 a b,c
927 c a,d
930 The algorithm in the DFS traversal may not mark B & D as part
931 of the loop (i.e. they will not have max_hdr set to A).
933 We know they cannot be loop latches (else they would have
934 had max_hdr set since they'd have a backedge to a dominator
935 block). So we don't need them on the initial queue.
937 We know they are part of the loop because they are dominated
938 by the loop header and can be reached by a backwards walk of
939 the edges starting with nodes on the initial queue.
941 It is safe and desirable to include those nodes in the
942 loop/scheduling region. To do so we would need to decrease
943 the degree of a node if it is the target of a backedge
944 within the loop itself as the node is placed in the queue.
946 We do not do this because I'm not sure that the actual
947 scheduling code will properly handle this case. ?!? */
949 while (head < tail && !too_large_failure)
951 edge e;
952 child = queue[++head];
954 FOR_EACH_EDGE (e, ei,
955 BASIC_BLOCK_FOR_FN (cfun, child)->preds)
957 node = e->src->index;
959 /* See discussion above about nodes not marked as in
960 this loop during the initial DFS traversal. */
961 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
962 || max_hdr[node] != loop_head)
964 tail = -1;
965 break;
967 else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
969 queue[++tail] = node;
970 bitmap_set_bit (in_queue, node);
972 if (too_large (node, &num_bbs, &num_insns))
974 too_large_failure = 1;
975 break;
981 if (tail >= 0 && !too_large_failure)
983 /* Place the loop header into list of region blocks. */
984 degree[bb->index] = -1;
985 rgn_bb_table[idx] = bb->index;
986 RGN_NR_BLOCKS (nr_regions) = num_bbs;
987 RGN_BLOCKS (nr_regions) = idx++;
988 RGN_DONT_CALC_DEPS (nr_regions) = 0;
989 RGN_HAS_REAL_EBB (nr_regions) = 0;
990 CONTAINING_RGN (bb->index) = nr_regions;
991 BLOCK_TO_BB (bb->index) = count = 0;
993 /* Remove blocks from queue[] when their in degree
994 becomes zero. Repeat until no blocks are left on the
995 list. This produces a topological list of blocks in
996 the region. */
997 while (tail >= 0)
999 if (head < 0)
1000 head = tail;
1001 child = queue[head];
1002 if (degree[child] == 0)
1004 edge e;
1006 degree[child] = -1;
1007 rgn_bb_table[idx++] = child;
1008 BLOCK_TO_BB (child) = ++count;
1009 CONTAINING_RGN (child) = nr_regions;
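/* Remove CHILD from the queue by moving the last queued element
   into its slot, keeping the queue dense. */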
1010 queue[head] = queue[tail--];
1012 FOR_EACH_EDGE (e, ei,
1013 BASIC_BLOCK_FOR_FN (cfun,
1014 child)->succs)
1015 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1016 --degree[e->dest->index];
1018 else
1019 --head;
1021 ++nr_regions;
1023 else if (extend_regions_p)
1025 /* Restore DEGREE. */
1026 int *t = degree;
1028 degree = degree1;
1029 degree1 = t;
1031 /* And force successors of BB to be region heads.
1032 This may provide several smaller regions instead
1033 of one too_large region. */
1034 FOR_EACH_EDGE (e, ei, bb->succs)
1035 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1036 bitmap_set_bit (extended_rgn_header, e->dest->index);
1040 free (queue);
1042 if (extend_regions_p)
1044 free (degree1);
1046 bitmap_ior (header, header, extended_rgn_header);
1047 sbitmap_free (extended_rgn_header);
1049 extend_rgns (degree, &idx, header, max_hdr);
1053 /* Any block that did not end up in a region is placed into a region
1054 by itself. */
1055 FOR_EACH_BB_FN (bb, cfun)
1056 if (degree[bb->index] >= 0)
1058 rgn_bb_table[idx] = bb->index;
1059 RGN_NR_BLOCKS (nr_regions) = 1;
1060 RGN_BLOCKS (nr_regions) = idx++;
1061 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1062 RGN_HAS_REAL_EBB (nr_regions) = 0;
1063 CONTAINING_RGN (bb->index) = nr_regions++;
1064 BLOCK_TO_BB (bb->index) = 0;
1067 free (max_hdr);
1068 free (degree);
1069 free (stack);
1070 sbitmap_free (header);
1071 sbitmap_free (inner);
1072 sbitmap_free (in_queue);
1073 sbitmap_free (in_stack);
1077 /* Wrapper function.
1078 If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
1079 regions. Otherwise just call haifa_find_rgns. */
1080 static void
1081 find_rgns (void)
1083 if (sel_sched_p () && flag_sel_sched_pipelining)
1084 sel_find_rgns ();
1085 else
1086 haifa_find_rgns ();
1089 static int gather_region_statistics (int **);
1090 static void print_region_statistics (int *, int, int *, int);
1092 /* Calculate the histogram that shows the number of regions having the
1093 given number of basic blocks, and store it in the RSP array. Return
1094 the size of this array. */
1095 static int
1096 gather_region_statistics (int **rsp)
1098 int i, *a = 0, a_sz = 0;
1100 /* a[i] is the number of regions that have (i + 1) basic blocks. */
1101 for (i = 0; i < nr_regions; i++)
1103 int nr_blocks = RGN_NR_BLOCKS (i);
1105 gcc_assert (nr_blocks >= 1);
1107 if (nr_blocks > a_sz)
1109 a = XRESIZEVEC (int, a, nr_blocks);
1111 a[a_sz++] = 0;
1112 while (a_sz != nr_blocks);
1115 a[nr_blocks - 1]++;
1118 *rsp = a;
1119 return a_sz;
1122 /* Print regions statistics. S1 and S2 denote the data before and after
1123 calling extend_rgns, respectively. */
1124 static void
1125 print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
1127 int i;
1129 /* We iterate until s2_sz because extend_rgns does not decrease
1130 the maximal region size. */
1131 for (i = 1; i < s2_sz; i++)
1133 int n1, n2;
1135 n2 = s2[i];
1137 if (n2 == 0)
1138 continue;
1140 if (i >= s1_sz)
1141 n1 = 0;
1142 else
1143 n1 = s1[i];
1145 fprintf (sched_dump, ";; Region extension statistics: size %d: " \
1146 "was %d + %d more\n", i + 1, n1, n2 - n1);
1150 /* Extend regions.
1151 DEGREE - Array of incoming edge counts, considering only
1152 the edges that don't have their sources in formed regions yet.
1153 IDXP - pointer to the next available index in rgn_bb_table.
1154 HEADER - set of all region heads.
1155 LOOP_HDR - mapping from block to the containing loop
1156 (two blocks can reside within one region if they have
1157 the same loop header). */
1158 void
1159 extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
1161 int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
1162 int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
1164 max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);
1166 max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun));
1168 order = XNEWVEC (int, last_basic_block_for_fn (cfun));
1169 post_order_compute (order, false, false);
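/* ORDER now holds a post-order numbering of the CFG; walking it from the
   last element to the first visits the blocks in reverse post-order,
   i.e. a topological order when back edges are ignored. */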
1171 for (i = nblocks - 1; i >= 0; i--)
1173 int bbn = order[i];
1174 if (degree[bbn] >= 0)
1176 max_hdr[bbn] = bbn;
1177 rescan = 1;
1179 else
1180 /* This block was already processed in find_rgns. */
1181 max_hdr[bbn] = -1;
1184 /* The idea is to topologically walk through CFG in top-down order.
1185 During the traversal, if all the predecessors of a node are
1186 marked to be in the same region (they all have the same max_hdr),
1187 then the current node is also marked as a part of that region.
1188 Otherwise the node starts its own region.
1189 CFG should be traversed until no further changes are made. On each
1190 iteration the set of the region heads is extended (the set of those
1191 blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the
1192 set of all basic blocks, thus the algorithm is guaranteed to
1193 terminate. */
1195 while (rescan && iter < max_iter)
1197 rescan = 0;
1199 for (i = nblocks - 1; i >= 0; i--)
1201 edge e;
1202 edge_iterator ei;
1203 int bbn = order[i];
1205 if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
1207 int hdr = -1;
1209 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds)
1211 int predn = e->src->index;
1213 if (predn != ENTRY_BLOCK
1214 /* If pred wasn't processed in find_rgns. */
1215 && max_hdr[predn] != -1
1216 /* And pred and bb reside in the same loop.
1217 (Or out of any loop). */
1218 && loop_hdr[bbn] == loop_hdr[predn])
1220 if (hdr == -1)
1221 /* Then bb extends the containing region of pred. */
1222 hdr = max_hdr[predn];
1223 else if (hdr != max_hdr[predn])
1224 /* Too bad, there are at least two predecessors
1225 that reside in different regions. Thus, BB should
1226 begin its own region. */
1228 hdr = bbn;
1229 break;
1232 else
1233 /* BB starts its own region. */
1235 hdr = bbn;
1236 break;
1240 if (hdr == bbn)
1242 /* If BB starts its own region,
1243 update the set of headers with BB. */
1244 bitmap_set_bit (header, bbn);
1245 rescan = 1;
1247 else
1248 gcc_assert (hdr != -1);
1250 max_hdr[bbn] = hdr;
1254 iter++;
1257 /* Statistics were gathered on the SPEC2000 package of tests with
1258 mainline weekly snapshot gcc-4.1-20051015 on ia64.
1260 Statistics for SPECint:
1261 1 iteration : 1751 cases (38.7%)
1262 2 iterations: 2770 cases (61.3%)
1263 Blocks wrapped in regions by find_rgns without extension: 18295 blocks
1264 Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
1265 (We don't count single block regions here).
1267 Statistics for SPECfp:
1268 1 iteration : 621 cases (35.9%)
1269 2 iterations: 1110 cases (64.1%)
1270 Blocks wrapped in regions by find_rgns without extension: 6476 blocks
1271 Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
1272 (We don't count single block regions here).
1274 By default we do at most 2 iterations.
1275 This can be overridden with max-sched-extend-regions-iters parameter:
1276 0 - disable region extension,
1277 N > 0 - do at most N iterations. */
1279 if (sched_verbose && iter != 0)
1280 fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
1281 rescan ? "... failed" : "");
1283 if (!rescan && iter != 0)
1285 int *s1 = NULL, s1_sz = 0;
1287 /* Save the old statistics for later printout. */
1288 if (sched_verbose >= 6)
1289 s1_sz = gather_region_statistics (&s1);
1291 /* We have succeeded. Now assemble the regions. */
1292 for (i = nblocks - 1; i >= 0; i--)
1294 int bbn = order[i];
1296 if (max_hdr[bbn] == bbn)
1297 /* BBN is a region head. */
1299 edge e;
1300 edge_iterator ei;
1301 int num_bbs = 0, j, num_insns = 0, large;
1303 large = too_large (bbn, &num_bbs, &num_insns);
1305 degree[bbn] = -1;
1306 rgn_bb_table[idx] = bbn;
1307 RGN_BLOCKS (nr_regions) = idx++;
1308 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1309 RGN_HAS_REAL_EBB (nr_regions) = 0;
1310 CONTAINING_RGN (bbn) = nr_regions;
1311 BLOCK_TO_BB (bbn) = 0;
1313 FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs)
1314 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1315 degree[e->dest->index]--;
1317 if (!large)
1318 /* Here we check whether the region is too_large. */
1319 for (j = i - 1; j >= 0; j--)
1321 int succn = order[j];
1322 if (max_hdr[succn] == bbn)
1324 if ((large = too_large (succn, &num_bbs, &num_insns)))
1325 break;
1329 if (large)
1330 /* If the region is too_large, then wrap every block of
1331 the region into a single block region.
1332 Here we wrap only the region head. Other blocks are
1333 processed in the cycle below. */
1335 RGN_NR_BLOCKS (nr_regions) = 1;
1336 nr_regions++;
1339 num_bbs = 1;
1341 for (j = i - 1; j >= 0; j--)
1343 int succn = order[j];
1345 if (max_hdr[succn] == bbn)
1346 /* This cycle iterates over all basic blocks that
1347 are supposed to be in the region with head BBN,
1348 and wraps them into that region (or into single
1349 block regions). */
1351 gcc_assert (degree[succn] == 0);
1353 degree[succn] = -1;
1354 rgn_bb_table[idx] = succn;
1355 BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
1356 CONTAINING_RGN (succn) = nr_regions;
1358 if (large)
1359 /* Wrap SUCCN into single block region. */
1361 RGN_BLOCKS (nr_regions) = idx;
1362 RGN_NR_BLOCKS (nr_regions) = 1;
1363 RGN_DONT_CALC_DEPS (nr_regions) = 0;
1364 RGN_HAS_REAL_EBB (nr_regions) = 0;
1365 nr_regions++;
1368 idx++;
1370 FOR_EACH_EDGE (e, ei,
1371 BASIC_BLOCK_FOR_FN (cfun, succn)->succs)
1372 if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1373 degree[e->dest->index]--;
1377 if (!large)
1379 RGN_NR_BLOCKS (nr_regions) = num_bbs;
1380 nr_regions++;
1385 if (sched_verbose >= 6)
1387 int *s2, s2_sz;
1389 /* Get the new statistics and print the comparison with the
1390 one before calling this function. */
1391 s2_sz = gather_region_statistics (&s2);
1392 print_region_statistics (s1, s1_sz, s2, s2_sz);
1393 free (s1);
1394 free (s2);
1398 free (order);
1399 free (max_hdr);
1401 *idxp = idx;
1404 /* Functions for regions scheduling information. */
1406 /* Compute dominators, probability, and potential-split-edges of bb.
1407 Assume that these values were already computed for bb's predecessors. */
1409 static void
1410 compute_dom_prob_ps (int bb)
1412 edge_iterator in_ei;
1413 edge in_edge;
1415 /* We shouldn't have any real ebbs yet. */
1416 gcc_assert (ebb_head [bb] == bb + current_blocks);
1418 if (IS_RGN_ENTRY (bb))
1420 bitmap_set_bit (dom[bb], 0);
1421 prob[bb] = REG_BR_PROB_BASE;
1422 return;
1425 prob[bb] = 0;
1427 /* Initialize dom[bb] to '111..1'. */
1428 bitmap_ones (dom[bb]);
1430 FOR_EACH_EDGE (in_edge, in_ei,
1431 BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds)
1433 int pred_bb;
1434 edge out_edge;
1435 edge_iterator out_ei;
1437 if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1438 continue;
1440 pred_bb = BLOCK_TO_BB (in_edge->src->index);
1441 bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
1442 bitmap_ior (ancestor_edges[bb],
1443 ancestor_edges[bb], ancestor_edges[pred_bb]);
1445 bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));
1447 bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);
1449 FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
1450 bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));
1452 prob[bb] += combine_probabilities (prob[pred_bb], in_edge->probability);
1453 // The rounding divide in combine_probabilities can result in an extra
1454 // probability increment propagating along 50-50 edges. Eventually when
1455 // the edges re-merge, the accumulated probability can go slightly above
1456 // REG_BR_PROB_BASE.
1457 if (prob[bb] > REG_BR_PROB_BASE)
1458 prob[bb] = REG_BR_PROB_BASE;
1461 bitmap_set_bit (dom[bb], bb);
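/* pot_split[bb] now holds every out-edge of the blocks lying on some
   entry-to-BB path; removing BB's ancestor edges leaves just the edges
   by which control can leave such a path before reaching BB. */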
1462 bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
1464 if (sched_verbose >= 2)
1465 fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1466 (100 * prob[bb]) / REG_BR_PROB_BASE);
1469 /* Functions for target info. */
1471 /* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1472 Note that bb_trg dominates bb_src. */
1474 static void
1475 split_edges (int bb_src, int bb_trg, edgelst *bl)
1477 sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
1478 bitmap_copy (src, pot_split[bb_src]);
1480 bitmap_and_compl (src, src, pot_split[bb_trg]);
1481 extract_edgelst (src, bl);
1482 sbitmap_free (src);
1485 /* Find the valid candidate-source-blocks for the target block TRG, compute
1486 their probability, and check if they are speculative or not.
1487 For speculative sources, compute their update-blocks and split-blocks. */
1489 static void
1490 compute_trg_info (int trg)
1492 candidate *sp;
1493 edgelst el = { NULL, 0 };
1494 int i, j, k, update_idx;
1495 basic_block block;
1496 sbitmap visited;
1497 edge_iterator ei;
1498 edge e;
1500 candidate_table = XNEWVEC (candidate, current_nr_blocks);
1502 bblst_last = 0;
1503 /* bblst_table holds split blocks and update blocks for each block after
1504 the current one in the region. Split blocks and update blocks are
1505 the TO blocks of region edges, so there can be at most rgn_nr_edges
1506 of them. */
1507 bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
1508 bblst_table = XNEWVEC (basic_block, bblst_size);
1510 edgelst_last = 0;
1511 edgelst_table = XNEWVEC (edge, rgn_nr_edges);
1513 /* Define some of the fields for the target bb as well. */
1514 sp = candidate_table + trg;
1515 sp->is_valid = 1;
1516 sp->is_speculative = 0;
1517 sp->src_prob = REG_BR_PROB_BASE;
1519 visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1521 for (i = trg + 1; i < current_nr_blocks; i++)
1523 sp = candidate_table + i;
1525 sp->is_valid = IS_DOMINATED (i, trg);
1526 if (sp->is_valid)
1528 int tf = prob[trg], cf = prob[i];
1530 /* In CFGs with low probability edges TF can possibly be zero. */
1531 sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0);
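/* src_prob thus estimates how likely block I is to execute relative to
   the target block, on the REG_BR_PROB_BASE scale; too-unlikely
   candidates are rejected just below. */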
1532 sp->is_valid = (sp->src_prob >= min_spec_prob);
1535 if (sp->is_valid)
1537 split_edges (i, trg, &el);
1538 sp->is_speculative = (el.nr_members) ? 1 : 0;
1539 if (sp->is_speculative && !flag_schedule_speculative)
1540 sp->is_valid = 0;
1543 if (sp->is_valid)
1545 /* Compute split blocks and store them in bblst_table.
1546 The TO block of every split edge is a split block. */
1547 sp->split_bbs.first_member = &bblst_table[bblst_last];
1548 sp->split_bbs.nr_members = el.nr_members;
1549 for (j = 0; j < el.nr_members; bblst_last++, j++)
1550 bblst_table[bblst_last] = el.first_member[j]->dest;
1551 sp->update_bbs.first_member = &bblst_table[bblst_last];
1553 /* Compute update blocks and store them in bblst_table.
1554 For every split edge, look at the FROM block, and check
1555 all out edges. For each out edge that is not a split edge,
1556 add the TO block to the update block list. This list can end
1557 up with a lot of duplicates. We need to weed them out to avoid
1558 overrunning the end of the bblst_table. */
1560 update_idx = 0;
1561 bitmap_clear (visited);
1562 for (j = 0; j < el.nr_members; j++)
1564 block = el.first_member[j]->src;
1565 FOR_EACH_EDGE (e, ei, block->succs)
1567 if (!bitmap_bit_p (visited, e->dest->index))
1569 for (k = 0; k < el.nr_members; k++)
1570 if (e == el.first_member[k])
1571 break;
1573 if (k >= el.nr_members)
1575 bblst_table[bblst_last++] = e->dest;
1576 bitmap_set_bit (visited, e->dest->index);
1577 update_idx++;
1582 sp->update_bbs.nr_members = update_idx;
1584 /* Make sure we didn't overrun the end of bblst_table. */
1585 gcc_assert (bblst_last <= bblst_size);
1587 else
1589 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
1591 sp->is_speculative = 0;
1592 sp->src_prob = 0;
1596 sbitmap_free (visited);
1599 /* Free the computed target info. */
1600 static void
1601 free_trg_info (void)
1603 free (candidate_table);
1604 free (bblst_table);
1605 free (edgelst_table);
1608 /* Print candidates info, for debugging purposes. Callable from debugger. */
1610 DEBUG_FUNCTION void
1611 debug_candidate (int i)
1613 if (!candidate_table[i].is_valid)
1614 return;
1616 if (candidate_table[i].is_speculative)
1618 int j;
1619 fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
1621 fprintf (sched_dump, "split path: ");
1622 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
1624 int b = candidate_table[i].split_bbs.first_member[j]->index;
1626 fprintf (sched_dump, " %d ", b);
1628 fprintf (sched_dump, "\n");
1630 fprintf (sched_dump, "update path: ");
1631 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
1633 int b = candidate_table[i].update_bbs.first_member[j]->index;
1635 fprintf (sched_dump, " %d ", b);
1637 fprintf (sched_dump, "\n");
1639 else
1641 fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1645 /* Print candidates info, for debugging purposes. Callable from debugger. */
1647 DEBUG_FUNCTION void
1648 debug_candidates (int trg)
1650 int i;
1652 fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1653 BB_TO_BLOCK (trg), trg);
1654 for (i = trg + 1; i < current_nr_blocks; i++)
1655 debug_candidate (i);
1658 /* Functions for speculative scheduling. */
1660 static bitmap_head not_in_df;
1662 /* Return 0 if x is a set of a register alive at the beginning of one
1663 of the split-blocks of src, otherwise return 1. */
1665 static int
1666 check_live_1 (int src, rtx x)
1668 int i;
1669 int regno;
1670 rtx reg = SET_DEST (x);
1672 if (reg == 0)
1673 return 1;
1675 while (GET_CODE (reg) == SUBREG
1676 || GET_CODE (reg) == ZERO_EXTRACT
1677 || GET_CODE (reg) == STRICT_LOW_PART)
1678 reg = XEXP (reg, 0);
1680 if (GET_CODE (reg) == PARALLEL)
1682 int i;
1684 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1685 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1686 if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1687 return 1;
1689 return 0;
1692 if (!REG_P (reg))
1693 return 1;
1695 regno = REGNO (reg);
1697 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1699 /* Global registers are assumed live. */
1700 return 0;
1702 else
1704 if (regno < FIRST_PSEUDO_REGISTER)
1706 /* Check for hard registers. */
1707 int j = hard_regno_nregs[regno][GET_MODE (reg)];
1708 while (--j >= 0)
1710 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1712 basic_block b = candidate_table[src].split_bbs.first_member[i];
1713 int t = bitmap_bit_p (&not_in_df, b->index);
1715 /* We can have split blocks that were recently generated.
1716 Such blocks are always outside the current region. */
1717 gcc_assert (!t || (CONTAINING_RGN (b->index)
1718 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1720 if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
1721 return 0;
1725 else
1727 /* Check for pseudo registers. */
1728 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1730 basic_block b = candidate_table[src].split_bbs.first_member[i];
1731 int t = bitmap_bit_p (&not_in_df, b->index);
1733 gcc_assert (!t || (CONTAINING_RGN (b->index)
1734 != CONTAINING_RGN (BB_TO_BLOCK (src))));
1736 if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
1737 return 0;
1742 return 1;
1745 /* If x is a set of a register R, mark that R is alive at the beginning
1746 of every update-block of src. */
1748 static void
1749 update_live_1 (int src, rtx x)
1751 int i;
1752 int regno;
1753 rtx reg = SET_DEST (x);
1755 if (reg == 0)
1756 return;
1758 while (GET_CODE (reg) == SUBREG
1759 || GET_CODE (reg) == ZERO_EXTRACT
1760 || GET_CODE (reg) == STRICT_LOW_PART)
1761 reg = XEXP (reg, 0);
1763 if (GET_CODE (reg) == PARALLEL)
1765 int i;
1767 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1768 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1769 update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1771 return;
1774 if (!REG_P (reg))
1775 return;
1777 /* Global registers are always live, so the code below does not apply
1778 to them. */
1780 regno = REGNO (reg);
1782 if (! HARD_REGISTER_NUM_P (regno)
1783 || !global_regs[regno])
1785 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1787 basic_block b = candidate_table[src].update_bbs.first_member[i];
1789 if (HARD_REGISTER_NUM_P (regno))
1790 bitmap_set_range (df_get_live_in (b), regno,
1791 hard_regno_nregs[regno][GET_MODE (reg)]);
1792 else
1793 bitmap_set_bit (df_get_live_in (b), regno);
1798 /* Return 1 if insn can be speculatively moved from block src to trg,
1799 otherwise return 0. Called before the first insertion of insn into
1800 the ready list or before scheduling. */
1802 static int
1803 check_live (rtx insn, int src)
1805 /* Find the registers set by instruction. */
1806 if (GET_CODE (PATTERN (insn)) == SET
1807 || GET_CODE (PATTERN (insn)) == CLOBBER)
1808 return check_live_1 (src, PATTERN (insn));
1809 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1811 int j;
1812 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1813 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1814 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1815 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1816 return 0;
1818 return 1;
1821 return 1;
1824 /* Update the live registers info after insn was moved speculatively from
1825 block src to trg. */
1827 static void
1828 update_live (rtx insn, int src)
1830 /* Find the registers set by instruction. */
1831 if (GET_CODE (PATTERN (insn)) == SET
1832 || GET_CODE (PATTERN (insn)) == CLOBBER)
1833 update_live_1 (src, PATTERN (insn));
1834 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1836 int j;
1837 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1838 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1839 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1840 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1844 /* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
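/* The check relies on the ancestor_edges sets built in compute_dom_prob_ps:
   bb_to can be reached from bb_from when the edge entering bb_from is one
   of bb_to's ancestor edges. */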
1845 #define IS_REACHABLE(bb_from, bb_to) \
1846 (bb_from == bb_to \
1847 || IS_RGN_ENTRY (bb_from) \
1848 || (bitmap_bit_p (ancestor_edges[bb_to], \
1849 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK_FOR_FN (cfun, \
1850 BB_TO_BLOCK (bb_from)))))))
1852 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1854 static void
1855 set_spec_fed (rtx load_insn)
1857 sd_iterator_def sd_it;
1858 dep_t dep;
1860 FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
1861 if (DEP_TYPE (dep) == REG_DEP_TRUE)
1862 FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
1865 /* On the path from the insn to load_insn_bb, find a conditional
1866 branch depending on insn, that guards the speculative load. */
1868 static int
1869 find_conditional_protection (rtx insn, int load_insn_bb)
1871 sd_iterator_def sd_it;
1872 dep_t dep;
1874 /* Iterate through DEF-USE forward dependences. */
1875 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
1877 rtx next = DEP_CON (dep);
1879 if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1880 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1881 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1882 && load_insn_bb != INSN_BB (next)
1883 && DEP_TYPE (dep) == REG_DEP_TRUE
1884 && (JUMP_P (next)
1885 || find_conditional_protection (next, load_insn_bb)))
1886 return 1;
1888 return 0;
1889 } /* find_conditional_protection */
1891 /* Returns 1 if the same insn1 that participates in the computation
1892 of load_insn's address is feeding a conditional branch that is
1893 guarding load_insn. This is true if we find two DEF-USE
1894 chains:
1895 insn1 -> ... -> conditional-branch
1896 insn1 -> ... -> load_insn,
1897 and if a flow path exists:
1898 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1899 and if insn1 is on the path
1900 region-entry -> ... -> bb_trg -> ... load_insn.
1902 Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
1903 Locate the branch by following INSN_FORW_DEPS from insn1. */
1905 static int
1906 is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
1908 sd_iterator_def sd_it;
1909 dep_t dep;
1911 FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
1913 rtx insn1 = DEP_PRO (dep);
1915 /* Must be a DEF-USE dependence upon non-branch. */
1916 if (DEP_TYPE (dep) != REG_DEP_TRUE
1917 || JUMP_P (insn1))
1918 continue;
1920 /* There must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1921 if (INSN_BB (insn1) == bb_src
1922 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1923 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1924 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1925 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1926 continue;
1928 /* Now search for the conditional-branch. */
1929 if (find_conditional_protection (insn1, bb_src))
1930 return 1;
1932 /* Recursive step: search another insn1, "above" current insn1. */
1933 return is_conditionally_protected (insn1, bb_src, bb_trg);
1936 /* The chain does not exist. */
1937 return 0;
1938 } /* is_conditionally_protected */
1940 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1941 load_insn can move speculatively from bb_src to bb_trg. All the
1942 following must hold:
1944 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1945 (2) load_insn and load2 have a def-use dependence upon
1946 the same insn 'insn1'.
1947 (3) either load2 is in bb_trg, or:
1948 - there's only one split-block, and
1949 - load2 is in that split-block.
1951 From all these we can conclude that the two loads access memory
1952 addresses that differ at most by a constant, and hence if moving
1953 load_insn would cause an exception, it would have been caused by
1954 load2 anyhow. */
1956 static int
1957 is_pfree (rtx load_insn, int bb_src, int bb_trg)
1959 sd_iterator_def back_sd_it;
1960 dep_t back_dep;
1961 candidate *candp = candidate_table + bb_src;
1963 if (candp->split_bbs.nr_members != 1)
1964 /* Must have exactly one escape block. */
1965 return 0;
1967 FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
1969 rtx insn1 = DEP_PRO (back_dep);
1971 if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
1972 /* Found a DEF-USE dependence (insn1, load_insn). */
1974 sd_iterator_def fore_sd_it;
1975 dep_t fore_dep;
1977 FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
1979 rtx insn2 = DEP_CON (fore_dep);
1981 if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
1983 /* Found a DEF-USE dependence (insn1, insn2). */
1984 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1985 /* insn2 not guaranteed to be a 1 base reg load. */
1986 continue;
1988 if (INSN_BB (insn2) == bb_trg)
1989 /* insn2 is the similar load, in the target block. */
1990 return 1;
1992 if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
1993 /* insn2 is a similar load, in a split-block. */
1994 return 1;
2000 /* Couldn't find a similar load. */
2001 return 0;
2002 } /* is_pfree */
2004 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
2005 a load moved speculatively, or if load_insn is protected by
2006 a compare on load_insn's address). */
2008 static int
2009 is_prisky (rtx load_insn, int bb_src, int bb_trg)
2011 if (FED_BY_SPEC_LOAD (load_insn))
2012 return 1;
2014 if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
2015 /* Dependence may 'hide' out of the region. */
2016 return 1;
2018 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
2019 return 1;
2021 return 0;
2024 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
2025 Return 1 if insn is exception-free (and the motion is valid)
2026 and 0 otherwise. */
2028 static int
2029 is_exception_free (rtx insn, int bb_src, int bb_trg)
2031 int insn_class = haifa_classify_insn (insn);
2033 /* Handle non-load insns. */
2034 switch (insn_class)
2036 case TRAP_FREE:
2037 return 1;
2038 case TRAP_RISKY:
2039 return 0;
2040 default:;
2043 /* Handle loads. */
2044 if (!flag_schedule_speculative_load)
2045 return 0;
2046 IS_LOAD_INSN (insn) = 1;
2047 switch (insn_class)
2049 case IFREE:
2050 return 1;
2051 case IRISKY:
2052 return 0;
2053 case PFREE_CANDIDATE:
2054 if (is_pfree (insn, bb_src, bb_trg))
2055 return 1;
2056 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
2057 case PRISKY_CANDIDATE:
2058 if (!flag_schedule_speculative_load_dangerous
2059 || is_prisky (insn, bb_src, bb_trg))
2060 return 0;
2061 break;
2062 default:;
2065 return flag_schedule_speculative_load_dangerous;
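/* Illustrative sketch, not part of GCC: the shape of the classification
   ladder used by is_exception_free above, including the deliberate
   fall-through from the "provably free" candidate class into the "risky"
   candidate class.  The enum and the two predicates are hypothetical
   stand-ins for haifa_classify_insn, is_pfree and is_prisky.  */
#if 0
#include <stdbool.h>

enum toy_class { TOY_TRAP_FREE, TOY_TRAP_RISKY, TOY_PFREE_CAND, TOY_PRISKY_CAND };

static bool toy_pfree (int insn)  { return insn % 2 == 0; }   /* Placeholder.  */
static bool toy_prisky (int insn) { return insn % 3 == 0; }   /* Placeholder.  */

static int
toy_exception_free (enum toy_class cl, int insn, bool allow_dangerous)
{
  switch (cl)
    {
    case TOY_TRAP_FREE:
      return 1;
    case TOY_TRAP_RISKY:
      return 0;
    case TOY_PFREE_CAND:
      if (toy_pfree (insn))
        return 1;
      /* Fall through: a PFREE candidate is also a PRISKY candidate.  */
    case TOY_PRISKY_CAND:
      if (!allow_dangerous || toy_prisky (insn))
        return 0;
      break;
    }
  return allow_dangerous;
}
#endif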
2068 /* The number of insns from the current block scheduled so far. */
2069 static int sched_target_n_insns;
2070 /* The number of insns from the current block to be scheduled in total. */
2071 static int target_n_insns;
2072 /* The number of insns from the entire region scheduled so far. */
2073 static int sched_n_insns;
2075 /* Implementations of the sched_info functions for region scheduling. */
2076 static void init_ready_list (void);
2077 static int can_schedule_ready_p (rtx);
2078 static void begin_schedule_ready (rtx);
2079 static ds_t new_ready (rtx, ds_t);
2080 static int schedule_more_p (void);
2081 static const char *rgn_print_insn (const_rtx, int);
2082 static int rgn_rank (rtx, rtx);
2083 static void compute_jump_reg_dependencies (rtx, regset);
2085 /* Functions for speculative scheduling. */
2086 static void rgn_add_remove_insn (rtx, int);
2087 static void rgn_add_block (basic_block, basic_block);
2088 static void rgn_fix_recovery_cfg (int, int, int);
2089 static basic_block advance_target_bb (basic_block, rtx);
2091 /* Return nonzero if there are more insns that should be scheduled. */
2093 static int
2094 schedule_more_p (void)
2096 return sched_target_n_insns < target_n_insns;
2099 /* Add all insns that are initially ready to the ready list READY. Called
2100 once before scheduling a set of insns. */
2102 static void
2103 init_ready_list (void)
2105 rtx prev_head = current_sched_info->prev_head;
2106 rtx next_tail = current_sched_info->next_tail;
2107 int bb_src;
2108 rtx insn;
2110 target_n_insns = 0;
2111 sched_target_n_insns = 0;
2112 sched_n_insns = 0;
2114 /* Print debugging information. */
2115 if (sched_verbose >= 5)
2116 debug_rgn_dependencies (target_bb);
2118 /* Prepare current target block info. */
2119 if (current_nr_blocks > 1)
2120 compute_trg_info (target_bb);
2122 /* Initialize ready list with all 'ready' insns in target block.
2123 Count number of insns in the target block being scheduled. */
2124 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2126 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2127 TODO_SPEC (insn) = HARD_DEP;
2128 try_ready (insn);
2129 target_n_insns++;
2131 gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2134 /* Add to ready list all 'ready' insns in valid source blocks.
2135 For speculative insns, check-live, exception-free, and
2136 issue-delay. */
2137 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2138 if (IS_VALID (bb_src))
2140 rtx src_head;
2141 rtx src_next_tail;
2142 rtx tail, head;
2144 get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2145 &head, &tail);
2146 src_next_tail = NEXT_INSN (tail);
2147 src_head = head;
2149 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2150 if (INSN_P (insn))
2152 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2153 TODO_SPEC (insn) = HARD_DEP;
2154 try_ready (insn);
2159 /* Called after taking INSN from the ready list. Returns nonzero if this
2160 insn can be scheduled, zero if we should silently discard it. */
2162 static int
2163 can_schedule_ready_p (rtx insn)
2165 /* An interblock motion? */
2166 if (INSN_BB (insn) != target_bb
2167 && IS_SPECULATIVE_INSN (insn)
2168 && !check_live (insn, INSN_BB (insn)))
2169 return 0;
2170 else
2171 return 1;
2174 /* Updates counters and other information. Split from can_schedule_ready_p ()
2175 because when an insn is scheduled speculatively, the insn passed to
2176 can_schedule_ready_p () differs from the one passed to
2177 begin_schedule_ready (). */
2178 static void
2179 begin_schedule_ready (rtx insn)
2181 /* An interblock motion? */
2182 if (INSN_BB (insn) != target_bb)
2184 if (IS_SPECULATIVE_INSN (insn))
2186 gcc_assert (check_live (insn, INSN_BB (insn)));
2188 update_live (insn, INSN_BB (insn));
2190 /* For speculative load, mark insns fed by it. */
2191 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2192 set_spec_fed (insn);
2194 nr_spec++;
2196 nr_inter++;
2198 else
2200 /* In block motion. */
2201 sched_target_n_insns++;
2203 sched_n_insns++;
2206 /* Called after NEXT has all its hard dependencies resolved and the speculation
2207 of type TS is enough to overcome them all.
2208 Return the speculation status under which NEXT should be moved to the ready
2209 list or the queue, or DEP_POSTPONED if it is not ready yet. */
2210 static ds_t
2211 new_ready (rtx next, ds_t ts)
2213 if (INSN_BB (next) != target_bb)
2215 int not_ex_free = 0;
2217 /* For speculative insns, before inserting to ready/queue,
2218 check live, exception-free, and issue-delay. */
2219 if (!IS_VALID (INSN_BB (next))
2220 || CANT_MOVE (next)
2221 || (IS_SPECULATIVE_INSN (next)
2222 && ((recog_memoized (next) >= 0
2223 && min_insn_conflict_delay (curr_state, next, next)
2224 > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2225 || IS_SPECULATION_CHECK_P (next)
2226 || !check_live (next, INSN_BB (next))
2227 || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2228 target_bb)))))
2230 if (not_ex_free
2231 /* We are here because is_exception_free () == false.
2232 But we may be able to handle that with control speculation. */
2233 && sched_deps_info->generate_spec_deps
2234 && spec_info->mask & BEGIN_CONTROL)
2236 ds_t new_ds;
2238 /* Add control speculation to NEXT's dependency type. */
2239 new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2241 /* Check if NEXT can be speculated with new dependency type. */
2242 if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2243 /* Here we have a new control-speculative instruction. */
2244 ts = new_ds;
2245 else
2246 /* NEXT isn't ready yet. */
2247 ts = DEP_POSTPONED;
2249 else
2250 /* NEXT isn't ready yet. */
2251 ts = DEP_POSTPONED;
2255 return ts;
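/* Illustrative sketch, not part of GCC: the decision made at the end of
   new_ready above.  When a candidate is not exception-free, either
   strengthen its speculation status with a "begin control" bit (if control
   speculation is available) or postpone it.  The bit values and the helper
   below are hypothetical; the real ds_t encoding lives in sched-int.h.  */
#if 0
typedef unsigned int toy_ds_t;

#define TOY_BEGIN_CONTROL  0x1u
#define TOY_POSTPONED      0x2u

static toy_ds_t
toy_new_ready (toy_ds_t ts, int exception_free, int can_control_speculate)
{
  if (exception_free)
    return ts;                        /* Nothing to overcome.  */
  if (can_control_speculate)
    return ts | TOY_BEGIN_CONTROL;    /* Turn it into a control-speculative insn.  */
  return TOY_POSTPONED;               /* Not ready yet.  */
}
#endif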
2258 /* Return a string that contains the insn uid and optionally anything else
2259 necessary to identify this insn in an output. It's valid to use a
2260 static buffer for this. The ALIGNED parameter should cause the string
2261 to be formatted so that multiple output lines will line up nicely. */
2263 static const char *
2264 rgn_print_insn (const_rtx insn, int aligned)
2266 static char tmp[80];
2268 if (aligned)
2269 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2270 else
2272 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2273 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2274 else
2275 sprintf (tmp, "%d", INSN_UID (insn));
2277 return tmp;
2280 /* Compare priority of two insns. Return a positive number if the second
2281 insn is to be preferred for scheduling, and a negative one if the first
2282 is to be preferred. Zero if they are equally good. */
2284 static int
2285 rgn_rank (rtx insn1, rtx insn2)
2287 /* Some comparisons make sense only in interblock scheduling. */
2288 if (INSN_BB (insn1) != INSN_BB (insn2))
2290 int spec_val, prob_val;
2292 /* Prefer an inblock motion over an interblock motion. */
2293 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2294 return 1;
2295 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2296 return -1;
2298 /* Prefer a useful motion over a speculative one. */
2299 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2300 if (spec_val)
2301 return spec_val;
2303 /* Prefer a more probable (speculative) insn. */
2304 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2305 if (prob_val)
2306 return prob_val;
2308 return 0;
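/* Illustrative sketch, not part of GCC: rgn_rank above follows the usual C
   comparator convention (negative means the first argument wins, positive
   means the second one wins, zero means no preference), so an equivalent toy
   ranking can be handed straight to qsort.  The toy_cand fields are
   hypothetical stand-ins for the predicates used above.  */
#if 0
#include <stdlib.h>

struct toy_cand
{
  int in_target_block;  /* 1 if the insn already lives in the target block.  */
  int speculative;      /* 1 if moving it would be speculative.  */
  int probability;      /* Higher is better for a speculative motion.  */
};

static int
toy_rank (const void *a_, const void *b_)
{
  const struct toy_cand *a = a_, *b = b_;

  /* Prefer an inblock motion over an interblock motion.  */
  if (a->in_target_block != b->in_target_block)
    return b->in_target_block - a->in_target_block;

  /* Prefer a useful motion over a speculative one.  */
  if (a->speculative != b->speculative)
    return a->speculative - b->speculative;

  /* Prefer the more probable speculative insn.  */
  return b->probability - a->probability;
}

/* Usage: qsort (cands, n, sizeof *cands, toy_rank); best candidates first.  */
#endif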
2311 /* NEXT is an instruction that depends on INSN (a backward dependence);
2312 return nonzero if we should include this dependence in priority
2313 calculations. */
2315 static int
2316 contributes_to_priority (rtx next, rtx insn)
2318 /* NEXT and INSN reside in one ebb. */
2319 return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2322 /* INSN is a JUMP_INSN. Store the set of registers that must be
2323 considered as used by this jump in USED. */
2325 static void
2326 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2327 regset used ATTRIBUTE_UNUSED)
2329 /* Nothing to do here, since we postprocess jumps in
2330 add_branch_dependences. */
2333 /* This variable holds common_sched_info hooks and data relevant to
2334 the interblock scheduler. */
2335 static struct common_sched_info_def rgn_common_sched_info;
2338 /* This holds data for the dependence analysis relevant to
2339 the interblock scheduler. */
2340 static struct sched_deps_info_def rgn_sched_deps_info;
2342 /* This holds constant data used for initializing the above structure
2343 for the Haifa scheduler. */
2344 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2346 compute_jump_reg_dependencies,
2347 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2348 0, 0, 0
2351 /* Same as above, but for the selective scheduler. */
2352 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2354 compute_jump_reg_dependencies,
2355 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2356 0, 0, 0
2359 /* Return true if scheduling INSN will finish the scheduling of the
2360 current block. */
2361 static bool
2362 rgn_insn_finishes_block_p (rtx insn)
2364 if (INSN_BB (insn) == target_bb
2365 && sched_target_n_insns + 1 == target_n_insns)
2366 /* INSN is the last not-scheduled instruction in the current block. */
2367 return true;
2369 return false;
2372 /* Used in schedule_insns to initialize current_sched_info for scheduling
2373 regions (or single basic blocks). */
2375 static const struct haifa_sched_info rgn_const_sched_info =
2377 init_ready_list,
2378 can_schedule_ready_p,
2379 schedule_more_p,
2380 new_ready,
2381 rgn_rank,
2382 rgn_print_insn,
2383 contributes_to_priority,
2384 rgn_insn_finishes_block_p,
2386 NULL, NULL,
2387 NULL, NULL,
2388 0, 0,
2390 rgn_add_remove_insn,
2391 begin_schedule_ready,
2392 NULL,
2393 advance_target_bb,
2394 NULL, NULL,
2395 SCHED_RGN
2398 /* This variable holds the data and hooks needed by the Haifa scheduler backend
2399 for the interblock scheduler frontend. */
2400 static struct haifa_sched_info rgn_sched_info;
2402 /* Returns maximum priority that an insn was assigned to. */
2404 int
2405 get_rgn_sched_max_insns_priority (void)
2407 return rgn_sched_info.sched_max_insns_priority;
2410 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
2412 static bool
2413 sets_likely_spilled (rtx pat)
2415 bool ret = false;
2416 note_stores (pat, sets_likely_spilled_1, &ret);
2417 return ret;
2420 static void
2421 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2423 bool *ret = (bool *) data;
2425 if (GET_CODE (pat) == SET
2426 && REG_P (x)
2427 && HARD_REGISTER_P (x)
2428 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2429 *ret = true;
2432 /* A bitmap to note insns that participate in any dependency. Used in
2433 add_branch_dependences. */
2434 static sbitmap insn_referenced;
2436 /* Add dependences so that branches are scheduled to run last in their
2437 block. */
2438 static void
2439 add_branch_dependences (rtx head, rtx tail)
2441 rtx insn, last;
2443 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2444 that can throw exceptions, force them to remain in order at the end of
2445 the block by adding dependencies and giving the last a high priority.
2446 There may be notes present, and prev_head may also be a note.
2448 Branches must obviously remain at the end. Calls should remain at the
2449 end since moving them results in worse register allocation. Uses remain
2450 at the end to ensure proper register allocation.
2452 cc0 setters remain at the end because they can't be moved away from
2453 their cc0 user.
2455 Predecessors of SCHED_GROUP_P instructions at the end remain at the end.
2457 COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2459 Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2460 values) are not moved before reload because we can wind up with register
2461 allocation failures. */
2463 while (tail != head && DEBUG_INSN_P (tail))
2464 tail = PREV_INSN (tail);
2466 insn = tail;
2467 last = 0;
2468 while (CALL_P (insn)
2469 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
2470 || (NONJUMP_INSN_P (insn)
2471 && (GET_CODE (PATTERN (insn)) == USE
2472 || GET_CODE (PATTERN (insn)) == CLOBBER
2473 || can_throw_internal (insn)
2474 #ifdef HAVE_cc0
2475 || sets_cc0_p (PATTERN (insn))
2476 #endif
2477 || (!reload_completed
2478 && sets_likely_spilled (PATTERN (insn)))))
2479 || NOTE_P (insn)
2480 || (last != 0 && SCHED_GROUP_P (last)))
2482 if (!NOTE_P (insn))
2484 if (last != 0
2485 && sd_find_dep_between (insn, last, false) == NULL)
2487 if (! sched_insns_conditions_mutex_p (last, insn))
2488 add_dependence (last, insn, REG_DEP_ANTI);
2489 bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2492 CANT_MOVE (insn) = 1;
2494 last = insn;
2497 /* Don't overrun the bounds of the basic block. */
2498 if (insn == head)
2499 break;
2502 insn = PREV_INSN (insn);
2503 while (insn != head && DEBUG_INSN_P (insn));
2506 /* Make sure these insns are scheduled last in their block. */
2507 insn = last;
2508 if (insn != 0)
2509 while (insn != head)
2511 insn = prev_nonnote_insn (insn);
2513 if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2514 || DEBUG_INSN_P (insn))
2515 continue;
2517 if (! sched_insns_conditions_mutex_p (last, insn))
2518 add_dependence (last, insn, REG_DEP_ANTI);
2521 if (!targetm.have_conditional_execution ())
2522 return;
2524 /* Finally, if the block ends in a jump, and we are doing intra-block
2525 scheduling, make sure that the branch depends on any COND_EXEC insns
2526 inside the block to avoid moving the COND_EXECs past the branch insn.
2528 We only have to do this after reload, because (1) before reload there
2529 are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2530 scheduler after reload.
2532 FIXME: We could in some cases move COND_EXEC insns past the branch if
2533 this scheduler would be a little smarter. Consider this code:
2535 T = [addr]
2536 C ? addr += 4
2537 !C ? X += 12
2538 C ? T += 1
2539 C ? jump foo
2541 On a target with a one cycle stall on a memory access the optimal
2542 sequence would be:
2544 T = [addr]
2545 C ? addr += 4
2546 C ? T += 1
2547 C ? jump foo
2548 !C ? X += 12
2550 We don't want to put the 'X += 12' before the branch because it just
2551 wastes a cycle of execution time when the branch is taken.
2553 Note that in the example "!C" will always be true. That is another
2554 possible improvement for handling COND_EXECs in this scheduler: it
2555 could remove always-true predicates. */
2557 if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail)))
2558 return;
2560 insn = tail;
2561 while (insn != head)
2563 insn = PREV_INSN (insn);
2565 /* Note that we want to add this dependency even when
2566 sched_insns_conditions_mutex_p returns true. The whole point
2567 is that we _want_ this dependency, even if these insns really
2568 are independent. */
2569 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2570 add_dependence (tail, insn, REG_DEP_ANTI);
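/* Illustrative sketch, not part of GCC: the backward scan at the top of
   add_branch_dependences, on a toy array of insns.  Starting from the tail,
   every insn of a "must stay at the block end" kind is chained to the insn
   scanned just before it, which pins the whole group at the end of the
   block.  The kinds and the way a dependence is recorded are hypothetical.  */
#if 0
#include <stdio.h>

enum toy_kind { TOY_NORMAL, TOY_BRANCH, TOY_CALL, TOY_USE };

static int
must_stay_at_end (enum toy_kind k)
{
  return k == TOY_BRANCH || k == TOY_CALL || k == TOY_USE;
}

static void
toy_add_branch_dependences (const enum toy_kind *insn, int head, int tail)
{
  int last = -1;

  for (int i = tail; i >= head && must_stay_at_end (insn[i]); i--)
    {
      if (last >= 0)
        printf ("keep insn %d before insn %d\n", i, last);
      last = i;
    }
}

int
main (void)
{
  enum toy_kind block[] = { TOY_NORMAL, TOY_NORMAL, TOY_USE, TOY_CALL, TOY_BRANCH };
  toy_add_branch_dependences (block, 0, 4);   /* Chains insns 2, 3 and 4.  */
  return 0;
}
#endif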
2574 /* Data structures for the computation of data dependences in a region. We
2575 keep one `deps' structure for every basic block. Before analyzing the
2576 data dependences for a bb, its variables are initialized as a function of
2577 the variables of its predecessors. When the analysis for a bb completes,
2578 we save the contents to the corresponding bb_deps[bb] variable. */
2580 static struct deps_desc *bb_deps;
2582 static void
2583 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
2584 rtx *old_mems_p)
2586 rtx new_insns = *old_insns_p;
2587 rtx new_mems = *old_mems_p;
2589 while (copy_insns)
2591 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2592 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2593 copy_insns = XEXP (copy_insns, 1);
2594 copy_mems = XEXP (copy_mems, 1);
2597 *old_insns_p = new_insns;
2598 *old_mems_p = new_mems;
2601 /* Join PRED_DEPS to the SUCC_DEPS. */
2602 void
2603 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2605 unsigned reg;
2606 reg_set_iterator rsi;
2608 /* The reg_last lists are inherited by successor. */
2609 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2611 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2612 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2614 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2615 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2616 succ_rl->implicit_sets
2617 = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2618 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2619 succ_rl->clobbers);
2620 succ_rl->uses_length += pred_rl->uses_length;
2621 succ_rl->clobbers_length += pred_rl->clobbers_length;
2623 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2625 /* Mem read/write lists are inherited by successor. */
2626 concat_insn_mem_list (pred_deps->pending_read_insns,
2627 pred_deps->pending_read_mems,
2628 &succ_deps->pending_read_insns,
2629 &succ_deps->pending_read_mems);
2630 concat_insn_mem_list (pred_deps->pending_write_insns,
2631 pred_deps->pending_write_mems,
2632 &succ_deps->pending_write_insns,
2633 &succ_deps->pending_write_mems);
2635 succ_deps->pending_jump_insns
2636 = concat_INSN_LIST (pred_deps->pending_jump_insns,
2637 succ_deps->pending_jump_insns);
2638 succ_deps->last_pending_memory_flush
2639 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2640 succ_deps->last_pending_memory_flush);
2642 succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2643 succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2644 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2646 /* last_function_call is inherited by successor. */
2647 succ_deps->last_function_call
2648 = concat_INSN_LIST (pred_deps->last_function_call,
2649 succ_deps->last_function_call);
2651 /* last_function_call_may_noreturn is inherited by successor. */
2652 succ_deps->last_function_call_may_noreturn
2653 = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2654 succ_deps->last_function_call_may_noreturn);
2656 /* sched_before_next_call is inherited by successor. */
2657 succ_deps->sched_before_next_call
2658 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2659 succ_deps->sched_before_next_call);
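/* Illustrative sketch, not part of GCC: the core operation of deps_join
   above is "the successor inherits the predecessor's lists by
   concatenation".  Below is the same idea on a plain singly linked list;
   the node type is a hypothetical stand-in for the INSN_LIST machinery
   (which, ordering details aside, does the same thing).  */
#if 0
#include <stdlib.h>

struct toy_node { int uid; struct toy_node *next; };

/* Return a list of fresh copies of PRED's nodes followed by SUCC.
   (Allocation failures and freeing are ignored in this sketch.)  */
static struct toy_node *
toy_concat (const struct toy_node *pred, struct toy_node *succ)
{
  if (!pred)
    return succ;
  struct toy_node *copy = malloc (sizeof *copy);
  copy->uid = pred->uid;
  copy->next = toy_concat (pred->next, succ);
  return copy;
}

/* Usage, mirroring deps_join:
     succ_uses = toy_concat (pred_uses, succ_uses);
   and likewise for every other list a successor must inherit.  */
#endif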
2662 /* After computing the dependencies for block BB, propagate the dependencies
2663 found in TMP_DEPS to the successors of the block. */
2664 static void
2665 propagate_deps (int bb, struct deps_desc *pred_deps)
2667 basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
2668 edge_iterator ei;
2669 edge e;
2671 /* bb's structures are inherited by its successors. */
2672 FOR_EACH_EDGE (e, ei, block->succs)
2674 /* Only bbs "below" bb, in the same region, are interesting. */
2675 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2676 || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2677 || BLOCK_TO_BB (e->dest->index) <= bb)
2678 continue;
2680 deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2683 /* These lists should point to the right place, for correct
2684 freeing later. */
2685 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2686 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2687 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2688 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2689 bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;
2691 /* Can't allow these to be freed twice. */
2692 pred_deps->pending_read_insns = 0;
2693 pred_deps->pending_read_mems = 0;
2694 pred_deps->pending_write_insns = 0;
2695 pred_deps->pending_write_mems = 0;
2696 pred_deps->pending_jump_insns = 0;
2699 /* Compute dependences inside bb. In a multiple blocks region:
2700 (1) a bb is analyzed after its predecessors, and (2) the lists in
2701 effect at the end of bb (after analyzing for bb) are inherited by
2702 bb's successors.
2704 Specifically for reg-reg data dependences, the block insns are
2705 scanned by sched_analyze () top-to-bottom. Three lists are
2706 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2707 reg_last[].implicit_sets for implicit hard register DEFs, and
2708 reg_last[].uses for register USEs.
2710 When analysis is completed for bb, we update for its successors:
2711 - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2712 - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2713 - USES[succ] = Union (USES [succ], USES [bb])
2715 The mechanism for computing mem-mem data dependence is very
2716 similar, and the result is interblock dependences in the region. */
2718 static void
2719 compute_block_dependences (int bb)
2721 rtx head, tail;
2722 struct deps_desc tmp_deps;
2724 tmp_deps = bb_deps[bb];
2726 /* Do the analysis for this block. */
2727 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2728 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2730 sched_analyze (&tmp_deps, head, tail);
2732 /* Selective scheduling handles control dependencies by itself. */
2733 if (!sel_sched_p ())
2734 add_branch_dependences (head, tail);
2736 if (current_nr_blocks > 1)
2737 propagate_deps (bb, &tmp_deps);
2739 /* Free up the INSN_LISTs. */
2740 free_deps (&tmp_deps);
2742 if (targetm.sched.dependencies_evaluation_hook)
2743 targetm.sched.dependencies_evaluation_hook (head, tail);
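/* Illustrative sketch, not part of GCC: the propagation scheme described in
   the comment before compute_block_dependences, reduced to plain bitsets.
   Blocks are visited in topological order and each block's DEFS/USES sets
   are OR-ed into every successor that lies "below" it in the same region,
   just as propagate_deps does.  The sizes and names are hypothetical.  */
#if 0
#define N_BLOCKS 8
#define N_WORDS  4   /* Width of the toy register sets, in words.  */

struct toy_deps
{
  unsigned long defs[N_WORDS];
  unsigned long uses[N_WORDS];
};

static void
toy_union (unsigned long *dst, const unsigned long *src)
{
  for (int w = 0; w < N_WORDS; w++)
    dst[w] |= src[w];
}

/* SUCC[bb] lists the successors of BB, terminated by -1.  Successors with a
   smaller topological number are skipped.  */
static void
toy_propagate (struct toy_deps deps[N_BLOCKS], int succ[N_BLOCKS][4])
{
  for (int bb = 0; bb < N_BLOCKS; bb++)
    for (int k = 0; succ[bb][k] >= 0; k++)
      {
        int s = succ[bb][k];
        if (s <= bb)
          continue;                       /* Only blocks "below" bb.  */
        toy_union (deps[s].defs, deps[bb].defs);
        toy_union (deps[s].uses, deps[bb].uses);
      }
}
#endif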
2746 /* Free dependencies of instructions inside BB. */
2747 static void
2748 free_block_dependencies (int bb)
2750 rtx head;
2751 rtx tail;
2753 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2755 if (no_real_insns_p (head, tail))
2756 return;
2758 sched_free_deps (head, tail, true);
2761 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2762 them to the unused_*_list variables, so that they can be reused. */
2764 static void
2765 free_pending_lists (void)
2767 int bb;
2769 for (bb = 0; bb < current_nr_blocks; bb++)
2771 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2772 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2773 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2774 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2775 free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
2779 /* Print dependences for debugging starting from FROM_BB.
2780 Callable from debugger. */
2783 DEBUG_FUNCTION void
2784 debug_rgn_dependencies (int from_bb)
2786 int bb;
2788 fprintf (sched_dump,
2789 ";; --------------- forward dependences: ------------ \n");
2791 for (bb = from_bb; bb < current_nr_blocks; bb++)
2793 rtx head, tail;
2795 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2796 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2797 BB_TO_BLOCK (bb), bb);
2799 debug_dependencies (head, tail);
2803 /* Print dependency information for instructions between HEAD and TAIL.
2804 ??? This function would probably fit best in haifa-sched.c. */
2805 void debug_dependencies (rtx head, rtx tail)
2807 rtx insn;
2808 rtx next_tail = NEXT_INSN (tail);
2810 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2811 "insn", "code", "bb", "dep", "prio", "cost",
2812 "reservation");
2813 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2814 "----", "----", "--", "---", "----", "----",
2815 "-----------");
2817 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2819 if (! INSN_P (insn))
2821 int n;
2822 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2823 if (NOTE_P (insn))
2825 n = NOTE_KIND (insn);
2826 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2828 else
2829 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2830 continue;
2833 fprintf (sched_dump,
2834 ";; %s%5d%6d%6d%6d%6d%6d ",
2835 (SCHED_GROUP_P (insn) ? "+" : " "),
2836 INSN_UID (insn),
2837 INSN_CODE (insn),
2838 BLOCK_NUM (insn),
2839 sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2840 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2841 : INSN_PRIORITY (insn))
2842 : INSN_PRIORITY (insn)),
2843 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2844 : insn_cost (insn))
2845 : insn_cost (insn)));
2847 if (recog_memoized (insn) < 0)
2848 fprintf (sched_dump, "nothing");
2849 else
2850 print_reservation (sched_dump, insn);
2852 fprintf (sched_dump, "\t: ");
2854 sd_iterator_def sd_it;
2855 dep_t dep;
2857 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2858 fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
2859 DEP_NONREG (dep) ? "n" : "",
2860 DEP_MULTIPLE (dep) ? "m" : "");
2862 fprintf (sched_dump, "\n");
2865 fprintf (sched_dump, "\n");
2868 /* Returns true if all the basic blocks of the current region have the
2869 BB_DISABLE_SCHEDULE flag set, i.e. the region should not be scheduled. */
2870 bool
2871 sched_is_disabled_for_current_region_p (void)
2873 int bb;
2875 for (bb = 0; bb < current_nr_blocks; bb++)
2876 if (!(BASIC_BLOCK_FOR_FN (cfun,
2877 BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2878 return false;
2880 return true;
2883 /* Free all region dependencies saved in INSN_BACK_DEPS and
2884 INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
2885 when scheduling, so this function is supposed to be called only from
2886 the selective scheduler. */
2887 void
2888 free_rgn_deps (void)
2890 int bb;
2892 for (bb = 0; bb < current_nr_blocks; bb++)
2894 rtx head, tail;
2896 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2897 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2899 sched_free_deps (head, tail, false);
2903 static int rgn_n_insns;
2905 /* Compute insn priority for the current region. */
2906 void
2907 compute_priorities (void)
2909 int bb;
2911 current_sched_info->sched_max_insns_priority = 0;
2912 for (bb = 0; bb < current_nr_blocks; bb++)
2914 rtx head, tail;
2916 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2917 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2919 if (no_real_insns_p (head, tail))
2920 continue;
2922 rgn_n_insns += set_priorities (head, tail);
2924 current_sched_info->sched_max_insns_priority++;
2927 /* (Re-)initialize the arrays of DFA states at the end of each basic block.
2929 SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be
2930 zero for the first call to this function, to allocate the arrays for the
2931 first time.
2933 This function is called once during initialization of the scheduler, and
2934 called again to resize the arrays if new basic blocks have been created,
2935 for example for speculation recovery code. */
2937 static void
2938 realloc_bb_state_array (int saved_last_basic_block)
2940 char *old_bb_state_array = bb_state_array;
2941 size_t lbb = (size_t) last_basic_block_for_fn (cfun);
2942 size_t slbb = (size_t) saved_last_basic_block;
2944 /* Nothing to do if nothing changed since the last time this was called. */
2945 if (saved_last_basic_block == last_basic_block_for_fn (cfun))
2946 return;
2948 /* The selective scheduler doesn't use the state arrays. */
2949 if (sel_sched_p ())
2951 gcc_assert (bb_state_array == NULL && bb_state == NULL);
2952 return;
2955 gcc_checking_assert (saved_last_basic_block == 0
2956 || (bb_state_array != NULL && bb_state != NULL));
2958 bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
2959 bb_state = XRESIZEVEC (state_t, bb_state, lbb);
2961 /* If BB_STATE_ARRAY has moved, fix up all entries of the state pointer
2962 array; otherwise fix up only the newly allocated ones. For the state
2963 array itself, only initialize the new entries. */
2964 bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
2965 for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
2966 bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
2967 for (size_t i = slbb; i < lbb; i++)
2968 state_reset (bb_state[i]);
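/* Illustrative sketch, not part of GCC: the realloc-and-fixup pattern used by
   realloc_bb_state_array above.  A flat byte buffer holds one fixed-size
   state per block and a parallel pointer array indexes into it; after growing
   the buffer, every pointer must be recomputed if the buffer moved, otherwise
   only the new ones.  The names and sizes are hypothetical, and allocation
   failure handling is omitted.  */
#if 0
#include <stdlib.h>
#include <string.h>

#define STATE_SIZE 32   /* Stand-in for dfa_state_size.  */

static char *buf;       /* Stand-in for bb_state_array.  */
static void **state;    /* Stand-in for bb_state.  */

static void
toy_realloc_states (size_t old_n, size_t new_n)
{
  char *old_buf = buf;

  buf = realloc (buf, new_n * STATE_SIZE);
  state = realloc (state, new_n * sizeof *state);

  /* If the buffer moved, every pointer is stale; otherwise only the new
     entries need to be set up.  */
  size_t first = (buf != old_buf) ? 0 : old_n;
  for (size_t i = first; i < new_n; i++)
    state[i] = buf + i * STATE_SIZE;

  /* Only the newly added states are (re)initialized.  */
  for (size_t i = old_n; i < new_n; i++)
    memset (state[i], 0, STATE_SIZE);
}
#endif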
2971 /* Free the arrays of DFA states at the end of each basic block. */
2973 static void
2974 free_bb_state_array (void)
2976 free (bb_state_array);
2977 free (bb_state);
2978 bb_state_array = NULL;
2979 bb_state = NULL;
2982 /* Schedule a region. A region is either an inner loop, a loop-free
2983 subroutine, or a single basic block. Each bb in the region is
2984 scheduled after its flow predecessors. */
2986 static void
2987 schedule_region (int rgn)
2989 int bb;
2990 int sched_rgn_n_insns = 0;
2992 rgn_n_insns = 0;
2994 rgn_setup_region (rgn);
2996 /* Don't schedule a region whose blocks are all marked with
2997 the BB_DISABLE_SCHEDULE flag. */
2998 if (sched_is_disabled_for_current_region_p ())
2999 return;
3001 sched_rgn_compute_dependencies (rgn);
3003 sched_rgn_local_init (rgn);
3005 /* Set priorities. */
3006 compute_priorities ();
3008 sched_extend_ready_list (rgn_n_insns);
3010 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
3012 sched_init_region_reg_pressure_info ();
3013 for (bb = 0; bb < current_nr_blocks; bb++)
3015 basic_block first_bb, last_bb;
3016 rtx head, tail;
3018 first_bb = EBB_FIRST_BB (bb);
3019 last_bb = EBB_LAST_BB (bb);
3021 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3023 if (no_real_insns_p (head, tail))
3025 gcc_assert (first_bb == last_bb);
3026 continue;
3028 sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3032 /* Now we can schedule all blocks. */
3033 for (bb = 0; bb < current_nr_blocks; bb++)
3035 basic_block first_bb, last_bb, curr_bb;
3036 rtx head, tail;
3038 first_bb = EBB_FIRST_BB (bb);
3039 last_bb = EBB_LAST_BB (bb);
3041 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3043 if (no_real_insns_p (head, tail))
3045 gcc_assert (first_bb == last_bb);
3046 continue;
3049 current_sched_info->prev_head = PREV_INSN (head);
3050 current_sched_info->next_tail = NEXT_INSN (tail);
3052 remove_notes (head, tail);
3054 unlink_bb_notes (first_bb, last_bb);
3056 target_bb = bb;
3058 gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
3059 current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
3061 curr_bb = first_bb;
3062 if (dbg_cnt (sched_block))
3064 edge f;
3065 int saved_last_basic_block = last_basic_block_for_fn (cfun);
3067 schedule_block (&curr_bb, bb_state[first_bb->index]);
3068 gcc_assert (EBB_FIRST_BB (bb) == first_bb);
3069 sched_rgn_n_insns += sched_n_insns;
3070 realloc_bb_state_array (saved_last_basic_block);
3071 f = find_fallthru_edge (last_bb->succs);
3072 if (f && f->probability * 100 / REG_BR_PROB_BASE >=
3073 PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
3075 memcpy (bb_state[f->dest->index], curr_state,
3076 dfa_state_size);
3077 if (sched_verbose >= 5)
3078 fprintf (sched_dump, "saving state for edge %d->%d\n",
3079 f->src->index, f->dest->index);
3082 else
3084 sched_rgn_n_insns += rgn_n_insns;
3087 /* Clean up. */
3088 if (current_nr_blocks > 1)
3089 free_trg_info ();
3092 /* Sanity check: verify that all region insns were scheduled. */
3093 gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3095 sched_finish_ready_list ();
3097 /* Done with this region. */
3098 sched_rgn_local_finish ();
3100 /* Free dependencies. */
3101 for (bb = 0; bb < current_nr_blocks; ++bb)
3102 free_block_dependencies (bb);
3104 gcc_assert (haifa_recovery_bb_ever_added_p
3105 || deps_pools_are_empty_p ());
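/* Illustrative sketch, not part of GCC: the cutoff test used above when
   deciding whether to carry the DFA state across a fallthrough edge.  The
   edge probability is rescaled to a percentage of the probability base and
   compared against a parameter; the example numbers are hypothetical.  */
#if 0
static int
toy_worth_saving_state (int edge_probability, int prob_base, int cutoff_percent)
{
  return edge_probability * 100 / prob_base >= cutoff_percent;
}

/* E.g. with prob_base == 10000, an edge of probability 4000 (40%) passes a
   cutoff of 40 but not a cutoff of 50.  */
#endif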
3108 /* Initialize data structures for region scheduling. */
3110 void
3111 sched_rgn_init (bool single_blocks_p)
3113 min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3114 / 100);
3116 nr_inter = 0;
3117 nr_spec = 0;
3119 extend_regions ();
3121 CONTAINING_RGN (ENTRY_BLOCK) = -1;
3122 CONTAINING_RGN (EXIT_BLOCK) = -1;
3124 realloc_bb_state_array (0);
3126 /* Compute regions for scheduling. */
3127 if (single_blocks_p
3128 || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
3129 || !flag_schedule_interblock
3130 || is_cfg_nonregular ())
3132 find_single_block_region (sel_sched_p ());
3134 else
3136 /* Compute the dominators and post dominators. */
3137 if (!sel_sched_p ())
3138 calculate_dominance_info (CDI_DOMINATORS);
3140 /* Find regions. */
3141 find_rgns ();
3143 if (sched_verbose >= 3)
3144 debug_regions ();
3146 /* For now. This will move as more and more of haifa is converted
3147 to using the cfg code. */
3148 if (!sel_sched_p ())
3149 free_dominance_info (CDI_DOMINATORS);
3152 gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks_for_fn (cfun));
3154 RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
3155 RGN_NR_BLOCKS (nr_regions - 1));
3158 /* Free data structures for region scheduling. */
3159 void
3160 sched_rgn_finish (void)
3162 free_bb_state_array ();
3164 /* Reposition the prologue and epilogue notes in case we moved the
3165 prologue/epilogue insns. */
3166 if (reload_completed)
3167 reposition_prologue_and_epilogue_notes ();
3169 if (sched_verbose)
3171 if (reload_completed == 0
3172 && flag_schedule_interblock)
3174 fprintf (sched_dump,
3175 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3176 nr_inter, nr_spec);
3178 else
3179 gcc_assert (nr_inter <= 0);
3180 fprintf (sched_dump, "\n\n");
3183 nr_regions = 0;
3185 free (rgn_table);
3186 rgn_table = NULL;
3188 free (rgn_bb_table);
3189 rgn_bb_table = NULL;
3191 free (block_to_bb);
3192 block_to_bb = NULL;
3194 free (containing_rgn);
3195 containing_rgn = NULL;
3197 free (ebb_head);
3198 ebb_head = NULL;
3201 /* Set up global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3202 point to the region RGN. */
3203 void
3204 rgn_setup_region (int rgn)
3206 int bb;
3208 /* Set variables for the current region. */
3209 current_nr_blocks = RGN_NR_BLOCKS (rgn);
3210 current_blocks = RGN_BLOCKS (rgn);
3212 /* EBB_HEAD is a region-scope structure. But we realloc it for
3213 each region to save time/memory/something else.
3214 See the comments in rgn_add_block for why we allocate one extra element. */
3215 ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
3216 for (bb = 0; bb <= current_nr_blocks; bb++)
3217 ebb_head[bb] = current_blocks + bb;
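/* Illustrative sketch, not part of GCC: why ebb_head gets one extra slot.
   With a sentinel entry the blocks of ebb I always occupy the half-open
   range [ebb_head[I], ebb_head[I + 1]) of rgn_bb_table, even for the last
   ebb, so "one past the end" never needs a special case.  The helper name
   is hypothetical.  */
#if 0
static int
toy_ebb_last_position (const int *ebb_head, int i)
{
  /* Valid even when I is the last ebb, thanks to the sentinel element.  */
  return ebb_head[i + 1] - 1;
}
#endif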
3220 /* Compute instruction dependencies in region RGN. */
3221 void
3222 sched_rgn_compute_dependencies (int rgn)
3224 if (!RGN_DONT_CALC_DEPS (rgn))
3226 int bb;
3228 if (sel_sched_p ())
3229 sched_emulate_haifa_p = 1;
3231 init_deps_global ();
3233 /* Initializations for region data dependence analysis. */
3234 bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
3235 for (bb = 0; bb < current_nr_blocks; bb++)
3236 init_deps (bb_deps + bb, false);
3238 /* Initialize bitmap used in add_branch_dependences. */
3239 insn_referenced = sbitmap_alloc (sched_max_luid);
3240 bitmap_clear (insn_referenced);
3242 /* Compute backward dependencies. */
3243 for (bb = 0; bb < current_nr_blocks; bb++)
3244 compute_block_dependences (bb);
3246 sbitmap_free (insn_referenced);
3247 free_pending_lists ();
3248 finish_deps_global ();
3249 free (bb_deps);
3251 /* We don't want to recalculate this twice. */
3252 RGN_DONT_CALC_DEPS (rgn) = 1;
3254 if (sel_sched_p ())
3255 sched_emulate_haifa_p = 0;
3257 else
3258 /* (This is a recovery block. It is always a single block region.)
3259 OR (We use selective scheduling.) */
3260 gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
3263 /* Initialize region-local data structures for scheduling region RGN. */
3265 void
3266 sched_rgn_local_init (int rgn)
3268 int bb;
3270 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
3271 if (current_nr_blocks > 1)
3273 basic_block block;
3274 edge e;
3275 edge_iterator ei;
3277 prob = XNEWVEC (int, current_nr_blocks);
3279 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
3280 bitmap_vector_clear (dom, current_nr_blocks);
3282 /* Use ->aux to implement EDGE_TO_BIT mapping. */
3283 rgn_nr_edges = 0;
3284 FOR_EACH_BB_FN (block, cfun)
3286 if (CONTAINING_RGN (block->index) != rgn)
3287 continue;
3288 FOR_EACH_EDGE (e, ei, block->succs)
3289 SET_EDGE_TO_BIT (e, rgn_nr_edges++);
3292 rgn_edges = XNEWVEC (edge, rgn_nr_edges);
3293 rgn_nr_edges = 0;
3294 FOR_EACH_BB_FN (block, cfun)
3296 if (CONTAINING_RGN (block->index) != rgn)
3297 continue;
3298 FOR_EACH_EDGE (e, ei, block->succs)
3299 rgn_edges[rgn_nr_edges++] = e;
3302 /* Split edges. */
3303 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3304 bitmap_vector_clear (pot_split, current_nr_blocks);
3305 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3306 bitmap_vector_clear (ancestor_edges, current_nr_blocks);
3308 /* Compute probabilities, dominators, split_edges. */
3309 for (bb = 0; bb < current_nr_blocks; bb++)
3310 compute_dom_prob_ps (bb);
3312 /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
3313 /* We don't need them anymore. But we want to avoid duplication of
3314 aux fields in the newly created edges. */
3315 FOR_EACH_BB_FN (block, cfun)
3317 if (CONTAINING_RGN (block->index) != rgn)
3318 continue;
3319 FOR_EACH_EDGE (e, ei, block->succs)
3320 e->aux = NULL;
3325 /* Free data computed for the finished region. */
3326 void
3327 sched_rgn_local_free (void)
3329 free (prob);
3330 sbitmap_vector_free (dom);
3331 sbitmap_vector_free (pot_split);
3332 sbitmap_vector_free (ancestor_edges);
3333 free (rgn_edges);
3336 /* Free data computed for the finished region. */
3337 void
3338 sched_rgn_local_finish (void)
3340 if (current_nr_blocks > 1 && !sel_sched_p ())
3342 sched_rgn_local_free ();
3346 /* Setup scheduler infos. */
3347 void
3348 rgn_setup_common_sched_info (void)
3350 memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
3351 sizeof (rgn_common_sched_info));
3353 rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
3354 rgn_common_sched_info.add_block = rgn_add_block;
3355 rgn_common_sched_info.estimate_number_of_insns
3356 = rgn_estimate_number_of_insns;
3357 rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
3359 common_sched_info = &rgn_common_sched_info;
3362 /* Setup all *_sched_info structures (for the Haifa frontend
3363 and for the dependence analysis) in the interblock scheduler. */
3364 void
3365 rgn_setup_sched_infos (void)
3367 if (!sel_sched_p ())
3368 memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
3369 sizeof (rgn_sched_deps_info));
3370 else
3371 memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
3372 sizeof (rgn_sched_deps_info));
3374 sched_deps_info = &rgn_sched_deps_info;
3376 memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
3377 current_sched_info = &rgn_sched_info;
3380 /* The one entry point in this file. */
3381 void
3382 schedule_insns (void)
3384 int rgn;
3386 /* Taking care of this degenerate case makes the rest of
3387 this code simpler. */
3388 if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
3389 return;
3391 rgn_setup_common_sched_info ();
3392 rgn_setup_sched_infos ();
3394 haifa_sched_init ();
3395 sched_rgn_init (reload_completed);
3397 bitmap_initialize (&not_in_df, 0);
3398 bitmap_clear (&not_in_df);
3400 /* Schedule every region in the subroutine. */
3401 for (rgn = 0; rgn < nr_regions; rgn++)
3402 if (dbg_cnt (sched_region))
3403 schedule_region (rgn);
3405 /* Clean up. */
3406 sched_rgn_finish ();
3407 bitmap_clear (&not_in_df);
3409 haifa_sched_finish ();
3412 /* INSN has been added to/removed from the current region. */
3413 static void
3414 rgn_add_remove_insn (rtx insn, int remove_p)
3416 if (!remove_p)
3417 rgn_n_insns++;
3418 else
3419 rgn_n_insns--;
3421 if (INSN_BB (insn) == target_bb)
3423 if (!remove_p)
3424 target_n_insns++;
3425 else
3426 target_n_insns--;
3430 /* Extend internal data structures. */
3431 void
3432 extend_regions (void)
3434 rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
3435 rgn_bb_table = XRESIZEVEC (int, rgn_bb_table,
3436 n_basic_blocks_for_fn (cfun));
3437 block_to_bb = XRESIZEVEC (int, block_to_bb,
3438 last_basic_block_for_fn (cfun));
3439 containing_rgn = XRESIZEVEC (int, containing_rgn,
3440 last_basic_block_for_fn (cfun));
3443 void
3444 rgn_make_new_region_out_of_new_block (basic_block bb)
3446 int i;
3448 i = RGN_BLOCKS (nr_regions);
3449 /* I - first free position in rgn_bb_table. */
3451 rgn_bb_table[i] = bb->index;
3452 RGN_NR_BLOCKS (nr_regions) = 1;
3453 RGN_HAS_REAL_EBB (nr_regions) = 0;
3454 RGN_DONT_CALC_DEPS (nr_regions) = 0;
3455 CONTAINING_RGN (bb->index) = nr_regions;
3456 BLOCK_TO_BB (bb->index) = 0;
3458 nr_regions++;
3460 RGN_BLOCKS (nr_regions) = i + 1;
3463 /* BB was added to ebb after AFTER. */
3464 static void
3465 rgn_add_block (basic_block bb, basic_block after)
3467 extend_regions ();
3468 bitmap_set_bit (&not_in_df, bb->index);
3470 if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
3472 rgn_make_new_region_out_of_new_block (bb);
3473 RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
3474 == EXIT_BLOCK_PTR_FOR_FN (cfun));
3476 else
3478 int i, pos;
3480 /* We need to fix rgn_table, block_to_bb, containing_rgn
3481 and ebb_head. */
3483 BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
3485 /* We extend ebb_head to one more position to
3486 easily find the last position of the last ebb in
3487 the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
3488 is _always_ valid for access. */
3490 i = BLOCK_TO_BB (after->index) + 1;
3491 pos = ebb_head[i] - 1;
3492 /* Now POS is the index of the last block in the region. */
3494 /* Find index of basic block AFTER. */
3495 for (; rgn_bb_table[pos] != after->index; pos--)
3498 pos++;
3499 gcc_assert (pos > ebb_head[i - 1]);
3501 /* i - ebb right after "AFTER". */
3502 /* ebb_head[i] - VALID. */
3504 /* Source position: ebb_head[i]
3505 Destination position: ebb_head[i] + 1
3506 Last position:
3507 RGN_BLOCKS (nr_regions) - 1
3508 Number of elements to copy: (last_position) - (source_position) + 1
3511 memmove (rgn_bb_table + pos + 1,
3512 rgn_bb_table + pos,
3513 ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
3514 * sizeof (*rgn_bb_table));
3516 rgn_bb_table[pos] = bb->index;
3518 for (; i <= current_nr_blocks; i++)
3519 ebb_head [i]++;
3521 i = CONTAINING_RGN (after->index);
3522 CONTAINING_RGN (bb->index) = i;
3524 RGN_HAS_REAL_EBB (i) = 1;
3526 for (++i; i <= nr_regions; i++)
3527 RGN_BLOCKS (i)++;
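/* Illustrative sketch, not part of GCC: the array-insertion step performed
   above with memmove.  Shift the tail of the table one slot to the right,
   drop the new block index into the hole, then bump every recorded offset
   that points at or beyond the insertion point.  The table and offsets are
   hypothetical, and the table is assumed to have room for one more entry.  */
#if 0
#include <string.h>

static void
toy_insert_block (int *table, int n_used, int pos, int new_index,
                  int *offsets, int n_offsets)
{
  /* Make room at POS: elements [pos, n_used) move to [pos + 1, n_used + 1).  */
  memmove (table + pos + 1, table + pos, (n_used - pos) * sizeof *table);
  table[pos] = new_index;

  /* Offsets at or beyond POS now refer to one slot further right.  */
  for (int i = 0; i < n_offsets; i++)
    if (offsets[i] >= pos)
      offsets[i]++;
}
#endif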
3531 /* Fix internal data after interblock movement of jump instruction.
3532 For parameter meaning please refer to
3533 sched-int.h: struct sched_info: fix_recovery_cfg. */
3534 static void
3535 rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
3537 int old_pos, new_pos, i;
3539 BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
3541 for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
3542 rgn_bb_table[old_pos] != check_bb_nexti;
3543 old_pos--)
3545 gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
3547 for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
3548 rgn_bb_table[new_pos] != bbi;
3549 new_pos--)
3551 new_pos++;
3552 gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
3554 gcc_assert (new_pos < old_pos);
3556 memmove (rgn_bb_table + new_pos + 1,
3557 rgn_bb_table + new_pos,
3558 (old_pos - new_pos) * sizeof (*rgn_bb_table));
3560 rgn_bb_table[new_pos] = check_bb_nexti;
3562 for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
3563 ebb_head[i]++;
3566 /* Return next block in ebb chain. For parameter meaning please refer to
3567 sched-int.h: struct sched_info: advance_target_bb. */
3568 static basic_block
3569 advance_target_bb (basic_block bb, rtx insn)
3571 if (insn)
3572 return 0;
3574 gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
3575 && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
3576 return bb->next_bb;
3579 #endif
3581 static bool
3582 gate_handle_live_range_shrinkage (void)
3584 #ifdef INSN_SCHEDULING
3585 return flag_live_range_shrinkage;
3586 #else
3587 return 0;
3588 #endif
3591 /* Run instruction scheduler. */
3592 static unsigned int
3593 rest_of_handle_live_range_shrinkage (void)
3595 #ifdef INSN_SCHEDULING
3596 int saved;
3598 initialize_live_range_shrinkage ();
3599 saved = flag_schedule_interblock;
3600 flag_schedule_interblock = false;
3601 schedule_insns ();
3602 flag_schedule_interblock = saved;
3603 finish_live_range_shrinkage ();
3604 #endif
3605 return 0;
3608 static bool
3609 gate_handle_sched (void)
3611 #ifdef INSN_SCHEDULING
3612 return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
3613 #else
3614 return 0;
3615 #endif
3618 /* Run instruction scheduler. */
3619 static unsigned int
3620 rest_of_handle_sched (void)
3622 #ifdef INSN_SCHEDULING
3623 if (flag_selective_scheduling
3624 && ! maybe_skip_selective_scheduling ())
3625 run_selective_scheduling ();
3626 else
3627 schedule_insns ();
3628 #endif
3629 return 0;
3632 static bool
3633 gate_handle_sched2 (void)
3635 #ifdef INSN_SCHEDULING
3636 return optimize > 0 && flag_schedule_insns_after_reload
3637 && !targetm.delay_sched2 && dbg_cnt (sched2_func);
3638 #else
3639 return 0;
3640 #endif
3643 /* Run second scheduling pass after reload. */
3644 static unsigned int
3645 rest_of_handle_sched2 (void)
3647 #ifdef INSN_SCHEDULING
3648 if (flag_selective_scheduling2
3649 && ! maybe_skip_selective_scheduling ())
3650 run_selective_scheduling ();
3651 else
3653 /* Do control and data sched analysis again,
3654 and write some more of the results to dump file. */
3655 if (flag_sched2_use_superblocks)
3656 schedule_ebbs ();
3657 else
3658 schedule_insns ();
3660 #endif
3661 return 0;
3664 namespace {
3666 const pass_data pass_data_live_range_shrinkage =
3668 RTL_PASS, /* type */
3669 "lr_shrinkage", /* name */
3670 OPTGROUP_NONE, /* optinfo_flags */
3671 true, /* has_gate */
3672 true, /* has_execute */
3673 TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
3674 0, /* properties_required */
3675 0, /* properties_provided */
3676 0, /* properties_destroyed */
3677 0, /* todo_flags_start */
3678 ( TODO_df_finish | TODO_verify_rtl_sharing
3679 | TODO_verify_flow ), /* todo_flags_finish */
3682 class pass_live_range_shrinkage : public rtl_opt_pass
3684 public:
3685 pass_live_range_shrinkage(gcc::context *ctxt)
3686 : rtl_opt_pass(pass_data_live_range_shrinkage, ctxt)
3689 /* opt_pass methods: */
3690 bool gate () { return gate_handle_live_range_shrinkage (); }
3691 unsigned int execute () { return rest_of_handle_live_range_shrinkage (); }
3693 }; // class pass_live_range_shrinkage
3695 } // anon namespace
3697 rtl_opt_pass *
3698 make_pass_live_range_shrinkage (gcc::context *ctxt)
3700 return new pass_live_range_shrinkage (ctxt);
3703 namespace {
3705 const pass_data pass_data_sched =
3707 RTL_PASS, /* type */
3708 "sched1", /* name */
3709 OPTGROUP_NONE, /* optinfo_flags */
3710 true, /* has_gate */
3711 true, /* has_execute */
3712 TV_SCHED, /* tv_id */
3713 0, /* properties_required */
3714 0, /* properties_provided */
3715 0, /* properties_destroyed */
3716 0, /* todo_flags_start */
3717 ( TODO_df_finish | TODO_verify_rtl_sharing
3718 | TODO_verify_flow ), /* todo_flags_finish */
3721 class pass_sched : public rtl_opt_pass
3723 public:
3724 pass_sched (gcc::context *ctxt)
3725 : rtl_opt_pass (pass_data_sched, ctxt)
3728 /* opt_pass methods: */
3729 bool gate () { return gate_handle_sched (); }
3730 unsigned int execute () { return rest_of_handle_sched (); }
3732 }; // class pass_sched
3734 } // anon namespace
3736 rtl_opt_pass *
3737 make_pass_sched (gcc::context *ctxt)
3739 return new pass_sched (ctxt);
3742 namespace {
3744 const pass_data pass_data_sched2 =
3746 RTL_PASS, /* type */
3747 "sched2", /* name */
3748 OPTGROUP_NONE, /* optinfo_flags */
3749 true, /* has_gate */
3750 true, /* has_execute */
3751 TV_SCHED2, /* tv_id */
3752 0, /* properties_required */
3753 0, /* properties_provided */
3754 0, /* properties_destroyed */
3755 0, /* todo_flags_start */
3756 ( TODO_df_finish | TODO_verify_rtl_sharing
3757 | TODO_verify_flow ), /* todo_flags_finish */
3760 class pass_sched2 : public rtl_opt_pass
3762 public:
3763 pass_sched2 (gcc::context *ctxt)
3764 : rtl_opt_pass (pass_data_sched2, ctxt)
3767 /* opt_pass methods: */
3768 bool gate () { return gate_handle_sched2 (); }
3769 unsigned int execute () { return rest_of_handle_sched2 (); }
3771 }; // class pass_sched2
3773 } // anon namespace
3775 rtl_opt_pass *
3776 make_pass_sched2 (gcc::context *ctxt)
3778 return new pass_sched2 (ctxt);