/* Instruction scheduling pass.
   Copyright (C) 1992-2013 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "diagnostic-core.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "sched-int.h"
#include "sel-sched.h"
#include "target.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#ifdef INSN_SCHEDULING

/* Some accessor macros for h_i_d members only used within this file.  */
#define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load)
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)

/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;
static int is_cfg_nonregular (void);

/* Number of regions in the procedure.  */
int nr_regions = 0;

/* Table of region descriptions.  */
region *rgn_table = NULL;

/* Array of lists of regions' blocks.  */
int *rgn_bb_table = NULL;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
int *block_to_bb = NULL;

/* The number of the region containing a block.  */
int *containing_rgn = NULL;

/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get an ebb only through splitting the block currently
   being scheduled, therefore we don't need an ebb_head array for every
   region; it's sufficient to hold it for the current one only.  */
int *ebb_head = NULL;

/* The minimum probability of reaching a source block so that it will be
   considered for speculative scheduling.  */
static int min_spec_prob;
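
/* (min_spec_prob is set from the min-spec-prob parameter when the pass is
   initialized, scaled to REG_BR_PROB_BASE; compute_trg_info below compares
   each candidate source's src_prob against it.)  */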
static void find_single_block_region (bool);
static void find_rgns (void);
static bool too_large (int, int *, int *);

/* Blocks of the current region being scheduled.  */
int current_nr_blocks;
int current_blocks;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static basic_block *bblst_table;
static int bblst_size, bblst_last;

/* Arrays that hold the DFA state at the end of a basic block, to re-use
   as the initial state at the start of successor blocks.  The
   BB_STATE_ARRAY holds the actual DFA state, and BB_STATE[I] is a pointer
   into BB_STATE_ARRAY for basic block I.  FIXME: This should be a vec.  */
static char *bb_state_array = NULL;
static state_t *bb_state = NULL;
/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef struct
{
  basic_block *first_member;
  int nr_members;
}
bblst;

typedef struct
{
  char is_valid;
  char is_speculative;
  int src_prob;
  bblst split_bbs;
  bblst update_bbs;
}
candidate;

static candidate *candidate_table;
#define IS_VALID(src) (candidate_table[src].is_valid)
#define IS_SPECULATIVE(src) (candidate_table[src].is_speculative)
#define IS_SPECULATIVE_INSN(INSN)			\
  (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
int target_bb;
/* List of edges.  */
typedef struct
{
  edge *first_member;
  int nr_members;
}
edgelst;

static edge *edgelst_table;
static int edgelst_last;

static void extract_edgelst (sbitmap, edgelst *);

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);
/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg?  */
#define IS_DOMINATED(bb_src, bb_trg)		\
  ( bitmap_bit_p (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is
   the probability of bb i relative to the region entry.  */
static int *prob;

/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static edge *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
#define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux)
#define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr))
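
/* (The edge's ->aux field doubles as storage for its region-local number:
   SET_EDGE_TO_BIT records the index when the region's edge list rgn_edges
   is built, and EDGE_TO_BIT reads it back when indexing edgesets such as
   pot_split[] and ancestor_edges[].)  */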
/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))

/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, const_rtx, void *);
static void add_branch_dependences (rtx, rtx);
static void compute_block_dependences (int);

static void schedule_region (int);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, if we
   have nonlocal gotos, or if we have an unreachable loop.  */

static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx insn;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that we
     compute an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have insns which refer to labels as non-jumped-to operands,
     then we consider the cfg not well structured.  */
  FOR_EACH_BB (b)
    FOR_BB_INSNS (b, insn)
      {
	rtx note, next, set, dest;

	/* If this function has a computed jump, then we consider the cfg
	   not well structured.  */
	if (JUMP_P (insn) && computed_jump_p (insn))
	  return 1;

	if (!INSN_P (insn))
	  continue;

	note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
	if (note == NULL_RTX)
	  continue;

	/* For that label not to be seen as a referred-to label, this
	   must be a single-set which is feeding a jump *only*.  This
	   could be a conditional jump with the label split off for
	   machine-specific reasons or a casesi/tablejump.  */
	next = next_nonnote_insn (insn);
	if (next == NULL_RTX
	    || !JUMP_P (next)
	    || (JUMP_LABEL (next) != XEXP (note, 0)
		&& find_reg_note (next, REG_LABEL_TARGET,
				  XEXP (note, 0)) == NULL_RTX)
	    || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
	  return 1;

	set = single_set (insn);
	if (set == NULL_RTX)
	  return 1;

	dest = SET_DEST (set);
	if (!REG_P (dest) || !dead_or_set_p (next, dest))
	  return 1;
      }

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  FOR_EACH_BB (b)
    {
      if (EDGE_COUNT (b->preds) == 0
	  || (single_pred_p (b)
	      && single_pred (b) == b))
	return 1;
    }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}
/* Extract list of edges from a bitmap containing EDGE_TO_BIT bits.  */

static void
extract_edgelst (sbitmap set, edgelst *el)
{
  unsigned int i = 0;
  sbitmap_iterator sbi;

  /* edgelst table space is reused in each call to extract_edgelst.  */
  edgelst_last = 0;

  el->first_member = &edgelst_table[edgelst_last];
  el->nr_members = 0;

  /* Iterate over each word in the bitset.  */
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi)
    {
      edgelst_table[edgelst_last++] = rgn_edges[i];
      el->nr_members++;
    }
}
/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_regions (void)
{
  int rgn, bb;

  fprintf (sched_dump, "\n;;   ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
	       rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
	 BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
	fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");
    }
}
/* Print the region's basic blocks.  */

DEBUG_FUNCTION void
debug_region (int rgn)
{
  int bb;

  fprintf (stderr, "\n;;   ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
	   rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    {
      dump_bb (stderr, BASIC_BLOCK (rgn_bb_table[current_blocks + bb]),
	       0, TDF_SLIM | TDF_BLOCKS);
      fprintf (stderr, "\n");
    }

  fprintf (stderr, "\n");
}
/* True when a bb with index BB_INDEX is contained in region RGN.  */
static bool
bb_in_region_p (int bb_index, int rgn)
{
  int i;

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    if (rgn_bb_table[current_blocks + i] == bb_index)
      return true;

  return false;
}
/* Dump region RGN to file F using dot syntax.  */
void
dump_region_dot (FILE *f, int rgn)
{
  int i;

  fprintf (f, "digraph Region_%d {\n", rgn);

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++)
    {
      edge e;
      edge_iterator ei;
      int src_bb_num = rgn_bb_table[current_blocks + i];
      basic_block bb = BASIC_BLOCK (src_bb_num);

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (bb_in_region_p (e->dest->index, rgn))
	  fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
    }
  fprintf (f, "}\n");
}
/* The same, but first open a file specified by FNAME.  */
void
dump_region_dot_file (const char *fname, int rgn)
{
  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);
}
/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (bool ebbs_p)
{
  basic_block bb, ebb_start;
  int i = 0;

  nr_regions = 0;

  if (ebbs_p) {
    int probability_cutoff;
    if (profile_info && flag_branch_probabilities)
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
    else
      probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY);
    probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

    FOR_EACH_BB (ebb_start)
      {
	RGN_NR_BLOCKS (nr_regions) = 0;
	RGN_BLOCKS (nr_regions) = i;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;

	for (bb = ebb_start; ; bb = bb->next_bb)
	  {
	    edge e;

	    rgn_bb_table[i] = bb->index;
	    RGN_NR_BLOCKS (nr_regions)++;
	    CONTAINING_RGN (bb->index) = nr_regions;
	    BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions);
	    i++;

	    if (bb->next_bb == EXIT_BLOCK_PTR
		|| LABEL_P (BB_HEAD (bb->next_bb)))
	      break;

	    e = find_fallthru_edge (bb->succs);
	    if (! e)
	      break;
	    if (e->probability <= probability_cutoff)
	      break;
	  }

	ebb_start = bb;
	nr_regions++;
      }
  }
  else
    FOR_EACH_BB (bb)
      {
	rgn_bb_table[nr_regions] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = nr_regions;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;

	CONTAINING_RGN (bb->index) = nr_regions;
	BLOCK_TO_BB (bb->index) = 0;
	nr_regions++;
      }
}
/* Estimate number of the insns in the BB.  */
static int
rgn_estimate_number_of_insns (basic_block bb)
{
  int count;

  count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb));

  if (MAY_HAVE_DEBUG_INSNS)
    {
      rtx insn;

      FOR_BB_INSNS (bb, insn)
	if (DEBUG_INSN_P (insn))
	  count--;
    }

  return count;
}
/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (common_sched_info->estimate_number_of_insns
		   (BASIC_BLOCK (block)));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
	  || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
#define UPDATE_LOOP_RELATIONS(blk, hdr)			\
{							\
  if (max_hdr[blk] == -1)				\
    max_hdr[blk] = hdr;					\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])		\
    bitmap_clear_bit (inner, hdr);			\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])		\
    {							\
      bitmap_clear_bit (inner, max_hdr[blk]);		\
      max_hdr[blk] = hdr;				\
    }							\
}
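
/* For example, if BLK lies in two nested loops whose headers H1 and H2
   were discovered in that order by the DFS (dfs_nr[H1] < dfs_nr[H2]),
   then H2 is the deeper header: the macro records H2 in max_hdr[blk] and
   clears H1 from INNER, since a loop that encloses another loop's header
   cannot itself be innermost.  */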
/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the following global (ick) variables:

     * nr_regions
     * rgn_table
     * rgn_bb_table
     * block_to_bb
     * containing_rgn

   We use dominator relationships to avoid making regions out of non-reducible
   loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */
static void
haifa_find_rgns (void)
{
  int *max_hdr, *dfs_nr, *degree;
  char no_loops = 1;
  int node, child, loop_head, i, head, tail;
  int count = 0, sp, idx = 0;
  edge_iterator current_edge;
  edge_iterator *stack;
  int num_bbs, num_insns, unreachable;
  int too_large_failure;
  basic_block bb;

  /* Note if a block is a natural loop header.  */
  sbitmap header;

  /* Note if a block is a natural inner loop header.  */
  sbitmap inner;

  /* Note if a block is in the block queue.  */
  sbitmap in_queue;

  /* Note if a block is on the DFS stack.  */
  sbitmap in_stack;

  /* Perform a DFS traversal of the cfg.  Identify loop headers, inner loops
     and a mapping from block to its loop header (if the block is contained
     in a loop, else -1).

     Store results in HEADER, INNER, and MAX_HDR respectively, these will
     be used as inputs to the second traversal.

     STACK, SP and DFS_NR are only used during the first traversal.  */

  /* Allocate and initialize variables for the first traversal.  */
  max_hdr = XNEWVEC (int, last_basic_block);
  dfs_nr = XCNEWVEC (int, last_basic_block);
  stack = XNEWVEC (edge_iterator, n_edges);

  inner = sbitmap_alloc (last_basic_block);
  bitmap_ones (inner);

  header = sbitmap_alloc (last_basic_block);
  bitmap_clear (header);

  in_queue = sbitmap_alloc (last_basic_block);
  bitmap_clear (in_queue);

  in_stack = sbitmap_alloc (last_basic_block);
  bitmap_clear (in_stack);

  for (i = 0; i < last_basic_block; i++)
    max_hdr[i] = -1;

#define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux)
#define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E)))
  /* DFS traversal to find inner loops in the cfg.  */

  current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR)->succs);
  sp = -1;

  while (1)
    {
      if (EDGE_PASSED (current_edge))
	{
	  /* We have reached a leaf node or a node that was already
	     processed.  Pop edges off the stack until we find
	     an edge that has not yet been processed.  */
	  while (sp >= 0 && EDGE_PASSED (current_edge))
	    {
	      /* Pop entry off the stack.  */
	      current_edge = stack[sp--];
	      node = ei_edge (current_edge)->src->index;
	      gcc_assert (node != ENTRY_BLOCK);
	      child = ei_edge (current_edge)->dest->index;
	      gcc_assert (child != EXIT_BLOCK);
	      bitmap_clear_bit (in_stack, child);
	      if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
		UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	      ei_next (&current_edge);
	    }

	  /* See if we have finished the DFS tree traversal.  */
	  if (sp < 0 && EDGE_PASSED (current_edge))
	    break;

	  /* Nope, continue the traversal with the popped node.  */
	  continue;
	}

      /* Process a node.  */
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      bitmap_set_bit (in_stack, node);
      dfs_nr[node] = ++count;

      /* We don't traverse to the exit block.  */
      child = ei_edge (current_edge)->dest->index;
      if (child == EXIT_BLOCK)
	{
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}

      /* If the successor is in the stack, then we've found a loop.
	 Mark the loop, if it is not a natural loop, then it will
	 be rejected during the second traversal.  */
      if (bitmap_bit_p (in_stack, child))
	{
	  no_loops = 0;
	  bitmap_set_bit (header, child);
	  UPDATE_LOOP_RELATIONS (node, child);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}

      /* If the child was already visited, then there is no need to visit
	 it again.  Just update the loop relationships and restart
	 with a new edge.  */
      if (dfs_nr[child])
	{
	  if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
	    UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
	  continue;
	}

      /* Push an entry on the stack and continue DFS traversal.  */
      stack[++sp] = current_edge;
      SET_EDGE_PASSED (current_edge);
      current_edge = ei_start (ei_edge (current_edge)->dest->succs);
    }

  /* Reset ->aux field used by EDGE_PASSED.  */
  FOR_ALL_BB (bb)
    {
      edge_iterator ei;
      edge e;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
  /* Another check for unreachable blocks.  The earlier test in
     is_cfg_nonregular only finds unreachable blocks that do not
     form a loop.

     The DFS traversal will mark every block that is reachable from
     the entry node by placing a nonzero value in dfs_nr.  Thus if
     dfs_nr is zero for any block, then it must be unreachable.  */
  unreachable = 0;
  FOR_EACH_BB (bb)
    if (dfs_nr[bb->index] == 0)
      {
	unreachable = 1;
	break;
      }

  /* Gross.  To avoid wasting memory, the second pass uses the dfs_nr array
     to hold degree counts.  */
  degree = dfs_nr;

  FOR_EACH_BB (bb)
    degree[bb->index] = EDGE_COUNT (bb->preds);
  /* Do not perform region scheduling if there are any unreachable
     blocks.  */
  if (!unreachable)
    {
      int *queue, *degree1 = NULL;
      /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put
	 there basic blocks, which are forced to be region heads.
	 This is done to try to assemble few smaller regions
	 from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
      bool extend_regions_p;

      if (no_loops)
	bitmap_set_bit (header, 0);

      /* Second traversal: find reducible inner loops and topologically sort
	 the blocks of each region.  */

      queue = XNEWVEC (int, n_basic_blocks);

      extend_regions_p = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS) > 0;
      if (extend_regions_p)
	{
	  degree1 = XNEWVEC (int, last_basic_block);
	  extended_rgn_header = sbitmap_alloc (last_basic_block);
	  bitmap_clear (extended_rgn_header);
	}

      /* Find blocks which are inner loop headers.  We still have non-reducible
	 loops to consider at this point.  */
      FOR_EACH_BB (bb)
	{
	  if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
	    {
	      edge e;
	      edge_iterator ei;
	      basic_block jbb;

	      /* Now check that the loop is reducible.  We do this separately
		 from finding inner loops so that we do not find a reducible
		 loop which contains an inner non-reducible loop.

		 A simple way to find reducible/natural loops is to verify
		 that each block in the loop is dominated by the loop
		 header.

		 If there exists a block that is not dominated by the loop
		 header, then the block is reachable from outside the loop
		 and thus the loop is not a natural loop.  */
	      FOR_EACH_BB (jbb)
		{
		  /* First identify blocks in the loop, except for the loop
		     entry block.  */
		  if (bb->index == max_hdr[jbb->index] && bb != jbb)
		    {
		      /* Now verify that the block is dominated by the loop
			 header.  */
		      if (!dominated_by_p (CDI_DOMINATORS, jbb, bb))
			break;
		    }
		}

	      /* If we exited the loop early, then BB is the header of
		 a non-reducible loop and we should quit processing it
		 now.  */
	      if (jbb != EXIT_BLOCK_PTR)
		continue;

	      /* BB is a header of an inner loop, or block 0 in a subroutine
		 with no loops at all.  */
	      head = tail = -1;
	      too_large_failure = 0;
	      loop_head = max_hdr[bb->index];

	      if (extend_regions_p)
		/* We save degree in case when we meet a too_large region
		   and cancel it.  We need a correct degree later when
		   calling extend_rgns.  */
		memcpy (degree1, degree, last_basic_block * sizeof (int));

	      /* Decrease degree of all BB's successors for topological
		 ordering.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		if (e->dest != EXIT_BLOCK_PTR)
		  --degree[e->dest->index];
	      /* Estimate # insns, and count # blocks in the region.  */
	      num_bbs = 1;
	      num_insns = common_sched_info->estimate_number_of_insns (bb);

	      /* Find all loop latches (blocks with back edges to the loop
		 header) or all the leaf blocks if the cfg has no loops.

		 Place those blocks into the queue.  */
	      if (no_loops)
		{
		  FOR_EACH_BB (jbb)
		    /* Leaf nodes have only a single successor which must
		       be EXIT_BLOCK.  */
		    if (single_succ_p (jbb)
			&& single_succ (jbb) == EXIT_BLOCK_PTR)
		      {
			queue[++tail] = jbb->index;
			bitmap_set_bit (in_queue, jbb->index);

			if (too_large (jbb->index, &num_bbs, &num_insns))
			  {
			    too_large_failure = 1;
			    break;
			  }
		      }
		}
	      else
		{
		  edge e;

		  FOR_EACH_EDGE (e, ei, bb->preds)
		    {
		      if (e->src == ENTRY_BLOCK_PTR)
			continue;

		      node = e->src->index;

		      if (max_hdr[node] == loop_head && node != bb->index)
			{
			  /* This is a loop latch.  */
			  queue[++tail] = node;
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
			    {
			      too_large_failure = 1;
			      break;
			    }
			}
		    }
		}

	      /* Now add all the blocks in the loop to the queue.

		 We know the loop is a natural loop; however the algorithm
		 above will not always mark certain blocks as being in the
		 loop.  Consider:
			node	children
			 a	  b,c
			 b
			 c	  a,d
			 d	  b

		 The algorithm in the DFS traversal may not mark B & D as part
		 of the loop (i.e. they will not have max_hdr set to A).

		 We know they can not be loop latches (else they would have
		 had max_hdr set since they'd have a backedge to a dominator
		 block).  So we don't need them on the initial queue.

		 We know they are part of the loop because they are dominated
		 by the loop header and can be reached by a backwards walk of
		 the edges starting with nodes on the initial queue.

		 It is safe and desirable to include those nodes in the
		 loop/scheduling region.  To do so we would need to decrease
		 the degree of a node if it is the target of a backedge
		 within the loop itself as the node is placed in the queue.

		 We do not do this because I'm not sure that the actual
		 scheduling code will properly handle this case. ?!? */
	      while (head < tail && !too_large_failure)
		{
		  edge e;
		  child = queue[++head];

		  FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->preds)
		    {
		      node = e->src->index;

		      /* See discussion above about nodes not marked as in
			 this loop during the initial DFS traversal.  */
		      if (e->src == ENTRY_BLOCK_PTR
			  || max_hdr[node] != loop_head)
			{
			  tail = -1;
			  break;
			}
		      else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
			{
			  queue[++tail] = node;
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
			    {
			      too_large_failure = 1;
			      break;
			    }
			}
		    }
		}

	      if (tail >= 0 && !too_large_failure)
		{
		  /* Place the loop header into list of region blocks.  */
		  degree[bb->index] = -1;
		  rgn_bb_table[idx] = bb->index;
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  RGN_BLOCKS (nr_regions) = idx++;
		  RGN_DONT_CALC_DEPS (nr_regions) = 0;
		  RGN_HAS_REAL_EBB (nr_regions) = 0;
		  CONTAINING_RGN (bb->index) = nr_regions;
		  BLOCK_TO_BB (bb->index) = count = 0;

		  /* Remove blocks from queue[] when their in degree
		     becomes zero.  Repeat until no blocks are left on the
		     list.  This produces a topological list of blocks in
		     the region.  */
		  while (tail >= 0)
		    {
		      if (head < 0)
			head = tail;
		      child = queue[head];
		      if (degree[child] == 0)
			{
			  edge e;

			  degree[child] = -1;
			  rgn_bb_table[idx++] = child;
			  BLOCK_TO_BB (child) = ++count;
			  CONTAINING_RGN (child) = nr_regions;
			  queue[head] = queue[tail--];

			  FOR_EACH_EDGE (e, ei, BASIC_BLOCK (child)->succs)
			    if (e->dest != EXIT_BLOCK_PTR)
			      --degree[e->dest->index];
			}
		      else
			--head;
		    }
		  ++nr_regions;
		}
	      else if (extend_regions_p)
		{
		  /* Restore DEGREE.  */
		  int *t = degree;

		  degree = degree1;
		  degree1 = t;

		  /* And force successors of BB to be region heads.
		     This may provide several smaller regions instead
		     of one too_large region.  */
		  FOR_EACH_EDGE (e, ei, bb->succs)
		    if (e->dest != EXIT_BLOCK_PTR)
		      bitmap_set_bit (extended_rgn_header, e->dest->index);
		}
	    }
	}
      free (queue);

      if (extend_regions_p)
	{
	  free (degree1);

	  bitmap_ior (header, header, extended_rgn_header);
	  sbitmap_free (extended_rgn_header);

	  extend_rgns (degree, &idx, header, max_hdr);
	}
    }
  /* Any block that did not end up in a region is placed into a region
     by itself.  */
  FOR_EACH_BB (bb)
    if (degree[bb->index] >= 0)
      {
	rgn_bb_table[idx] = bb->index;
	RGN_NR_BLOCKS (nr_regions) = 1;
	RGN_BLOCKS (nr_regions) = idx++;
	RGN_DONT_CALC_DEPS (nr_regions) = 0;
	RGN_HAS_REAL_EBB (nr_regions) = 0;
	CONTAINING_RGN (bb->index) = nr_regions++;
	BLOCK_TO_BB (bb->index) = 0;
      }

  free (max_hdr);
  free (degree);
  free (stack);
  sbitmap_free (header);
  sbitmap_free (inner);
  sbitmap_free (in_queue);
  sbitmap_free (in_stack);
}
/* Wrapper function.
   If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form
   regions.  Otherwise just call haifa_find_rgns.  */
static void
find_rgns (void)
{
  if (sel_sched_p () && flag_sel_sched_pipelining)
    sel_find_rgns ();
  else
    haifa_find_rgns ();
}
static int gather_region_statistics (int **);
static void print_region_statistics (int *, int, int *, int);

/* Calculate the histogram that shows the number of regions having the
   given number of basic blocks, and store it in the RSP array.  Return
   the size of this array.  */
static int
gather_region_statistics (int **rsp)
{
  int i, *a = 0, a_sz = 0;

  /* a[i] is the number of regions that have (i + 1) basic blocks.  */
  for (i = 0; i < nr_regions; i++)
    {
      int nr_blocks = RGN_NR_BLOCKS (i);

      gcc_assert (nr_blocks >= 1);

      if (nr_blocks > a_sz)
	{
	  a = XRESIZEVEC (int, a, nr_blocks);
	  do
	    a[a_sz++] = 0;
	  while (a_sz != nr_blocks);
	}

      a[nr_blocks - 1]++;
    }

  *rsp = a;
  return a_sz;
}
/* Print regions statistics.  S1 and S2 denote the data before and after
   calling extend_rgns, respectively.  */
static void
print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz)
{
  int i;

  /* We iterate until s2_sz because extend_rgns does not decrease
     the maximal region size.  */
  for (i = 1; i < s2_sz; i++)
    {
      int n1, n2;

      n2 = s2[i];

      if (n2 == 0)
	continue;

      if (i >= s1_sz)
	n1 = 0;
      else
	n1 = s1[i];

      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
	       "was %d + %d more\n", i + 1, n1, n2 - n1);
    }
}
/* Extend regions.
   DEGREE - Array of incoming edge count, considering only
   the edges, that don't have their sources in formed regions yet.
   IDXP - pointer to the next available index in rgn_bb_table.
   HEADER - set of all region heads.
   LOOP_HDR - mapping from block to the containing loop
   (two blocks can reside within one region if they have
   the same loop header).  */
void
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
  int *order, i, rescan = 0, idx = *idxp, iter = 0, max_iter, *max_hdr;
  int nblocks = n_basic_blocks - NUM_FIXED_BLOCKS;

  max_iter = PARAM_VALUE (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS);

  max_hdr = XNEWVEC (int, last_basic_block);

  order = XNEWVEC (int, last_basic_block);
  post_order_compute (order, false, false);

  for (i = nblocks - 1; i >= 0; i--)
    {
      int bbn = order[i];
      if (degree[bbn] >= 0)
	{
	  max_hdr[bbn] = bbn;
	  rescan = 1;
	}
      else
	/* This block already was processed in find_rgns.  */
	max_hdr[bbn] = -1;
    }

  /* The idea is to topologically walk through CFG in top-down order.
     During the traversal, if all the predecessors of a node are
     marked to be in the same region (they all have the same max_hdr),
     then current node is also marked to be a part of that region.
     Otherwise the node starts its own region.
     CFG should be traversed until no further changes are made.  On each
     iteration the set of the region heads is extended (the set of those
     blocks that have max_hdr[bbi] == bbi).  This set is upper bounded by the
     set of all basic blocks, thus the algorithm is guaranteed to
     terminate.  */
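
  /* For instance, in a diamond A -> B, A -> C, B -> D, C -> D where A has
     been made a head (max_hdr[A] == A) and all four blocks share the same
     loop header: B and C each see the single predecessor A and join A's
     region; D then finds both of its predecessors marked with
     max_hdr == A and joins as well.  Had B and C landed in different
     regions, D would have started a region of its own.  */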
  while (rescan && iter < max_iter)
    {
      rescan = 0;

      for (i = nblocks - 1; i >= 0; i--)
	{
	  edge e;
	  edge_iterator ei;
	  int bbn = order[i];

	  if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
	    {
	      int hdr = -1;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->preds)
		{
		  int predn = e->src->index;

		  if (predn != ENTRY_BLOCK
		      /* If pred wasn't processed in find_rgns.  */
		      && max_hdr[predn] != -1
		      /* And pred and bb reside in the same loop.
			 (Or out of any loop).  */
		      && loop_hdr[bbn] == loop_hdr[predn])
		    {
		      if (hdr == -1)
			/* Then bb extends the containing region of pred.  */
			hdr = max_hdr[predn];
		      else if (hdr != max_hdr[predn])
			/* Too bad, there are at least two predecessors
			   that reside in different regions.  Thus, BB should
			   begin its own region.  */
			{
			  hdr = bbn;
			  break;
			}
		    }
		  else
		    /* BB starts its own region.  */
		    {
		      hdr = bbn;
		      break;
		    }
		}

	      if (hdr == bbn)
		{
		  /* If BB starts its own region,
		     update set of headers with BB.  */
		  bitmap_set_bit (header, bbn);
		  rescan = 1;
		}
	      else
		gcc_assert (hdr != -1);

	      max_hdr[bbn] = hdr;
	    }
	}

      iter++;
    }
  /* Statistics were gathered on the SPEC2000 package of tests with
     mainline weekly snapshot gcc-4.1-20051015 on ia64.

     Statistics for SPECint:
     1 iteration : 1751 cases (38.7%)
     2 iterations: 2770 cases (61.3%)
     Blocks wrapped in regions by find_rgns without extension: 18295 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks
     (We don't count single block regions here).

     Statistics for SPECfp:
     1 iteration : 621 cases (35.9%)
     2 iterations: 1110 cases (64.1%)
     Blocks wrapped in regions by find_rgns without extension: 6476 blocks
     Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks
     (We don't count single block regions here).

     By default we do at most 2 iterations.
     This can be overridden with max-sched-extend-regions-iters parameter:
     0 - disable region extension,
     N > 0 - do at most N iterations.  */

  if (sched_verbose && iter != 0)
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
	     rescan ? "... failed" : "");
  if (!rescan && iter != 0)
    {
      int *s1 = NULL, s1_sz = 0;

      /* Save the old statistics for later printout.  */
      if (sched_verbose >= 6)
	s1_sz = gather_region_statistics (&s1);

      /* We have succeeded.  Now assemble the regions.  */
      for (i = nblocks - 1; i >= 0; i--)
	{
	  int bbn = order[i];

	  if (max_hdr[bbn] == bbn)
	    /* BBN is a region head.  */
	    {
	      edge e;
	      edge_iterator ei;
	      int num_bbs = 0, j, num_insns = 0, large;

	      large = too_large (bbn, &num_bbs, &num_insns);

	      degree[bbn] = -1;
	      rgn_bb_table[idx] = bbn;
	      RGN_BLOCKS (nr_regions) = idx++;
	      RGN_DONT_CALC_DEPS (nr_regions) = 0;
	      RGN_HAS_REAL_EBB (nr_regions) = 0;
	      CONTAINING_RGN (bbn) = nr_regions;
	      BLOCK_TO_BB (bbn) = 0;

	      FOR_EACH_EDGE (e, ei, BASIC_BLOCK (bbn)->succs)
		if (e->dest != EXIT_BLOCK_PTR)
		  degree[e->dest->index]--;

	      if (!large)
		/* Here we check whether the region is too_large.  */
		for (j = i - 1; j >= 0; j--)
		  {
		    int succn = order[j];
		    if (max_hdr[succn] == bbn)
		      {
			if ((large = too_large (succn, &num_bbs, &num_insns)))
			  break;
		      }
		  }

	      if (large)
		/* If the region is too_large, then wrap every block of
		   the region into single block region.
		   Here we wrap region head only.  Other blocks are
		   processed in the below cycle.  */
		{
		  RGN_NR_BLOCKS (nr_regions) = 1;
		  nr_regions++;
		}

	      num_bbs = 1;

	      for (j = i - 1; j >= 0; j--)
		{
		  int succn = order[j];

		  if (max_hdr[succn] == bbn)
		    /* This cycle iterates over all basic blocks, that
		       are supposed to be in the region with head BBN,
		       and wraps them into that region (or in single
		       block region).  */
		    {
		      gcc_assert (degree[succn] == 0);

		      degree[succn] = -1;
		      rgn_bb_table[idx] = succn;
		      BLOCK_TO_BB (succn) = large ? 0 : num_bbs++;
		      CONTAINING_RGN (succn) = nr_regions;

		      if (large)
			/* Wrap SUCCN into single block region.  */
			{
			  RGN_BLOCKS (nr_regions) = idx;
			  RGN_NR_BLOCKS (nr_regions) = 1;
			  RGN_DONT_CALC_DEPS (nr_regions) = 0;
			  RGN_HAS_REAL_EBB (nr_regions) = 0;
			  nr_regions++;
			}

		      idx++;

		      FOR_EACH_EDGE (e, ei, BASIC_BLOCK (succn)->succs)
			if (e->dest != EXIT_BLOCK_PTR)
			  degree[e->dest->index]--;
		    }
		}

	      if (!large)
		{
		  RGN_NR_BLOCKS (nr_regions) = num_bbs;
		  nr_regions++;
		}
	    }
	}

      if (sched_verbose >= 6)
	{
	  int *s2, s2_sz;

	  /* Get the new statistics and print the comparison with the
	     one before calling this function.  */
	  s2_sz = gather_region_statistics (&s2);
	  print_region_statistics (s1, s1_sz, s2, s2_sz);
	  free (s1);
	  free (s2);
	}
    }

  free (order);
  free (max_hdr);

  *idxp = idx;
}
/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

static void
compute_dom_prob_ps (int bb)
{
  edge_iterator in_ei;
  edge in_edge;

  /* We shouldn't have any real ebbs yet.  */
  gcc_assert (ebb_head [bb] == bb + current_blocks);

  if (IS_RGN_ENTRY (bb))
    {
      bitmap_set_bit (dom[bb], 0);
      prob[bb] = REG_BR_PROB_BASE;
      return;
    }

  prob[bb] = 0;

  /* Initialize dom[bb] to '111..1'.  */
  bitmap_ones (dom[bb]);

  FOR_EACH_EDGE (in_edge, in_ei, BASIC_BLOCK (BB_TO_BLOCK (bb))->preds)
    {
      int pred_bb;
      edge out_edge;
      edge_iterator out_ei;

      if (in_edge->src == ENTRY_BLOCK_PTR)
	continue;

      pred_bb = BLOCK_TO_BB (in_edge->src->index);
      bitmap_and (dom[bb], dom[bb], dom[pred_bb]);
      bitmap_ior (ancestor_edges[bb],
		  ancestor_edges[bb], ancestor_edges[pred_bb]);

      bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));

      bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]);

      FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs)
	bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));

      prob[bb] += ((prob[pred_bb] * in_edge->probability) / REG_BR_PROB_BASE);
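
      /* (Example: with REG_BR_PROB_BASE == 10000, a predecessor with prob
	 8000 reached over an edge taken 50% of the time contributes 4000;
	 the contributions of all incoming paths accumulate in prob[bb].)  */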
    }

  bitmap_set_bit (dom[bb], bb);
  bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";;  bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
	     (100 * prob[bb]) / REG_BR_PROB_BASE);
}
/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relative to bb_trg.
   Note that bb_trg dominates bb_src.  */
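
/* (Equivalently: the split edges of bb_src relative to bb_trg are
   pot_split[bb_src] minus pot_split[bb_trg], i.e. the edges along which
   control can escape from the paths between bb_trg and bb_src, excluding
   escapes that were already possible above bb_trg.)  */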
static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
  sbitmap src = sbitmap_alloc (SBITMAP_SIZE (pot_split[bb_src]));
  bitmap_copy (src, pot_split[bb_src]);

  bitmap_and_compl (src, src, pot_split[bb_trg]);
  extract_edgelst (src, bl);
  sbitmap_free (src);
}
/* Find the valid candidate-source-blocks for the target block TRG, compute
   their probability, and check if they are speculative or not.
   For speculative sources, compute their update-blocks and split-blocks.  */

static void
compute_trg_info (int trg)
{
  candidate *sp;
  edgelst el = { NULL, 0 };
  int i, j, k, update_idx;
  basic_block block;
  sbitmap visited;
  edge_iterator ei;
  edge e;

  candidate_table = XNEWVEC (candidate, current_nr_blocks);

  bblst_last = 0;
  /* bblst_table holds split blocks and update blocks for each block after
     the current one in the region.  split blocks and update blocks are
     the TO blocks of region edges, so there can be at most rgn_nr_edges
     of them.  */
  bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
  bblst_table = XNEWVEC (basic_block, bblst_size);

  edgelst_last = 0;
  edgelst_table = XNEWVEC (edge, rgn_nr_edges);

  /* Define some of the fields for the target bb as well.  */
  sp = candidate_table + trg;
  sp->is_valid = 1;
  sp->is_speculative = 0;
  sp->src_prob = REG_BR_PROB_BASE;

  visited = sbitmap_alloc (last_basic_block);

  for (i = trg + 1; i < current_nr_blocks; i++)
    {
      sp = candidate_table + i;

      sp->is_valid = IS_DOMINATED (i, trg);
      if (sp->is_valid)
	{
	  int tf = prob[trg], cf = prob[i];

	  /* In CFGs with low probability edges TF can possibly be zero.  */
	  sp->src_prob = (tf ? ((cf * REG_BR_PROB_BASE) / tf) : 0);
	  sp->is_valid = (sp->src_prob >= min_spec_prob);
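
	  /* (Example: with REG_BR_PROB_BASE == 10000, prob[trg] == 8000 and
	     prob[i] == 4000 give src_prob == 5000: the source is reached on
	     half of the executions that reach the target, and it remains a
	     candidate only if that is at least min_spec_prob.)  */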
	}

      if (sp->is_valid)
	{
	  split_edges (i, trg, &el);
	  sp->is_speculative = (el.nr_members) ? 1 : 0;
	  if (sp->is_speculative && !flag_schedule_speculative)
	    sp->is_valid = 0;
	}

      if (sp->is_valid)
	{
	  /* Compute split blocks and store them in bblst_table.
	     The TO block of every split edge is a split block.  */
	  sp->split_bbs.first_member = &bblst_table[bblst_last];
	  sp->split_bbs.nr_members = el.nr_members;
	  for (j = 0; j < el.nr_members; bblst_last++, j++)
	    bblst_table[bblst_last] = el.first_member[j]->dest;
	  sp->update_bbs.first_member = &bblst_table[bblst_last];

	  /* Compute update blocks and store them in bblst_table.
	     For every split edge, look at the FROM block, and check
	     all out edges.  For each out edge that is not a split edge,
	     add the TO block to the update block list.  This list can end
	     up with a lot of duplicates.  We need to weed them out to avoid
	     overrunning the end of the bblst_table.  */

	  update_idx = 0;
	  bitmap_clear (visited);
	  for (j = 0; j < el.nr_members; j++)
	    {
	      block = el.first_member[j]->src;
	      FOR_EACH_EDGE (e, ei, block->succs)
		{
		  if (!bitmap_bit_p (visited, e->dest->index))
		    {
		      for (k = 0; k < el.nr_members; k++)
			if (e == el.first_member[k])
			  break;

		      if (k >= el.nr_members)
			{
			  bblst_table[bblst_last++] = e->dest;
			  bitmap_set_bit (visited, e->dest->index);
			  update_idx++;
			}
		    }
		}
	    }
	  sp->update_bbs.nr_members = update_idx;

	  /* Make sure we didn't overrun the end of bblst_table.  */
	  gcc_assert (bblst_last <= bblst_size);
	}
      else
	{
	  sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;

	  sp->is_speculative = 0;
	  sp->src_prob = 0;
	}
    }

  sbitmap_free (visited);
}
/* Free the computed target info.  */
static void
free_trg_info (void)
{
  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
}
/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidate (int i)
{
  if (!candidate_table[i].is_valid)
    return;

  if (candidate_table[i].is_speculative)
    {
      int j;
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].split_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].update_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");
    }
  else
    {
      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
    }
}
/* Print candidates info, for debugging purposes.  Callable from debugger.  */

DEBUG_FUNCTION void
debug_candidates (int trg)
{
  int i;

  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
	   BB_TO_BLOCK (trg), trg);
  for (i = trg + 1; i < current_nr_blocks; i++)
    debug_candidate (i);
}
/* Functions for speculative scheduling.  */

static bitmap_head not_in_df;

/* Return 0 if x is a set of a register alive in the beginning of one
   of the split-blocks of src, otherwise return 1.  */
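
/* (A return of 0 means the register set by X is live at the start of some
   split block, so moving the set above the branch would clobber a value
   still needed on the off-path; a return of 1 means the motion is safe as
   far as this set is concerned.)  */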
static int
check_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return 1;

  while (GET_CODE (reg) == SUBREG
	 || GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
	  if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
	    return 1;

      return 0;
    }

  if (!REG_P (reg))
    return 1;

  regno = REGNO (reg);

  if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
    {
      /* Global registers are assumed live.  */
      return 0;
    }
  else
    {
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  /* Check for hard registers.  */
	  int j = hard_regno_nregs[regno][GET_MODE (reg)];
	  while (--j >= 0)
	    {
	      for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
		{
		  basic_block b = candidate_table[src].split_bbs.first_member[i];
		  int t = bitmap_bit_p (&not_in_df, b->index);

		  /* We can have split blocks, that were recently generated.
		     Such blocks are always outside current region.  */
		  gcc_assert (!t || (CONTAINING_RGN (b->index)
				     != CONTAINING_RGN (BB_TO_BLOCK (src))));

		  if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j))
		    return 0;
		}
	    }
	}
      else
	{
	  /* Check for pseudo registers.  */
	  for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
	    {
	      basic_block b = candidate_table[src].split_bbs.first_member[i];
	      int t = bitmap_bit_p (&not_in_df, b->index);

	      gcc_assert (!t || (CONTAINING_RGN (b->index)
				 != CONTAINING_RGN (BB_TO_BLOCK (src))));

	      if (t || REGNO_REG_SET_P (df_get_live_in (b), regno))
		return 0;
	    }
	}
    }

  return 1;
}
/* If x is a set of a register R, mark that R is alive in the beginning
   of every update-block of src.  */

static void
update_live_1 (int src, rtx x)
{
  int i;
  int regno;
  rtx reg = SET_DEST (x);

  if (reg == 0)
    return;

  while (GET_CODE (reg) == SUBREG
	 || GET_CODE (reg) == ZERO_EXTRACT
	 || GET_CODE (reg) == STRICT_LOW_PART)
    reg = XEXP (reg, 0);

  if (GET_CODE (reg) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
	  update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));

      return;
    }

  if (!REG_P (reg))
    return;

  /* Global registers are always live, so the code below does not apply
     to them.  */

  regno = REGNO (reg);

  if (! HARD_REGISTER_NUM_P (regno)
      || !global_regs[regno])
    {
      for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
	{
	  basic_block b = candidate_table[src].update_bbs.first_member[i];

	  if (HARD_REGISTER_NUM_P (regno))
	    bitmap_set_range (df_get_live_in (b), regno,
			      hard_regno_nregs[regno][GET_MODE (reg)]);
	  else
	    bitmap_set_bit (df_get_live_in (b), regno);
	}
    }
}
/* Return 1 if insn can be speculatively moved from block src to trg,
   otherwise return 0.  Called before first insertion of insn to
   ready-list or before the scheduling.  */

static int
check_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return check_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
	if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
	     || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
	    && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
	  return 0;

      return 1;
    }

  return 1;
}
/* Update the live registers info after insn was moved speculatively from
   block src to trg.  */

static void
update_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    update_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;
      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
	    || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
	  update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
    }
}
/* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
#define IS_REACHABLE(bb_from, bb_to)					\
  (bb_from == bb_to							\
   || IS_RGN_ENTRY (bb_from)						\
   || (bitmap_bit_p (ancestor_edges[bb_to],				\
	 EDGE_TO_BIT (single_pred_edge (BASIC_BLOCK (BB_TO_BLOCK (bb_from)))))))
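
/* (The last clause asks whether the single edge entering bb_from lies on
   some path from the region entry to bb_to; such edges are recorded in
   ancestor_edges[bb_to] by compute_dom_prob_ps, and if present the path
   runs through bb_from and on to bb_to, i.e. bb_to is reachable.)  */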
/* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */

static void
set_spec_fed (rtx load_insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_TRUE)
      FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1;
}
/* On the path from the insn to load_insn_bb, find a conditional
   branch depending on insn, that guards the speculative load.  */

static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Iterate through DEF-USE forward dependences.  */
  FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
    {
      rtx next = DEP_CON (dep);

      if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
	   CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
	  && IS_REACHABLE (INSN_BB (next), load_insn_bb)
	  && load_insn_bb != INSN_BB (next)
	  && DEP_TYPE (dep) == REG_DEP_TRUE
	  && (JUMP_P (next)
	      || find_conditional_protection (next, load_insn_bb)))
	return 1;
    }
  return 0;
}  /* find_conditional_protection */
/* Returns 1 if the same insn1 that participates in the computation
   of load_insn's address is feeding a conditional branch that is
   guarding on load_insn.  This is true if we find two DEF-USE
   chains:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on INSN_BACK_DEPS from load_insn.
   Locate the branch by following INSN_FORW_DEPS from insn1.  */

static int
is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx insn1 = DEP_PRO (dep);

      /* Must be a DEF-USE dependence upon non-branch.  */
      if (DEP_TYPE (dep) != REG_DEP_TRUE
	  || JUMP_P (insn1))
	continue;

      /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn.  */
      if (INSN_BB (insn1) == bb_src
	  || (CONTAINING_RGN (BLOCK_NUM (insn1))
	      != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
	  || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
	      && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
	continue;

      /* Now search for the conditional-branch.  */
      if (find_conditional_protection (insn1, bb_src))
	return 1;

      /* Recursive step: search another insn1, "above" current insn1.  */
      return is_conditionally_protected (insn1, bb_src, bb_trg);
    }

  /* The chain does not exist.  */
  return 0;
}  /* is_conditionally_protected */
/* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
   load_insn can move speculatively from bb_src to bb_trg.  All the
   following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and insn2 have a def-use dependence upon
       the same insn 'insn1'.
   (3) either insn2 is in bb_trg, or:
       - there's only one split-block, and
       - insn2 is on the escape path.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   insn2 anyhow.  */
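
/* (Illustration: if insn1 computes r1, insn2 loads [r1] in bb_trg, and
   load_insn loads [r1 + 4] in bb_src, the two addresses differ by a
   constant; an exception on [r1 + 4] would have been triggered by [r1]
   as well, so hoisting load_insn to bb_trg introduces no new trap.)  */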
static int
is_pfree (rtx load_insn, int bb_src, int bb_trg)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  candidate *candp = candidate_table + bb_src;

  if (candp->split_bbs.nr_members != 1)
    /* Must have exactly one escape block.  */
    return 0;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	{
	  /* Found a DEF-USE dependence (insn1, load_insn).  */
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx insn2 = DEP_CON (fore_dep);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  if (INSN_BB (insn2) == bb_trg)
		    /* insn2 is the similar load, in the target block.  */
		    return 1;

		  if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
		    /* insn2 is a similar load, in a split-block.  */
		    return 1;
		}
	    }
	}
    }

  /* Couldn't find a similar load.  */
  return 0;
}  /* is_pfree */
/* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
   a load moved speculatively, or if load_insn is protected by
   a compare on load_insn's address).  */

static int
is_prisky (rtx load_insn, int bb_src, int bb_trg)
{
  if (FED_BY_SPEC_LOAD (load_insn))
    return 1;

  if (sd_lists_empty_p (load_insn, SD_LIST_BACK))
    /* Dependence may 'hide' out of the region.  */
    return 1;

  if (is_conditionally_protected (load_insn, bb_src, bb_trg))
    return 1;

  return 0;
}
/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
   Return 1 if insn is exception-free (and the motion is valid)
   and 0 otherwise.  */
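
/* (In outline: TRAP_FREE insns always move, TRAP_RISKY never do.  Loads
   move only when flag_schedule_speculative_load is set: IFREE loads
   unconditionally, PFREE candidates when is_pfree finds a similar load,
   and PRISKY candidates only under
   flag_schedule_speculative_load_dangerous when is_prisky does not
   object.)  */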
static int
is_exception_free (rtx insn, int bb_src, int bb_trg)
{
  int insn_class = haifa_classify_insn (insn);

  /* Handle non-load insns.  */
  switch (insn_class)
    {
    case TRAP_FREE:
      return 1;
    case TRAP_RISKY:
      return 0;
    default:;
    }

  /* Handle loads.  */
  if (!flag_schedule_speculative_load)
    return 0;
  IS_LOAD_INSN (insn) = 1;
  switch (insn_class)
    {
    case IFREE:
      return (1);
    case IRISKY:
      return 0;
    case PFREE_CANDIDATE:
      if (is_pfree (insn, bb_src, bb_trg))
	return 1;
      /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate.  */
    case PRISKY_CANDIDATE:
      if (!flag_schedule_speculative_load_dangerous
	  || is_prisky (insn, bb_src, bb_trg))
	return 0;
      break;
    default:;
    }

  return flag_schedule_speculative_load_dangerous;
}
/* The number of insns from the current block scheduled so far.  */
static int sched_target_n_insns;
/* The number of insns from the current block to be scheduled in total.  */
static int target_n_insns;
/* The number of insns from the entire region scheduled so far.  */
static int sched_n_insns;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static int can_schedule_ready_p (rtx);
static void begin_schedule_ready (rtx);
static ds_t new_ready (rtx, ds_t);
static int schedule_more_p (void);
static const char *rgn_print_insn (const_rtx, int);
static int rgn_rank (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset);

/* Functions for speculative scheduling.  */
static void rgn_add_remove_insn (rtx, int);
static void rgn_add_block (basic_block, basic_block);
static void rgn_fix_recovery_cfg (int, int, int);
static basic_block advance_target_bb (basic_block, rtx);
2076 /* Return nonzero if there are more insns that should be scheduled. */
2078 static int
2079 schedule_more_p (void)
2081 return sched_target_n_insns < target_n_insns;
2084 /* Add all insns that are initially ready to the ready list READY. Called
2085 once before scheduling a set of insns. */
2087 static void
2088 init_ready_list (void)
2090 rtx prev_head = current_sched_info->prev_head;
2091 rtx next_tail = current_sched_info->next_tail;
2092 int bb_src;
2093 rtx insn;
2095 target_n_insns = 0;
2096 sched_target_n_insns = 0;
2097 sched_n_insns = 0;
2099 /* Print debugging information. */
2100 if (sched_verbose >= 5)
2101 debug_rgn_dependencies (target_bb);
2103 /* Prepare current target block info. */
2104 if (current_nr_blocks > 1)
2105 compute_trg_info (target_bb);
2107 /* Initialize ready list with all 'ready' insns in target block.
2108 Count number of insns in the target block being scheduled. */
2109 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2111 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2112 TODO_SPEC (insn) = HARD_DEP;
2113 try_ready (insn);
2114 target_n_insns++;
2116 gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
2119 /* Add to ready list all 'ready' insns in valid source blocks.
2120 For speculative insns, check-live, exception-free, and
2121 issue-delay. */
2122 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2123 if (IS_VALID (bb_src))
2125 rtx src_head;
2126 rtx src_next_tail;
2127 rtx tail, head;
2129 get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
2130 &head, &tail);
2131 src_next_tail = NEXT_INSN (tail);
2132 src_head = head;
2134 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2135 if (INSN_P (insn))
2137 gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED);
2138 TODO_SPEC (insn) = HARD_DEP;
2139 try_ready (insn);
2144 /* Called after taking INSN from the ready list. Returns nonzero if this
2145 insn can be scheduled, zero if we should silently discard it. */
2147 static int
2148 can_schedule_ready_p (rtx insn)
2150 /* An interblock motion? */
2151 if (INSN_BB (insn) != target_bb
2152 && IS_SPECULATIVE_INSN (insn)
2153 && !check_live (insn, INSN_BB (insn)))
2154 return 0;
2155 else
2156 return 1;
2159 /* Updates counters and other information. Split from can_schedule_ready_p ()
2160 because when we schedule an insn speculatively, the insn passed to
2161 can_schedule_ready_p () differs from the one passed to
2162 begin_schedule_ready (). */
2163 static void
2164 begin_schedule_ready (rtx insn)
2166 /* An interblock motion? */
2167 if (INSN_BB (insn) != target_bb)
2169 if (IS_SPECULATIVE_INSN (insn))
2171 gcc_assert (check_live (insn, INSN_BB (insn)));
2173 update_live (insn, INSN_BB (insn));
2175 /* For speculative load, mark insns fed by it. */
2176 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2177 set_spec_fed (insn);
2179 nr_spec++;
2181 nr_inter++;
2183 else
2185 /* In block motion. */
2186 sched_target_n_insns++;
2188 sched_n_insns++;
2191 /* Called after INSN has all its hard dependencies resolved and the speculation
2192 of type TS is enough to overcome them all.
2193 Return the (possibly updated) speculation status TS if NEXT should be moved
2194 to the ready list or the queue, or DEP_POSTPONED if it is not ready yet. */
2195 static ds_t
2196 new_ready (rtx next, ds_t ts)
2198 if (INSN_BB (next) != target_bb)
2200 int not_ex_free = 0;
2202 /* For speculative insns, before inserting to ready/queue,
2203 check live, exception-free, and issue-delay. */
2204 if (!IS_VALID (INSN_BB (next))
2205 || CANT_MOVE (next)
2206 || (IS_SPECULATIVE_INSN (next)
2207 && ((recog_memoized (next) >= 0
2208 && min_insn_conflict_delay (curr_state, next, next)
2209 > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
2210 || IS_SPECULATION_CHECK_P (next)
2211 || !check_live (next, INSN_BB (next))
2212 || (not_ex_free = !is_exception_free (next, INSN_BB (next),
2213 target_bb)))))
2215 if (not_ex_free
2216 /* We are here because is_exception_free () == false.
2217 But we can possibly handle that with control speculation. */
2218 && sched_deps_info->generate_spec_deps
2219 && spec_info->mask & BEGIN_CONTROL)
2221 ds_t new_ds;
2223 /* Add control speculation to NEXT's dependency type. */
2224 new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
2226 /* Check if NEXT can be speculated with new dependency type. */
2227 if (sched_insn_is_legitimate_for_speculation_p (next, new_ds))
2228 /* Here we got a new control-speculative instruction. */
2229 ts = new_ds;
2230 else
2231 /* NEXT isn't ready yet. */
2232 ts = DEP_POSTPONED;
2234 else
2235 /* NEXT isn't ready yet. */
2236 ts = DEP_POSTPONED;
2240 return ts;
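/* A sketch of the "upgrade to control speculation" step above, with a toy
   status word in place of GCC's ds_t encoding (the real set_dep_weak packs
   a weakness value; a single bit stands in for it here, and all names are
   stand-ins).  */
#if 0
#define TOY_BEGIN_CONTROL  (1u << 0)
#define TOY_POSTPONED      (1u << 31)

static unsigned
toy_upgrade_or_postpone (unsigned ts, int speculation_is_legitimate)
{
  unsigned with_control = ts | TOY_BEGIN_CONTROL;  /* cf. set_dep_weak */
  return speculation_is_legitimate ? with_control : TOY_POSTPONED;
}
#endif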
2243 /* Return a string that contains the insn uid and optionally anything else
2244 necessary to identify this insn in an output. It's valid to use a
2245 static buffer for this. The ALIGNED parameter should cause the string
2246 to be formatted so that multiple output lines will line up nicely. */
2248 static const char *
2249 rgn_print_insn (const_rtx insn, int aligned)
2251 static char tmp[80];
2253 if (aligned)
2254 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2255 else
2257 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2258 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2259 else
2260 sprintf (tmp, "%d", INSN_UID (insn));
2262 return tmp;
2265 /* Compare priority of two insns. Return a positive number if the second
2266 insn is to be preferred for scheduling, and a negative one if the first
2267 is to be preferred. Zero if they are equally good. */
2269 static int
2270 rgn_rank (rtx insn1, rtx insn2)
2272 /* Some comparisons make sense in interblock scheduling only. */
2273 if (INSN_BB (insn1) != INSN_BB (insn2))
2275 int spec_val, prob_val;
2277 /* Prefer an inblock motion over an interblock motion. */
2278 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2279 return 1;
2280 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2281 return -1;
2283 /* Prefer a useful motion over a speculative one. */
2284 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2285 if (spec_val)
2286 return spec_val;
2288 /* Prefer a more probable (speculative) insn. */
2289 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2290 if (prob_val)
2291 return prob_val;
2293 return 0;
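/* A standalone sketch of the same three-key comparison on a toy insn
   record (hypothetical fields, not the real INSN_* macros): an inblock
   insn beats an interblock one, a useful motion beats a speculative one,
   and among speculative insns the more probable wins.  A positive result
   means prefer the second argument, matching the convention above.  */
#if 0
struct toy_insn { int bb, speculative, probability; };

static int
toy_rank (const struct toy_insn *a, const struct toy_insn *b, int target_bb)
{
  if (a->bb == b->bb)
    return 0;
  if (b->bb == target_bb && a->bb != target_bb)
    return 1;
  if (a->bb == target_bb && b->bb != target_bb)
    return -1;
  if (a->speculative != b->speculative)
    return a->speculative - b->speculative;
  return b->probability - a->probability;
}
#endif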
2296 /* NEXT is an instruction that depends on INSN (a backward dependence);
2297 return nonzero if we should include this dependence in priority
2298 calculations. */
2300 static int
2301 contributes_to_priority (rtx next, rtx insn)
2303 /* NEXT and INSN reside in one ebb. */
2304 return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
2307 /* INSN is a JUMP_INSN. Store the set of registers that must be
2308 considered as used by this jump in USED. */
2310 static void
2311 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
2312 regset used ATTRIBUTE_UNUSED)
2314 /* Nothing to do here, since we postprocess jumps in
2315 add_branch_dependences. */
2318 /* This variable holds common_sched_info hooks and data relevant to
2319 the interblock scheduler. */
2320 static struct common_sched_info_def rgn_common_sched_info;
2323 /* This holds data for the dependence analysis relevant to
2324 the interblock scheduler. */
2325 static struct sched_deps_info_def rgn_sched_deps_info;
2327 /* This holds constant data used for initializing the above structure
2328 for the Haifa scheduler. */
2329 static const struct sched_deps_info_def rgn_const_sched_deps_info =
2331 compute_jump_reg_dependencies,
2332 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2333 0, 0, 0
2336 /* Same as above, but for the selective scheduler. */
2337 static const struct sched_deps_info_def rgn_const_sel_sched_deps_info =
2339 compute_jump_reg_dependencies,
2340 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2341 0, 0, 0
2344 /* Return true if scheduling INSN will finish the scheduling of the
2345 current block. */
2346 static bool
2347 rgn_insn_finishes_block_p (rtx insn)
2349 if (INSN_BB (insn) == target_bb
2350 && sched_target_n_insns + 1 == target_n_insns)
2351 /* INSN is the last not-scheduled instruction in the current block. */
2352 return true;
2354 return false;
2357 /* Used in schedule_insns to initialize current_sched_info for scheduling
2358 regions (or single basic blocks). */
2360 static const struct haifa_sched_info rgn_const_sched_info =
2362 init_ready_list,
2363 can_schedule_ready_p,
2364 schedule_more_p,
2365 new_ready,
2366 rgn_rank,
2367 rgn_print_insn,
2368 contributes_to_priority,
2369 rgn_insn_finishes_block_p,
2371 NULL, NULL,
2372 NULL, NULL,
2373 0, 0,
2375 rgn_add_remove_insn,
2376 begin_schedule_ready,
2377 NULL,
2378 advance_target_bb,
2379 NULL, NULL,
2380 SCHED_RGN
2383 /* This variable holds the data and hooks needed by the Haifa scheduler backend
2384 for the interblock scheduler frontend. */
2385 static struct haifa_sched_info rgn_sched_info;
2387 /* Returns maximum priority that an insn was assigned to. */
2389 int
2390 get_rgn_sched_max_insns_priority (void)
2392 return rgn_sched_info.sched_max_insns_priority;
2395 /* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register. */
2397 static bool
2398 sets_likely_spilled (rtx pat)
2400 bool ret = false;
2401 note_stores (pat, sets_likely_spilled_1, &ret);
2402 return ret;
2405 static void
2406 sets_likely_spilled_1 (rtx x, const_rtx pat, void *data)
2408 bool *ret = (bool *) data;
2410 if (GET_CODE (pat) == SET
2411 && REG_P (x)
2412 && HARD_REGISTER_P (x)
2413 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
2414 *ret = true;
2417 /* A bitmap to note insns that participate in any dependency. Used in
2418 add_branch_dependences. */
2419 static sbitmap insn_referenced;
2421 /* Add dependences so that branches are scheduled to run last in their
2422 block. */
2423 static void
2424 add_branch_dependences (rtx head, rtx tail)
2426 rtx insn, last;
2428 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2429 that can throw exceptions, force them to remain in order at the end of
2430 the block by adding dependencies and giving the last a high priority.
2431 There may be notes present, and prev_head may also be a note.
2433 Branches must obviously remain at the end. Calls should remain at the
2434 end since moving them results in worse register allocation. Uses remain
2435 at the end to ensure proper register allocation.
2437 cc0 setters remain at the end because they can't be moved away from
2438 their cc0 user.
2440 COND_EXEC insns cannot be moved past a branch (see e.g. PR17808).
2442 Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return
2443 values) are not moved before reload because we can wind up with register
2444 allocation failures. */
2446 while (tail != head && DEBUG_INSN_P (tail))
2447 tail = PREV_INSN (tail);
2449 insn = tail;
2450 last = 0;
2451 while (CALL_P (insn)
2452 || JUMP_P (insn)
2453 || (NONJUMP_INSN_P (insn)
2454 && (GET_CODE (PATTERN (insn)) == USE
2455 || GET_CODE (PATTERN (insn)) == CLOBBER
2456 || can_throw_internal (insn)
2457 #ifdef HAVE_cc0
2458 || sets_cc0_p (PATTERN (insn))
2459 #endif
2460 || (!reload_completed
2461 && sets_likely_spilled (PATTERN (insn)))))
2462 || NOTE_P (insn))
2464 if (!NOTE_P (insn))
2466 if (last != 0
2467 && sd_find_dep_between (insn, last, false) == NULL)
2469 if (! sched_insns_conditions_mutex_p (last, insn))
2470 add_dependence (last, insn, REG_DEP_ANTI);
2471 bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2474 CANT_MOVE (insn) = 1;
2476 last = insn;
2479 /* Don't overrun the bounds of the basic block. */
2480 if (insn == head)
2481 break;
2483 do
2484 insn = PREV_INSN (insn);
2485 while (insn != head && DEBUG_INSN_P (insn));
2488 /* Make sure these insns are scheduled last in their block. */
2489 insn = last;
2490 if (insn != 0)
2491 while (insn != head)
2493 insn = prev_nonnote_insn (insn);
2495 if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2496 || DEBUG_INSN_P (insn))
2497 continue;
2499 if (! sched_insns_conditions_mutex_p (last, insn))
2500 add_dependence (last, insn, REG_DEP_ANTI);
2503 if (!targetm.have_conditional_execution ())
2504 return;
2506 /* Finally, if the block ends in a jump, and we are doing intra-block
2507 scheduling, make sure that the branch depends on any COND_EXEC insns
2508 inside the block to avoid moving the COND_EXECs past the branch insn.
2510 We only have to do this after reload, because (1) before reload there
2511 are no COND_EXEC insns, and (2) the region scheduler is an intra-block
2512 scheduler after reload.
2514 FIXME: We could in some cases move COND_EXEC insns past the branch if
2515 this scheduler would be a little smarter. Consider this code:
2517 T = [addr]
2518 C ? addr += 4
2519 !C ? X += 12
2520 C ? T += 1
2521 C ? jump foo
2523 On a target with a one cycle stall on a memory access the optimal
2524 sequence would be:
2526 T = [addr]
2527 C ? addr += 4
2528 C ? T += 1
2529 C ? jump foo
2530 !C ? X += 12
2532 We don't want to put the 'X += 12' before the branch because it just
2533 wastes a cycle of execution time when the branch is taken.
2535 Note that in the example "!C" will always be true. That is another
2536 possible improvement for handling COND_EXECs in this scheduler: it
2537 could remove always-true predicates. */
2539 if (!reload_completed || ! JUMP_P (tail))
2540 return;
2542 insn = tail;
2543 while (insn != head)
2545 insn = PREV_INSN (insn);
2547 /* Note that we want to add this dependency even when
2548 sched_insns_conditions_mutex_p returns true. The whole point
2549 is that we _want_ this dependency, even if these insns really
2550 are independent. */
2551 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC)
2552 add_dependence (tail, insn, REG_DEP_ANTI);
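/* The first loop above pins the block's tail group in place by chaining
   anti-dependences bottom-up.  A minimal sketch of that chaining on a
   plain array, with stub callbacks instead of add_dependence and the
   CALL/JUMP/USE/... predicate (all names are stand-ins):  */
#if 0
static void
pin_tail_group (const int *insns, int n,
                int (*must_stay_last) (int insn),
                void (*add_anti_dep) (int earlier, int later))
{
  int last = -1;
  for (int i = n - 1; i >= 0 && must_stay_last (insns[i]); i--)
    {
      if (last != -1)
        add_anti_dep (insns[i], last);  /* keep insns[i] before LAST */
      last = insns[i];
    }
}
#endif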
2556 /* Data structures for the computation of data dependences in a region. We
2557 keep one `deps' structure for every basic block. Before analyzing the
2558 data dependences for a bb, its variables are initialized as a function of
2559 the variables of its predecessors. When the analysis for a bb completes,
2560 we save the contents to the corresponding bb_deps[bb] variable. */
2562 static struct deps_desc *bb_deps;
2564 static void
2565 concat_insn_mem_list (rtx copy_insns, rtx copy_mems, rtx *old_insns_p,
2566 rtx *old_mems_p)
2568 rtx new_insns = *old_insns_p;
2569 rtx new_mems = *old_mems_p;
2571 while (copy_insns)
2573 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2574 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2575 copy_insns = XEXP (copy_insns, 1);
2576 copy_mems = XEXP (copy_mems, 1);
2579 *old_insns_p = new_insns;
2580 *old_mems_p = new_mems;
2583 /* Join PRED_DEPS to the SUCC_DEPS. */
2584 void
2585 deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
2587 unsigned reg;
2588 reg_set_iterator rsi;
2590 /* The reg_last lists are inherited by successor. */
2591 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi)
2593 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2594 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2596 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2597 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2598 succ_rl->implicit_sets
2599 = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets);
2600 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2601 succ_rl->clobbers);
2602 succ_rl->uses_length += pred_rl->uses_length;
2603 succ_rl->clobbers_length += pred_rl->clobbers_length;
2605 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2607 /* Mem read/write lists are inherited by successor. */
2608 concat_insn_mem_list (pred_deps->pending_read_insns,
2609 pred_deps->pending_read_mems,
2610 &succ_deps->pending_read_insns,
2611 &succ_deps->pending_read_mems);
2612 concat_insn_mem_list (pred_deps->pending_write_insns,
2613 pred_deps->pending_write_mems,
2614 &succ_deps->pending_write_insns,
2615 &succ_deps->pending_write_mems);
2617 succ_deps->pending_jump_insns
2618 = concat_INSN_LIST (pred_deps->pending_jump_insns,
2619 succ_deps->pending_jump_insns);
2620 succ_deps->last_pending_memory_flush
2621 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2622 succ_deps->last_pending_memory_flush);
2624 succ_deps->pending_read_list_length += pred_deps->pending_read_list_length;
2625 succ_deps->pending_write_list_length += pred_deps->pending_write_list_length;
2626 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2628 /* last_function_call is inherited by successor. */
2629 succ_deps->last_function_call
2630 = concat_INSN_LIST (pred_deps->last_function_call,
2631 succ_deps->last_function_call);
2633 /* last_function_call_may_noreturn is inherited by successor. */
2634 succ_deps->last_function_call_may_noreturn
2635 = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn,
2636 succ_deps->last_function_call_may_noreturn);
2638 /* sched_before_next_call is inherited by successor. */
2639 succ_deps->sched_before_next_call
2640 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2641 succ_deps->sched_before_next_call);
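/* Each concat_* call above conses the predecessor's list onto the
   successor's without disturbing the predecessor, so the result may share
   its tail with the successor's old list.  A sketch of that operation on
   a plain cons list standing in for INSN_LIST (stub type; allocation
   error handling elided):  */
#if 0
#include <stdlib.h>

struct toy_node { int insn; struct toy_node *next; };

static struct toy_node *
toy_concat (const struct toy_node *pred, struct toy_node *succ)
{
  if (pred == NULL)
    return succ;
  struct toy_node *n = (struct toy_node *) malloc (sizeof *n);
  n->insn = pred->insn;                    /* copy PRED's node ... */
  n->next = toy_concat (pred->next, succ); /* ... then the rest */
  return n;
}
#endif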
2644 /* After computing the dependencies for block BB, propagate the dependencies
2645 found in PRED_DEPS to the successors of the block. */
2646 static void
2647 propagate_deps (int bb, struct deps_desc *pred_deps)
2649 basic_block block = BASIC_BLOCK (BB_TO_BLOCK (bb));
2650 edge_iterator ei;
2651 edge e;
2653 /* bb's structures are inherited by its successors. */
2654 FOR_EACH_EDGE (e, ei, block->succs)
2656 /* Only bbs "below" bb, in the same region, are interesting. */
2657 if (e->dest == EXIT_BLOCK_PTR
2658 || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index)
2659 || BLOCK_TO_BB (e->dest->index) <= bb)
2660 continue;
2662 deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2665 /* These lists should point to the right place, for correct
2666 freeing later. */
2667 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2668 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2669 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2670 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2671 bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns;
2673 /* Can't allow these to be freed twice. */
2674 pred_deps->pending_read_insns = 0;
2675 pred_deps->pending_read_mems = 0;
2676 pred_deps->pending_write_insns = 0;
2677 pred_deps->pending_write_mems = 0;
2678 pred_deps->pending_jump_insns = 0;
2681 /* Compute dependences inside bb. In a multi-block region:
2682 (1) a bb is analyzed after its predecessors, and (2) the lists in
2683 effect at the end of bb (after analyzing for bb) are inherited by
2684 bb's successors.
2686 Specifically for reg-reg data dependences, the block insns are
2687 scanned by sched_analyze () top-to-bottom. Three lists are
2688 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2689 reg_last[].implicit_sets for implicit hard register DEFs, and
2690 reg_last[].uses for register USEs.
2692 When analysis is completed for bb, we update for its successors:
2693 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2694 ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb])
2695 ; - USES[succ] = Union (USES [succ], USES [bb])
2697 The mechanism for computing mem-mem data dependence is very
2698 similar, and the result is interblock dependences in the region. */
2700 static void
2701 compute_block_dependences (int bb)
2703 rtx head, tail;
2704 struct deps_desc tmp_deps;
2706 tmp_deps = bb_deps[bb];
2708 /* Do the analysis for this block. */
2709 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2710 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2712 sched_analyze (&tmp_deps, head, tail);
2714 /* Selective scheduling handles control dependencies by itself. */
2715 if (!sel_sched_p ())
2716 add_branch_dependences (head, tail);
2718 if (current_nr_blocks > 1)
2719 propagate_deps (bb, &tmp_deps);
2721 /* Free up the INSN_LISTs. */
2722 free_deps (&tmp_deps);
2724 if (targetm.sched.dependencies_evaluation_hook)
2725 targetm.sched.dependencies_evaluation_hook (head, tail);
2728 /* Free dependencies of instructions inside BB. */
2729 static void
2730 free_block_dependencies (int bb)
2732 rtx head;
2733 rtx tail;
2735 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2737 if (no_real_insns_p (head, tail))
2738 return;
2740 sched_free_deps (head, tail, true);
2743 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2744 them to the unused_*_list variables, so that they can be reused. */
2746 static void
2747 free_pending_lists (void)
2749 int bb;
2751 for (bb = 0; bb < current_nr_blocks; bb++)
2753 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2754 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2755 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2756 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2757 free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns);
2761 /* Print dependences for debugging starting from FROM_BB.
2762 Callable from debugger. */
2765 DEBUG_FUNCTION void
2766 debug_rgn_dependencies (int from_bb)
2768 int bb;
2770 fprintf (sched_dump,
2771 ";; --------------- forward dependences: ------------ \n");
2773 for (bb = from_bb; bb < current_nr_blocks; bb++)
2775 rtx head, tail;
2777 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2778 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2779 BB_TO_BLOCK (bb), bb);
2781 debug_dependencies (head, tail);
2785 /* Print dependencies information for instructions between HEAD and TAIL.
2786 ??? This function would probably fit best in haifa-sched.c. */
2787 void debug_dependencies (rtx head, rtx tail)
2789 rtx insn;
2790 rtx next_tail = NEXT_INSN (tail);
2792 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2793 "insn", "code", "bb", "dep", "prio", "cost",
2794 "reservation");
2795 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
2796 "----", "----", "--", "---", "----", "----",
2797 "-----------");
2799 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2801 if (! INSN_P (insn))
2803 int n;
2804 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2805 if (NOTE_P (insn))
2807 n = NOTE_KIND (insn);
2808 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2810 else
2811 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2812 continue;
2815 fprintf (sched_dump,
2816 ";; %s%5d%6d%6d%6d%6d%6d ",
2817 (SCHED_GROUP_P (insn) ? "+" : " "),
2818 INSN_UID (insn),
2819 INSN_CODE (insn),
2820 BLOCK_NUM (insn),
2821 sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK),
2822 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2823 : INSN_PRIORITY (insn))
2824 : INSN_PRIORITY (insn)),
2825 (sel_sched_p () ? (sched_emulate_haifa_p ? -1
2826 : insn_cost (insn))
2827 : insn_cost (insn)));
2829 if (recog_memoized (insn) < 0)
2830 fprintf (sched_dump, "nothing");
2831 else
2832 print_reservation (sched_dump, insn);
2834 fprintf (sched_dump, "\t: ");
2836 sd_iterator_def sd_it;
2837 dep_t dep;
2839 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
2840 fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
2841 DEP_NONREG (dep) ? "n" : "",
2842 DEP_MULTIPLE (dep) ? "m" : "");
2844 fprintf (sched_dump, "\n");
2847 fprintf (sched_dump, "\n");
2850 /* Returns true if all the basic blocks of the current region have
2851 BB_DISABLE_SCHEDULE set, which means not to schedule that region. */
2852 bool
2853 sched_is_disabled_for_current_region_p (void)
2855 int bb;
2857 for (bb = 0; bb < current_nr_blocks; bb++)
2858 if (!(BASIC_BLOCK (BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE))
2859 return false;
2861 return true;
2864 /* Free all region dependencies saved in INSN_BACK_DEPS and
2865 INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly
2866 when scheduling, so this function is supposed to be called from
2867 the selective scheduling only. */
2868 void
2869 free_rgn_deps (void)
2871 int bb;
2873 for (bb = 0; bb < current_nr_blocks; bb++)
2875 rtx head, tail;
2877 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2878 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2880 sched_free_deps (head, tail, false);
2884 static int rgn_n_insns;
2886 /* Compute insn priority for the current region. */
2887 void
2888 compute_priorities (void)
2890 int bb;
2892 current_sched_info->sched_max_insns_priority = 0;
2893 for (bb = 0; bb < current_nr_blocks; bb++)
2895 rtx head, tail;
2897 gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
2898 get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
2900 if (no_real_insns_p (head, tail))
2901 continue;
2903 rgn_n_insns += set_priorities (head, tail);
2905 current_sched_info->sched_max_insns_priority++;
2908 /* (Re-)initialize the arrays of DFA states at the end of each basic block.
2910 SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be
2911 zero for the first call to this function, to allocate the arrays for the
2912 first time.
2914 This function is called once during initialization of the scheduler, and
2915 called again to resize the arrays if new basic blocks have been created,
2916 for example for speculation recovery code. */
2918 static void
2919 realloc_bb_state_array (int saved_last_basic_block)
2921 char *old_bb_state_array = bb_state_array;
2922 size_t lbb = (size_t) last_basic_block;
2923 size_t slbb = (size_t) saved_last_basic_block;
2925 /* Nothing to do if nothing changed since the last time this was called. */
2926 if (saved_last_basic_block == last_basic_block)
2927 return;
2929 /* The selective scheduler doesn't use the state arrays. */
2930 if (sel_sched_p ())
2932 gcc_assert (bb_state_array == NULL && bb_state == NULL);
2933 return;
2936 gcc_checking_assert (saved_last_basic_block == 0
2937 || (bb_state_array != NULL && bb_state != NULL));
2939 bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size);
2940 bb_state = XRESIZEVEC (state_t, bb_state, lbb);
2942 /* If BB_STATE_ARRAY has moved, fix up all the pointers into the state
2943 array. Otherwise fix up only the newly allocated ones. For the state
2944 array itself, only initialize the new entries. */
2945 bool bb_state_array_moved = (bb_state_array != old_bb_state_array);
2946 for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++)
2947 bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size);
2948 for (size_t i = slbb; i < lbb; i++)
2949 state_reset (bb_state[i]);
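/* The function above keeps one flat char buffer sliced into per-block DFA
   states, so BB_STATE[i] must be recomputed whenever the buffer
   reallocates.  A sketch of that resize-and-refixup pattern on generic
   stand-ins (toy globals; realloc failure checks elided):  */
#if 0
#include <stdlib.h>
#include <string.h>

static char *toy_arena;      /* flat buffer of N states */
static void **toy_state;     /* toy_state[i] points into toy_arena */

static void
toy_resize_states (size_t old_n, size_t new_n, size_t state_size)
{
  char *old_arena = toy_arena;
  toy_arena = (char *) realloc (toy_arena, new_n * state_size);
  toy_state = (void **) realloc (toy_state, new_n * sizeof *toy_state);

  /* If the buffer moved, every pointer is stale; otherwise only the
     new tail needs to be set up.  */
  for (size_t i = (toy_arena == old_arena) ? old_n : 0; i < new_n; i++)
    toy_state[i] = toy_arena + i * state_size;

  /* Initialize only the new entries (cf. state_reset).  */
  memset (toy_arena + old_n * state_size, 0, (new_n - old_n) * state_size);
}
#endif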
2952 /* Free the arrays of DFA states at the end of each basic block. */
2954 static void
2955 free_bb_state_array (void)
2957 free (bb_state_array);
2958 free (bb_state);
2959 bb_state_array = NULL;
2960 bb_state = NULL;
2963 /* Schedule a region. A region is either an inner loop, a loop-free
2964 subroutine, or a single basic block. Each bb in the region is
2965 scheduled after its flow predecessors. */
2967 static void
2968 schedule_region (int rgn)
2970 int bb;
2971 int sched_rgn_n_insns = 0;
2973 rgn_n_insns = 0;
2975 rgn_setup_region (rgn);
2977 /* Don't schedule a region whose blocks all have
2978 BB_DISABLE_SCHEDULE set. */
2979 if (sched_is_disabled_for_current_region_p ())
2980 return;
2982 sched_rgn_compute_dependencies (rgn);
2984 sched_rgn_local_init (rgn);
2986 /* Set priorities. */
2987 compute_priorities ();
2989 sched_extend_ready_list (rgn_n_insns);
2991 if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
2993 sched_init_region_reg_pressure_info ();
2994 for (bb = 0; bb < current_nr_blocks; bb++)
2996 basic_block first_bb, last_bb;
2997 rtx head, tail;
2999 first_bb = EBB_FIRST_BB (bb);
3000 last_bb = EBB_LAST_BB (bb);
3002 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3004 if (no_real_insns_p (head, tail))
3006 gcc_assert (first_bb == last_bb);
3007 continue;
3009 sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3013 /* Now we can schedule all blocks. */
3014 for (bb = 0; bb < current_nr_blocks; bb++)
3016 basic_block first_bb, last_bb, curr_bb;
3017 rtx head, tail;
3019 first_bb = EBB_FIRST_BB (bb);
3020 last_bb = EBB_LAST_BB (bb);
3022 get_ebb_head_tail (first_bb, last_bb, &head, &tail);
3024 if (no_real_insns_p (head, tail))
3026 gcc_assert (first_bb == last_bb);
3027 continue;
3030 current_sched_info->prev_head = PREV_INSN (head);
3031 current_sched_info->next_tail = NEXT_INSN (tail);
3033 remove_notes (head, tail);
3035 unlink_bb_notes (first_bb, last_bb);
3037 target_bb = bb;
3039 gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
3040 current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
3042 curr_bb = first_bb;
3043 if (dbg_cnt (sched_block))
3045 edge f;
3046 int saved_last_basic_block = last_basic_block;
3048 schedule_block (&curr_bb, bb_state[first_bb->index]);
3049 gcc_assert (EBB_FIRST_BB (bb) == first_bb);
3050 sched_rgn_n_insns += sched_n_insns;
3051 realloc_bb_state_array (saved_last_basic_block);
3052 f = find_fallthru_edge (last_bb->succs);
3053 if (f && f->probability * 100 / REG_BR_PROB_BASE >=
3054 PARAM_VALUE (PARAM_SCHED_STATE_EDGE_PROB_CUTOFF))
3056 memcpy (bb_state[f->dest->index], curr_state,
3057 dfa_state_size);
3058 if (sched_verbose >= 5)
3059 fprintf (sched_dump, "saving state for edge %d->%d\n",
3060 f->src->index, f->dest->index);
3063 else
3065 sched_rgn_n_insns += rgn_n_insns;
3068 /* Clean up. */
3069 if (current_nr_blocks > 1)
3070 free_trg_info ();
3073 /* Sanity check: verify that all region insns were scheduled. */
3074 gcc_assert (sched_rgn_n_insns == rgn_n_insns);
3076 sched_finish_ready_list ();
3078 /* Done with this region. */
3079 sched_rgn_local_finish ();
3081 /* Free dependencies. */
3082 for (bb = 0; bb < current_nr_blocks; ++bb)
3083 free_block_dependencies (bb);
3085 gcc_assert (haifa_recovery_bb_ever_added_p
3086 || deps_pools_are_empty_p ());
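/* The fallthrough test in the scheduling loop above rescales the edge
   probability to a percentage before comparing it with the cutoff
   parameter.  A sketch of that arithmetic (REG_BR_PROB_BASE is 10000 in
   this tree; the helper name is a stand-in):  */
#if 0
static int
toy_worth_saving_state (int edge_prob, int cutoff_percent)
{
  const int prob_base = 10000;                  /* REG_BR_PROB_BASE */
  return edge_prob * 100 / prob_base >= cutoff_percent;
}
#endif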
3089 /* Initialize data structures for region scheduling. */
3091 void
3092 sched_rgn_init (bool single_blocks_p)
3094 min_spec_prob = ((PARAM_VALUE (PARAM_MIN_SPEC_PROB) * REG_BR_PROB_BASE)
3095 / 100);
3097 nr_inter = 0;
3098 nr_spec = 0;
3100 extend_regions ();
3102 CONTAINING_RGN (ENTRY_BLOCK) = -1;
3103 CONTAINING_RGN (EXIT_BLOCK) = -1;
3105 realloc_bb_state_array (0);
3107 /* Compute regions for scheduling. */
3108 if (single_blocks_p
3109 || n_basic_blocks == NUM_FIXED_BLOCKS + 1
3110 || !flag_schedule_interblock
3111 || is_cfg_nonregular ())
3113 find_single_block_region (sel_sched_p ());
3115 else
3117 /* Compute the dominators and post dominators. */
3118 if (!sel_sched_p ())
3119 calculate_dominance_info (CDI_DOMINATORS);
3121 /* Find regions. */
3122 find_rgns ();
3124 if (sched_verbose >= 3)
3125 debug_regions ();
3127 /* For now. This will move as more and more of haifa is converted
3128 to using the cfg code. */
3129 if (!sel_sched_p ())
3130 free_dominance_info (CDI_DOMINATORS);
3133 gcc_assert (0 < nr_regions && nr_regions <= n_basic_blocks);
3135 RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1) +
3136 RGN_NR_BLOCKS (nr_regions - 1));
3139 /* Free data structures for region scheduling. */
3140 void
3141 sched_rgn_finish (void)
3143 free_bb_state_array ();
3145 /* Reposition the prologue and epilogue notes in case we moved the
3146 prologue/epilogue insns. */
3147 if (reload_completed)
3148 reposition_prologue_and_epilogue_notes ();
3150 if (sched_verbose)
3152 if (reload_completed == 0
3153 && flag_schedule_interblock)
3155 fprintf (sched_dump,
3156 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3157 nr_inter, nr_spec);
3159 else
3160 gcc_assert (nr_inter <= 0);
3161 fprintf (sched_dump, "\n\n");
3164 nr_regions = 0;
3166 free (rgn_table);
3167 rgn_table = NULL;
3169 free (rgn_bb_table);
3170 rgn_bb_table = NULL;
3172 free (block_to_bb);
3173 block_to_bb = NULL;
3175 free (containing_rgn);
3176 containing_rgn = NULL;
3178 free (ebb_head);
3179 ebb_head = NULL;
3182 /* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
3183 point to the region RGN. */
3184 void
3185 rgn_setup_region (int rgn)
3187 int bb;
3189 /* Set variables for the current region. */
3190 current_nr_blocks = RGN_NR_BLOCKS (rgn);
3191 current_blocks = RGN_BLOCKS (rgn);
3193 /* EBB_HEAD is a region-scope structure. But we realloc it for
3194 each region to save time/memory/something else.
3195 See comments in rgn_add_block for the reasons we allocate +1 element. */
3196 ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
3197 for (bb = 0; bb <= current_nr_blocks; bb++)
3198 ebb_head[bb] = current_blocks + bb;
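/* With the +1 sentinel set up above, ebb_head[bb] .. ebb_head[bb + 1] - 1
   is always a valid half-open slice of rgn_bb_table.  A toy walker making
   that contract explicit (hypothetical helper; the array arguments mirror
   the real globals):  */
#if 0
static void
toy_walk_ebb_blocks (int bb, const int *ebb_head_a,
                     const int *rgn_bb_table_a, void (*visit) (int block))
{
  for (int pos = ebb_head_a[bb]; pos < ebb_head_a[bb + 1]; pos++)
    visit (rgn_bb_table_a[pos]);
}
#endif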
3201 /* Compute instruction dependencies in region RGN. */
3202 void
3203 sched_rgn_compute_dependencies (int rgn)
3205 if (!RGN_DONT_CALC_DEPS (rgn))
3207 int bb;
3209 if (sel_sched_p ())
3210 sched_emulate_haifa_p = 1;
3212 init_deps_global ();
3214 /* Initializations for region data dependence analysis. */
3215 bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
3216 for (bb = 0; bb < current_nr_blocks; bb++)
3217 init_deps (bb_deps + bb, false);
3219 /* Initialize bitmap used in add_branch_dependences. */
3220 insn_referenced = sbitmap_alloc (sched_max_luid);
3221 bitmap_clear (insn_referenced);
3223 /* Compute backward dependencies. */
3224 for (bb = 0; bb < current_nr_blocks; bb++)
3225 compute_block_dependences (bb);
3227 sbitmap_free (insn_referenced);
3228 free_pending_lists ();
3229 finish_deps_global ();
3230 free (bb_deps);
3232 /* We don't want to recalculate this twice. */
3233 RGN_DONT_CALC_DEPS (rgn) = 1;
3235 if (sel_sched_p ())
3236 sched_emulate_haifa_p = 0;
3238 else
3239 /* (This is a recovery block. It is always a single block region.)
3240 OR (We use selective scheduling.) */
3241 gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
3244 /* Init region data structures for region RGN. */
3246 void
3247 sched_rgn_local_init (int rgn)
3249 int bb;
3251 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
3252 if (current_nr_blocks > 1)
3254 basic_block block;
3255 edge e;
3256 edge_iterator ei;
3258 prob = XNEWVEC (int, current_nr_blocks);
3260 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
3261 bitmap_vector_clear (dom, current_nr_blocks);
3263 /* Use ->aux to implement EDGE_TO_BIT mapping. */
3264 rgn_nr_edges = 0;
3265 FOR_EACH_BB (block)
3267 if (CONTAINING_RGN (block->index) != rgn)
3268 continue;
3269 FOR_EACH_EDGE (e, ei, block->succs)
3270 SET_EDGE_TO_BIT (e, rgn_nr_edges++);
3273 rgn_edges = XNEWVEC (edge, rgn_nr_edges);
3274 rgn_nr_edges = 0;
3275 FOR_EACH_BB (block)
3277 if (CONTAINING_RGN (block->index) != rgn)
3278 continue;
3279 FOR_EACH_EDGE (e, ei, block->succs)
3280 rgn_edges[rgn_nr_edges++] = e;
3283 /* Split edges. */
3284 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3285 bitmap_vector_clear (pot_split, current_nr_blocks);
3286 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
3287 bitmap_vector_clear (ancestor_edges, current_nr_blocks);
3289 /* Compute probabilities, dominators, split_edges. */
3290 for (bb = 0; bb < current_nr_blocks; bb++)
3291 compute_dom_prob_ps (bb);
3293 /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
3294 /* We don't need them anymore. But we want to avoid duplication of
3295 aux fields in the newly created edges. */
3296 FOR_EACH_BB (block)
3298 if (CONTAINING_RGN (block->index) != rgn)
3299 continue;
3300 FOR_EACH_EDGE (e, ei, block->succs)
3301 e->aux = NULL;
3306 /* Free data computed for the finished region. */
3307 void
3308 sched_rgn_local_free (void)
3310 free (prob);
3311 sbitmap_vector_free (dom);
3312 sbitmap_vector_free (pot_split);
3313 sbitmap_vector_free (ancestor_edges);
3314 free (rgn_edges);
3317 /* Free the finished region's data, if sched_rgn_local_init allocated any. */
3318 void
3319 sched_rgn_local_finish (void)
3321 if (current_nr_blocks > 1 && !sel_sched_p ())
3323 sched_rgn_local_free ();
3327 /* Setup scheduler infos. */
3328 void
3329 rgn_setup_common_sched_info (void)
3331 memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
3332 sizeof (rgn_common_sched_info));
3334 rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
3335 rgn_common_sched_info.add_block = rgn_add_block;
3336 rgn_common_sched_info.estimate_number_of_insns
3337 = rgn_estimate_number_of_insns;
3338 rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;
3340 common_sched_info = &rgn_common_sched_info;
3343 /* Setup all *_sched_info structures (for the Haifa frontend
3344 and for the dependence analysis) in the interblock scheduler. */
3345 void
3346 rgn_setup_sched_infos (void)
3348 if (!sel_sched_p ())
3349 memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
3350 sizeof (rgn_sched_deps_info));
3351 else
3352 memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
3353 sizeof (rgn_sched_deps_info));
3355 sched_deps_info = &rgn_sched_deps_info;
3357 memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
3358 current_sched_info = &rgn_sched_info;
3361 /* The one entry point in this file. */
3362 void
3363 schedule_insns (void)
3365 int rgn;
3367 /* Taking care of this degenerate case makes the rest of
3368 this code simpler. */
3369 if (n_basic_blocks == NUM_FIXED_BLOCKS)
3370 return;
3372 rgn_setup_common_sched_info ();
3373 rgn_setup_sched_infos ();
3375 haifa_sched_init ();
3376 sched_rgn_init (reload_completed);
3378 bitmap_initialize (&not_in_df, 0);
3379 bitmap_clear (&not_in_df);
3381 /* Schedule every region in the subroutine. */
3382 for (rgn = 0; rgn < nr_regions; rgn++)
3383 if (dbg_cnt (sched_region))
3384 schedule_region (rgn);
3386 /* Clean up. */
3387 sched_rgn_finish ();
3388 bitmap_clear (&not_in_df);
3390 haifa_sched_finish ();
3393 /* INSN has been added to/removed from current region. */
3394 static void
3395 rgn_add_remove_insn (rtx insn, int remove_p)
3397 if (!remove_p)
3398 rgn_n_insns++;
3399 else
3400 rgn_n_insns--;
3402 if (INSN_BB (insn) == target_bb)
3404 if (!remove_p)
3405 target_n_insns++;
3406 else
3407 target_n_insns--;
3411 /* Extend internal data structures. */
3412 void
3413 extend_regions (void)
3415 rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
3416 rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
3417 block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
3418 containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
3421 void
3422 rgn_make_new_region_out_of_new_block (basic_block bb)
3424 int i;
3426 i = RGN_BLOCKS (nr_regions);
3427 /* I - first free position in rgn_bb_table. */
3429 rgn_bb_table[i] = bb->index;
3430 RGN_NR_BLOCKS (nr_regions) = 1;
3431 RGN_HAS_REAL_EBB (nr_regions) = 0;
3432 RGN_DONT_CALC_DEPS (nr_regions) = 0;
3433 CONTAINING_RGN (bb->index) = nr_regions;
3434 BLOCK_TO_BB (bb->index) = 0;
3436 nr_regions++;
3438 RGN_BLOCKS (nr_regions) = i + 1;
3441 /* BB was added to ebb after AFTER. */
3442 static void
3443 rgn_add_block (basic_block bb, basic_block after)
3445 extend_regions ();
3446 bitmap_set_bit (&not_in_df, bb->index);
3448 if (after == 0 || after == EXIT_BLOCK_PTR)
3450 rgn_make_new_region_out_of_new_block (bb);
3451 RGN_DONT_CALC_DEPS (nr_regions - 1) = (after == EXIT_BLOCK_PTR);
3453 else
3455 int i, pos;
3457 /* We need to fix rgn_table, block_to_bb, containing_rgn
3458 and ebb_head. */
3460 BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
3462 /* We extend ebb_head to one more position to
3463 easily find the last position of the last ebb in
3464 the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
3465 is _always_ valid for access. */
3467 i = BLOCK_TO_BB (after->index) + 1;
3468 pos = ebb_head[i] - 1;
3469 /* Now POS is the index of the last block in the region. */
3471 /* Find index of basic block AFTER. */
3472 for (; rgn_bb_table[pos] != after->index; pos--)
3473 ;
3475 pos++;
3476 gcc_assert (pos > ebb_head[i - 1]);
3478 /* i - ebb right after "AFTER". */
3479 /* ebb_head[i] - VALID. */
3481 /* Source position: POS
3482 Destination position: POS + 1
3483 Last position:
3484 RGN_BLOCKS (nr_regions) - 1
3485 Number of elements to copy: (last_position) - (source_position) + 1
3486 */
3488 memmove (rgn_bb_table + pos + 1,
3489 rgn_bb_table + pos,
3490 ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
3491 * sizeof (*rgn_bb_table));
3493 rgn_bb_table[pos] = bb->index;
3495 for (; i <= current_nr_blocks; i++)
3496 ebb_head [i]++;
3498 i = CONTAINING_RGN (after->index);
3499 CONTAINING_RGN (bb->index) = i;
3501 RGN_HAS_REAL_EBB (i) = 1;
3503 for (++i; i <= nr_regions; i++)
3504 RGN_BLOCKS (i)++;
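/* The memmove above is a plain "insert one element into a flat array"
   shift; afterwards every ebb_head/RGN_BLOCKS offset past the insertion
   point gains 1.  A worked toy example: if the table holds ... 4 7 9 ...
   and block 12 is added right after block 7, the tail shifts right and
   the table becomes ... 4 7 12 9 ... (stub helper, not the real code):  */
#if 0
#include <string.h>

static void
toy_insert_block (int *table, int used, int pos, int block)
{
  memmove (table + pos + 1, table + pos,
           (used - pos) * sizeof *table);  /* shift the tail right */
  table[pos] = block;
}
#endif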
3508 /* Fix internal data after interblock movement of jump instruction.
3509 For parameter meaning please refer to
3510 sched-int.h: struct sched_info: fix_recovery_cfg. */
3511 static void
3512 rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
3514 int old_pos, new_pos, i;
3516 BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
3518 for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
3519 rgn_bb_table[old_pos] != check_bb_nexti;
3520 old_pos--)
3521 ;
3522 gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
3524 for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
3525 rgn_bb_table[new_pos] != bbi;
3526 new_pos--)
3527 ;
3528 new_pos++;
3529 gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
3531 gcc_assert (new_pos < old_pos);
3533 memmove (rgn_bb_table + new_pos + 1,
3534 rgn_bb_table + new_pos,
3535 (old_pos - new_pos) * sizeof (*rgn_bb_table));
3537 rgn_bb_table[new_pos] = check_bb_nexti;
3539 for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
3540 ebb_head[i]++;
3543 /* Return next block in ebb chain. For parameter meaning please refer to
3544 sched-int.h: struct sched_info: advance_target_bb. */
3545 static basic_block
3546 advance_target_bb (basic_block bb, rtx insn)
3548 if (insn)
3549 return 0;
3551 gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
3552 && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
3553 return bb->next_bb;
3556 #endif
3558 static bool
3559 gate_handle_sched (void)
3561 #ifdef INSN_SCHEDULING
3562 return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
3563 #else
3564 return 0;
3565 #endif
3568 /* Run instruction scheduler. */
3569 static unsigned int
3570 rest_of_handle_sched (void)
3572 #ifdef INSN_SCHEDULING
3573 if (flag_selective_scheduling
3574 && ! maybe_skip_selective_scheduling ())
3575 run_selective_scheduling ();
3576 else
3577 schedule_insns ();
3578 #endif
3579 return 0;
3582 static bool
3583 gate_handle_sched2 (void)
3585 #ifdef INSN_SCHEDULING
3586 return optimize > 0 && flag_schedule_insns_after_reload
3587 && !targetm.delay_sched2 && dbg_cnt (sched2_func);
3588 #else
3589 return 0;
3590 #endif
3593 /* Run second scheduling pass after reload. */
3594 static unsigned int
3595 rest_of_handle_sched2 (void)
3597 #ifdef INSN_SCHEDULING
3598 if (flag_selective_scheduling2
3599 && ! maybe_skip_selective_scheduling ())
3600 run_selective_scheduling ();
3601 else
3603 /* Do control and data sched analysis again,
3604 and write some more of the results to dump file. */
3605 if (flag_sched2_use_superblocks)
3606 schedule_ebbs ();
3607 else
3608 schedule_insns ();
3610 #endif
3611 return 0;
3614 struct rtl_opt_pass pass_sched =
3617 RTL_PASS,
3618 "sched1", /* name */
3619 OPTGROUP_NONE, /* optinfo_flags */
3620 gate_handle_sched, /* gate */
3621 rest_of_handle_sched, /* execute */
3622 NULL, /* sub */
3623 NULL, /* next */
3624 0, /* static_pass_number */
3625 TV_SCHED, /* tv_id */
3626 0, /* properties_required */
3627 0, /* properties_provided */
3628 0, /* properties_destroyed */
3629 0, /* todo_flags_start */
3630 TODO_df_finish | TODO_verify_rtl_sharing |
3631 TODO_verify_flow |
3632 TODO_ggc_collect /* todo_flags_finish */
3636 struct rtl_opt_pass pass_sched2 =
3639 RTL_PASS,
3640 "sched2", /* name */
3641 OPTGROUP_NONE, /* optinfo_flags */
3642 gate_handle_sched2, /* gate */
3643 rest_of_handle_sched2, /* execute */
3644 NULL, /* sub */
3645 NULL, /* next */
3646 0, /* static_pass_number */
3647 TV_SCHED2, /* tv_id */
3648 0, /* properties_required */
3649 0, /* properties_provided */
3650 0, /* properties_destroyed */
3651 0, /* todo_flags_start */
3652 TODO_df_finish | TODO_verify_rtl_sharing |
3653 TODO_verify_flow |
3654 TODO_ggc_collect /* todo_flags_finish */