1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
 4    Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
 5    and currently maintained by, Jim Wilson (wilson@cygnus.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 02111-1307, USA. */
24 /* This pass implements list scheduling within basic blocks. It is
25 run twice: (1) after flow analysis, but before register allocation,
26 and (2) after register allocation.
28 The first run performs interblock scheduling, moving insns between
29 different blocks in the same "region", and the second runs only
30 basic block scheduling.
32 Interblock motions performed are useful motions and speculative
33 motions, including speculative loads. Motions requiring code
34 duplication are not supported. The identification of motion type
35 and the check for validity of speculative motions requires
36 construction and analysis of the function's control flow graph.
38 The main entry point for this pass is schedule_insns(), called for
39 each function. The work of the scheduler is organized in three
40 levels: (1) function level: insns are subject to splitting,
41 control-flow-graph is constructed, regions are computed (after
42 reload, each region is of one block), (2) region level: control
43 flow graph attributes required for interblock scheduling are
44 computed (dominators, reachability, etc.), data dependences and
45 priorities are computed, and (3) block level: insns in the block
46 are actually scheduled. */
48 #include "config.h"
49 #include "system.h"
50 #include "toplev.h"
51 #include "rtl.h"
52 #include "tm_p.h"
53 #include "hard-reg-set.h"
54 #include "basic-block.h"
55 #include "regs.h"
56 #include "function.h"
57 #include "flags.h"
58 #include "insn-config.h"
59 #include "insn-attr.h"
60 #include "except.h"
61 #include "toplev.h"
62 #include "recog.h"
63 #include "sched-int.h"
 65 /* Define when we want to count REG_DEAD notes before and after scheduling
66 for sanity checking. We can't do that when conditional execution is used,
67 as REG_DEAD exist only for unconditional deaths. */
69 #if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
70 #define CHECK_DEAD_NOTES 1
71 #else
72 #define CHECK_DEAD_NOTES 0
73 #endif
76 #ifdef INSN_SCHEDULING
77 /* Some accessor macros for h_i_d members only used within this file. */
78 #define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count)
79 #define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
80 #define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
82 #define MAX_RGN_BLOCKS 10
83 #define MAX_RGN_INSNS 100
85 /* nr_inter/spec counts interblock/speculative motion for the function. */
86 static int nr_inter, nr_spec;
88 /* Control flow graph edges are kept in circular lists. */
89 typedef struct
91 int from_block;
92 int to_block;
93 int next_in;
94 int next_out;
96 haifa_edge;
97 static haifa_edge *edge_table;
99 #define NEXT_IN(edge) (edge_table[edge].next_in)
100 #define NEXT_OUT(edge) (edge_table[edge].next_out)
101 #define FROM_BLOCK(edge) (edge_table[edge].from_block)
102 #define TO_BLOCK(edge) (edge_table[edge].to_block)
104 /* Number of edges in the control flow graph. (In fact, larger than
105 that by 1, since edge 0 is unused.) */
106 static int nr_edges;
108 /* Circular list of incoming/outgoing edges of a block. */
109 static int *in_edges;
110 static int *out_edges;
112 #define IN_EDGES(block) (in_edges[block])
113 #define OUT_EDGES(block) (out_edges[block])
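
/* Illustrative sketch (not part of the original file): how the circular
   out-edge list of a block is walked with the macros above, mirroring the
   do/while pattern used later in new_edge and compute_trg_info.  The
   function name is hypothetical.  */
#if 0
static int
count_out_edges (block)
     int block;
{
  int fst_edge, nxt_edge, n = 0;

  fst_edge = nxt_edge = OUT_EDGES (block);
  if (fst_edge == 0)
    return 0;			/* No outgoing edges recorded.  */

  do
    {
      /* TO_BLOCK (nxt_edge) is a successor of BLOCK in the cfg.  */
      n++;
      nxt_edge = NEXT_OUT (nxt_edge);
    }
  while (nxt_edge != fst_edge);

  return n;
}
#endif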
115 static int is_cfg_nonregular PARAMS ((void));
116 static int build_control_flow PARAMS ((struct edge_list *));
117 static void new_edge PARAMS ((int, int));
119 /* A region is the main entity for interblock scheduling: insns
120 are allowed to move between blocks in the same region, along
121 control flow graph edges, in the 'up' direction. */
122 typedef struct
124 int rgn_nr_blocks; /* Number of blocks in region. */
 125 int rgn_blocks; /* Blocks in the region (actually index in rgn_bb_table). */
127 region;
129 /* Number of regions in the procedure. */
130 static int nr_regions;
132 /* Table of region descriptions. */
133 static region *rgn_table;
135 /* Array of lists of regions' blocks. */
136 static int *rgn_bb_table;
138 /* Topological order of blocks in the region (if b2 is reachable from
139 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
140 always referred to by either block or b, while its topological
 141 order name (in the region) is referred to by bb. */
142 static int *block_to_bb;
144 /* The number of the region containing a block. */
145 static int *containing_rgn;
147 #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
148 #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
149 #define BLOCK_TO_BB(block) (block_to_bb[block])
150 #define CONTAINING_RGN(block) (containing_rgn[block])
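
/* Illustrative sketch (not part of the original file): enumerating the
   blocks of one region through the accessors above, in the same way
   debug_regions does below.  The function name is hypothetical.  */
#if 0
static void
visit_rgn_blocks (rgn)
     int rgn;
{
  int bb;

  for (bb = 0; bb < RGN_NR_BLOCKS (rgn); bb++)
    {
      /* rgn_bb_table holds the blocks of all regions back to back;
	 RGN_BLOCKS (rgn) is the index of this region's first block.  */
      int block = rgn_bb_table[RGN_BLOCKS (rgn) + bb];

      if (CONTAINING_RGN (block) != rgn || BLOCK_TO_BB (block) != bb)
	abort ();
    }
}
#endif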
152 void debug_regions PARAMS ((void));
153 static void find_single_block_region PARAMS ((void));
154 static void find_rgns PARAMS ((struct edge_list *, sbitmap *));
155 static int too_large PARAMS ((int, int *, int *));
157 extern void debug_live PARAMS ((int, int));
159 /* Blocks of the current region being scheduled. */
160 static int current_nr_blocks;
161 static int current_blocks;
163 /* The mapping from bb to block. */
164 #define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
166 typedef struct
168 int *first_member; /* Pointer to the list start in bitlst_table. */
169 int nr_members; /* The number of members of the bit list. */
171 bitlst;
173 static int bitlst_table_last;
174 static int bitlst_table_size;
175 static int *bitlst_table;
177 static void extract_bitlst PARAMS ((sbitmap, bitlst *));
179 /* Target info declarations.
181 The block currently being scheduled is referred to as the "target" block,
182 while other blocks in the region from which insns can be moved to the
183 target are called "source" blocks. The candidate structure holds info
184 about such sources: are they valid? Speculative? Etc. */
185 typedef bitlst bblst;
186 typedef struct
188 char is_valid;
189 char is_speculative;
190 int src_prob;
191 bblst split_bbs;
192 bblst update_bbs;
194 candidate;
196 static candidate *candidate_table;
198 /* A speculative motion requires checking live information on the path
199 from 'source' to 'target'. The split blocks are those to be checked.
200 After a speculative motion, live information should be modified in
201 the 'update' blocks.
203 Lists of split and update blocks for each candidate of the current
204 target are in array bblst_table. */
205 static int *bblst_table, bblst_size, bblst_last;
207 #define IS_VALID(src) ( candidate_table[src].is_valid )
208 #define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
209 #define SRC_PROB(src) ( candidate_table[src].src_prob )
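
/* Illustrative sketch (not part of the original file): a typical query of
   the candidate table for a source block SRC, using the macros above.
   The surrounding test is hypothetical (MIN_PROBABILITY is defined further
   below); compare compute_trg_info and init_ready_list for the real uses.  */
#if 0
  if (IS_VALID (src)
      && (! IS_SPECULATIVE (src) || SRC_PROB (src) >= MIN_PROBABILITY))
    {
      /* Insns in SRC may be considered for motion into the target block.  */
    }
#endif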
211 /* The bb being currently scheduled. */
212 static int target_bb;
214 /* List of edges. */
215 typedef bitlst edgelst;
217 /* Target info functions. */
218 static void split_edges PARAMS ((int, int, edgelst *));
219 static void compute_trg_info PARAMS ((int));
220 void debug_candidate PARAMS ((int));
221 void debug_candidates PARAMS ((int));
223 /* Dominators array: dom[i] contains the sbitmap of dominators of
224 bb i in the region. */
225 static sbitmap *dom;
227 /* bb 0 is the only region entry. */
228 #define IS_RGN_ENTRY(bb) (!bb)
230 /* Is bb_src dominated by bb_trg. */
231 #define IS_DOMINATED(bb_src, bb_trg) \
232 ( TEST_BIT (dom[bb_src], bb_trg) )
234 /* Probability: Prob[i] is a float in [0, 1] which is the probability
235 of bb i relative to the region entry. */
236 static float *prob;
238 /* The probability of bb_src, relative to bb_trg. Note, that while the
239 'prob[bb]' is a float in [0, 1], this macro returns an integer
240 in [0, 100]. */
241 #define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
242 prob[bb_trg])))
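
/* Worked example (illustrative, block numbers hypothetical): with
   prob[bb_src] == 0.25 and prob[bb_trg] == 0.50, GET_SRC_PROB (bb_src,
   bb_trg) evaluates to (int) (100.0 * (0.25 / 0.50)) == 50, i.e. the
   source block is executed on about half of the paths that reach the
   target block.  */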
244 /* Bit-set of edges, where bit i stands for edge i. */
245 typedef sbitmap edgeset;
247 /* Number of edges in the region. */
248 static int rgn_nr_edges;
250 /* Array of size rgn_nr_edges. */
251 static int *rgn_edges;
254 /* Mapping from each edge in the graph to its number in the rgn. */
255 static int *edge_to_bit;
256 #define EDGE_TO_BIT(edge) (edge_to_bit[edge])
 258 /* The split edges of a source bb are different for each target
259 bb. In order to compute this efficiently, the 'potential-split edges'
260 are computed for each bb prior to scheduling a region. This is actually
261 the split edges of each bb relative to the region entry.
263 pot_split[bb] is the set of potential split edges of bb. */
264 static edgeset *pot_split;
266 /* For every bb, a set of its ancestor edges. */
267 static edgeset *ancestor_edges;
269 static void compute_dom_prob_ps PARAMS ((int));
271 #define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
272 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
273 #define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
274 #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
276 /* Parameters affecting the decision of rank_for_schedule().
 277 ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
278 #define MIN_DIFF_PRIORITY 2
279 #define MIN_PROBABILITY 40
280 #define MIN_PROB_DIFF 10
282 /* Speculative scheduling functions. */
283 static int check_live_1 PARAMS ((int, rtx));
284 static void update_live_1 PARAMS ((int, rtx));
285 static int check_live PARAMS ((rtx, int));
286 static void update_live PARAMS ((rtx, int));
287 static void set_spec_fed PARAMS ((rtx));
288 static int is_pfree PARAMS ((rtx, int, int));
289 static int find_conditional_protection PARAMS ((rtx, int));
290 static int is_conditionally_protected PARAMS ((rtx, int, int));
291 static int may_trap_exp PARAMS ((rtx, int));
292 static int haifa_classify_insn PARAMS ((rtx));
293 static int is_prisky PARAMS ((rtx, int, int));
294 static int is_exception_free PARAMS ((rtx, int, int));
296 static void add_branch_dependences PARAMS ((rtx, rtx));
297 static void compute_block_backward_dependences PARAMS ((int));
298 void debug_dependencies PARAMS ((void));
300 static void init_regions PARAMS ((void));
301 static void schedule_region PARAMS ((int));
302 static void propagate_deps PARAMS ((int, struct deps *));
303 static void free_pending_lists PARAMS ((void));
305 /* Functions for construction of the control flow graph. */
307 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
309 We decide not to build the control flow graph if there is possibly more
 310 than one entry to the function, if computed branches exist, or if we
311 have nonlocal gotos. */
313 static int
314 is_cfg_nonregular ()
316 int b;
317 rtx insn;
318 RTX_CODE code;
320 /* If we have a label that could be the target of a nonlocal goto, then
321 the cfg is not well structured. */
322 if (nonlocal_goto_handler_labels)
323 return 1;
325 /* If we have any forced labels, then the cfg is not well structured. */
326 if (forced_labels)
327 return 1;
329 /* If this function has a computed jump, then we consider the cfg
330 not well structured. */
331 if (current_function_has_computed_jump)
332 return 1;
334 /* If we have exception handlers, then we consider the cfg not well
335 structured. ?!? We should be able to handle this now that flow.c
336 computes an accurate cfg for EH. */
337 if (exception_handler_labels)
338 return 1;
340 /* If we have non-jumping insns which refer to labels, then we consider
341 the cfg not well structured. */
 342 /* Check for labels referred to other than by jumps. */
343 for (b = 0; b < n_basic_blocks; b++)
344 for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
346 code = GET_CODE (insn);
347 if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
349 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
351 if (note
352 && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
353 && find_reg_note (NEXT_INSN (insn), REG_LABEL,
354 XEXP (note, 0))))
355 return 1;
358 if (insn == BLOCK_END (b))
359 break;
362 /* All the tests passed. Consider the cfg well structured. */
363 return 0;
366 /* Build the control flow graph and set nr_edges.
368 Instead of trying to build a cfg ourselves, we rely on flow to
369 do it for us. Stamp out useless code (and bug) duplication.
371 Return nonzero if an irregularity in the cfg is found which would
372 prevent cross block scheduling. */
374 static int
375 build_control_flow (edge_list)
376 struct edge_list *edge_list;
378 int i, unreachable, num_edges;
380 /* This already accounts for entry/exit edges. */
381 num_edges = NUM_EDGES (edge_list);
383 /* Unreachable loops with more than one basic block are detected
384 during the DFS traversal in find_rgns.
386 Unreachable loops with a single block are detected here. This
387 test is redundant with the one in find_rgns, but it's much
388 cheaper to go ahead and catch the trivial case here. */
389 unreachable = 0;
390 for (i = 0; i < n_basic_blocks; i++)
392 basic_block b = BASIC_BLOCK (i);
394 if (b->pred == NULL
395 || (b->pred->src == b
396 && b->pred->pred_next == NULL))
397 unreachable = 1;
400 /* ??? We can kill these soon. */
401 in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
402 out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
403 edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
405 nr_edges = 0;
406 for (i = 0; i < num_edges; i++)
408 edge e = INDEX_EDGE (edge_list, i);
410 if (e->dest != EXIT_BLOCK_PTR
411 && e->src != ENTRY_BLOCK_PTR)
412 new_edge (e->src->index, e->dest->index);
415 /* Increment by 1, since edge 0 is unused. */
416 nr_edges++;
418 return unreachable;
421 /* Record an edge in the control flow graph from SOURCE to TARGET.
423 In theory, this is redundant with the s_succs computed above, but
424 we have not converted all of haifa to use information from the
425 integer lists. */
427 static void
428 new_edge (source, target)
429 int source, target;
431 int e, next_edge;
432 int curr_edge, fst_edge;
434 /* Check for duplicates. */
435 fst_edge = curr_edge = OUT_EDGES (source);
436 while (curr_edge)
438 if (FROM_BLOCK (curr_edge) == source
439 && TO_BLOCK (curr_edge) == target)
441 return;
444 curr_edge = NEXT_OUT (curr_edge);
446 if (fst_edge == curr_edge)
447 break;
450 e = ++nr_edges;
452 FROM_BLOCK (e) = source;
453 TO_BLOCK (e) = target;
455 if (OUT_EDGES (source))
457 next_edge = NEXT_OUT (OUT_EDGES (source));
458 NEXT_OUT (OUT_EDGES (source)) = e;
459 NEXT_OUT (e) = next_edge;
461 else
463 OUT_EDGES (source) = e;
464 NEXT_OUT (e) = e;
467 if (IN_EDGES (target))
469 next_edge = NEXT_IN (IN_EDGES (target));
470 NEXT_IN (IN_EDGES (target)) = e;
471 NEXT_IN (e) = next_edge;
473 else
475 IN_EDGES (target) = e;
476 NEXT_IN (e) = e;
480 /* Translate a bit-set SET to a list BL of the bit-set members. */
482 static void
483 extract_bitlst (set, bl)
484 sbitmap set;
485 bitlst *bl;
487 int i;
 489 /* bitlst table space is reused in each call to extract_bitlst. */
490 bitlst_table_last = 0;
492 bl->first_member = &bitlst_table[bitlst_table_last];
493 bl->nr_members = 0;
495 /* Iterate over each word in the bitset. */
496 EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
498 bitlst_table[bitlst_table_last++] = i;
499 (bl->nr_members)++;
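
/* Illustrative sketch (not part of the original file): consuming the
   bitlst filled in by extract_bitlst.  The function name and the use made
   of each member are hypothetical; split_edges and compute_trg_info below
   show the real uses.  */
#if 0
static void
dump_set_members (set)
     sbitmap set;
{
  bitlst bl;
  int j;

  extract_bitlst (set, &bl);
  for (j = 0; j < bl.nr_members; j++)
    fprintf (sched_dump, " %d", bl.first_member[j]);
  fprintf (sched_dump, "\n");
}
#endif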
504 /* Functions for the construction of regions. */
506 /* Print the regions, for debugging purposes. Callable from debugger. */
508 void
509 debug_regions ()
511 int rgn, bb;
513 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
514 for (rgn = 0; rgn < nr_regions; rgn++)
516 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
517 rgn_table[rgn].rgn_nr_blocks);
518 fprintf (sched_dump, ";;\tbb/block: ");
520 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
522 current_blocks = RGN_BLOCKS (rgn);
524 if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
525 abort ();
527 fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
530 fprintf (sched_dump, "\n\n");
534 /* Build a single block region for each basic block in the function.
535 This allows for using the same code for interblock and basic block
536 scheduling. */
538 static void
539 find_single_block_region ()
541 int i;
543 for (i = 0; i < n_basic_blocks; i++)
545 rgn_bb_table[i] = i;
546 RGN_NR_BLOCKS (i) = 1;
547 RGN_BLOCKS (i) = i;
548 CONTAINING_RGN (i) = i;
549 BLOCK_TO_BB (i) = 0;
551 nr_regions = n_basic_blocks;
554 /* Update number of blocks and the estimate for number of insns
555 in the region. Return 1 if the region is "too large" for interblock
556 scheduling (compile time considerations), otherwise return 0. */
558 static int
559 too_large (block, num_bbs, num_insns)
560 int block, *num_bbs, *num_insns;
562 (*num_bbs)++;
563 (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
564 INSN_LUID (BLOCK_HEAD (block)));
565 if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
566 return 1;
567 else
568 return 0;
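
/* Illustrative sketch (hypothetical helper, not in the original file):
   how the size test above is used while a region is grown one block at a
   time; find_rgns below calls too_large in exactly this incremental way.  */
#if 0
static int
region_would_fit_p (blocks, n)
     int *blocks;
     int n;
{
  int i, num_bbs = 0, num_insns = 0;

  for (i = 0; i < n; i++)
    if (too_large (blocks[i], &num_bbs, &num_insns))
      return 0;			/* More than MAX_RGN_BLOCKS blocks or
				   MAX_RGN_INSNS (estimated) insns.  */
  return 1;
}
#endif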
571 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
 572 is still an inner loop. Put in max_hdr[blk] the header of the innermost
 573 loop containing blk. */
574 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
576 if (max_hdr[blk] == -1) \
577 max_hdr[blk] = hdr; \
578 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
579 RESET_BIT (inner, hdr); \
580 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
582 RESET_BIT (inner,max_hdr[blk]); \
583 max_hdr[blk] = hdr; \
587 /* Find regions for interblock scheduling.
589 A region for scheduling can be:
591 * A loop-free procedure, or
593 * A reducible inner loop, or
595 * A basic block not contained in any other region.
597 ?!? In theory we could build other regions based on extended basic
598 blocks or reverse extended basic blocks. Is it worth the trouble?
600 Loop blocks that form a region are put into the region's block list
601 in topological order.
603 This procedure stores its results into the following global (ick) variables
605 * rgn_nr
606 * rgn_table
607 * rgn_bb_table
608 * block_to_bb
609 * containing region
611 We use dominator relationships to avoid making regions out of non-reducible
612 loops.
614 This procedure needs to be converted to work on pred/succ lists instead
615 of edge tables. That would simplify it somewhat. */
617 static void
618 find_rgns (edge_list, dom)
619 struct edge_list *edge_list;
620 sbitmap *dom;
622 int *max_hdr, *dfs_nr, *stack, *degree;
623 char no_loops = 1;
624 int node, child, loop_head, i, head, tail;
625 int count = 0, sp, idx = 0, current_edge = out_edges[0];
626 int num_bbs, num_insns, unreachable;
627 int too_large_failure;
629 /* Note if an edge has been passed. */
630 sbitmap passed;
632 /* Note if a block is a natural loop header. */
633 sbitmap header;
 635 /* Note if a block is a natural inner loop header. */
636 sbitmap inner;
638 /* Note if a block is in the block queue. */
639 sbitmap in_queue;
 641 /* Note if a block is on the DFS stack. */
642 sbitmap in_stack;
644 int num_edges = NUM_EDGES (edge_list);
646 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
647 and a mapping from block to its loop header (if the block is contained
648 in a loop, else -1).
650 Store results in HEADER, INNER, and MAX_HDR respectively, these will
651 be used as inputs to the second traversal.
653 STACK, SP and DFS_NR are only used during the first traversal. */
655 /* Allocate and initialize variables for the first traversal. */
656 max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
657 dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
658 stack = (int *) xmalloc (nr_edges * sizeof (int));
660 inner = sbitmap_alloc (n_basic_blocks);
661 sbitmap_ones (inner);
663 header = sbitmap_alloc (n_basic_blocks);
664 sbitmap_zero (header);
666 passed = sbitmap_alloc (nr_edges);
667 sbitmap_zero (passed);
669 in_queue = sbitmap_alloc (n_basic_blocks);
670 sbitmap_zero (in_queue);
672 in_stack = sbitmap_alloc (n_basic_blocks);
673 sbitmap_zero (in_stack);
675 for (i = 0; i < n_basic_blocks; i++)
676 max_hdr[i] = -1;
678 /* DFS traversal to find inner loops in the cfg. */
680 sp = -1;
681 while (1)
683 if (current_edge == 0 || TEST_BIT (passed, current_edge))
685 /* We have reached a leaf node or a node that was already
686 processed. Pop edges off the stack until we find
687 an edge that has not yet been processed. */
688 while (sp >= 0
689 && (current_edge == 0 || TEST_BIT (passed, current_edge)))
691 /* Pop entry off the stack. */
692 current_edge = stack[sp--];
693 node = FROM_BLOCK (current_edge);
694 child = TO_BLOCK (current_edge);
695 RESET_BIT (in_stack, child);
696 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
697 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
698 current_edge = NEXT_OUT (current_edge);
 701 /* See if we have finished the DFS tree traversal. */
702 if (sp < 0 && TEST_BIT (passed, current_edge))
703 break;
705 /* Nope, continue the traversal with the popped node. */
706 continue;
709 /* Process a node. */
710 node = FROM_BLOCK (current_edge);
711 child = TO_BLOCK (current_edge);
712 SET_BIT (in_stack, node);
713 dfs_nr[node] = ++count;
715 /* If the successor is in the stack, then we've found a loop.
716 Mark the loop, if it is not a natural loop, then it will
717 be rejected during the second traversal. */
718 if (TEST_BIT (in_stack, child))
720 no_loops = 0;
721 SET_BIT (header, child);
722 UPDATE_LOOP_RELATIONS (node, child);
723 SET_BIT (passed, current_edge);
724 current_edge = NEXT_OUT (current_edge);
725 continue;
728 /* If the child was already visited, then there is no need to visit
729 it again. Just update the loop relationships and restart
730 with a new edge. */
731 if (dfs_nr[child])
733 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
734 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
735 SET_BIT (passed, current_edge);
736 current_edge = NEXT_OUT (current_edge);
737 continue;
740 /* Push an entry on the stack and continue DFS traversal. */
741 stack[++sp] = current_edge;
742 SET_BIT (passed, current_edge);
743 current_edge = OUT_EDGES (child);
745 /* This is temporary until haifa is converted to use rth's new
746 cfg routines which have true entry/exit blocks and the
747 appropriate edges from/to those blocks.
749 Generally we update dfs_nr for a node when we process its
750 out edge. However, if the node has no out edge then we will
751 not set dfs_nr for that node. This can confuse the scheduler
752 into thinking that we have unreachable blocks, which in turn
753 disables cross block scheduling.
755 So, if we have a node with no out edges, go ahead and mark it
756 as reachable now. */
757 if (current_edge == 0)
758 dfs_nr[child] = ++count;
761 /* Another check for unreachable blocks. The earlier test in
762 is_cfg_nonregular only finds unreachable blocks that do not
763 form a loop.
765 The DFS traversal will mark every block that is reachable from
766 the entry node by placing a nonzero value in dfs_nr. Thus if
767 dfs_nr is zero for any block, then it must be unreachable. */
768 unreachable = 0;
769 for (i = 0; i < n_basic_blocks; i++)
770 if (dfs_nr[i] == 0)
772 unreachable = 1;
773 break;
776 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
777 to hold degree counts. */
778 degree = dfs_nr;
780 for (i = 0; i < n_basic_blocks; i++)
781 degree[i] = 0;
782 for (i = 0; i < num_edges; i++)
784 edge e = INDEX_EDGE (edge_list, i);
786 if (e->dest != EXIT_BLOCK_PTR)
787 degree[e->dest->index]++;
790 /* Do not perform region scheduling if there are any unreachable
791 blocks. */
792 if (!unreachable)
794 int *queue;
796 if (no_loops)
797 SET_BIT (header, 0);
 799 /* Second traversal: find reducible inner loops and topologically sort
 800 blocks of each region. */
802 queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
804 /* Find blocks which are inner loop headers. We still have non-reducible
805 loops to consider at this point. */
806 for (i = 0; i < n_basic_blocks; i++)
808 if (TEST_BIT (header, i) && TEST_BIT (inner, i))
810 edge e;
811 int j;
 813 /* Now check that the loop is reducible. We do this separately
 814 from finding inner loops so that we do not find a reducible
815 loop which contains an inner non-reducible loop.
817 A simple way to find reducible/natural loops is to verify
818 that each block in the loop is dominated by the loop
819 header.
821 If there exists a block that is not dominated by the loop
822 header, then the block is reachable from outside the loop
823 and thus the loop is not a natural loop. */
824 for (j = 0; j < n_basic_blocks; j++)
826 /* First identify blocks in the loop, except for the loop
827 entry block. */
828 if (i == max_hdr[j] && i != j)
830 /* Now verify that the block is dominated by the loop
831 header. */
832 if (!TEST_BIT (dom[j], i))
833 break;
837 /* If we exited the loop early, then I is the header of
838 a non-reducible loop and we should quit processing it
839 now. */
840 if (j != n_basic_blocks)
841 continue;
843 /* I is a header of an inner loop, or block 0 in a subroutine
844 with no loops at all. */
845 head = tail = -1;
846 too_large_failure = 0;
847 loop_head = max_hdr[i];
849 /* Decrease degree of all I's successors for topological
850 ordering. */
851 for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
852 if (e->dest != EXIT_BLOCK_PTR)
853 --degree[e->dest->index];
855 /* Estimate # insns, and count # blocks in the region. */
856 num_bbs = 1;
857 num_insns = (INSN_LUID (BLOCK_END (i))
858 - INSN_LUID (BLOCK_HEAD (i)));
860 /* Find all loop latches (blocks with back edges to the loop
 861 header) or all the leaf blocks if the cfg has no loops.
863 Place those blocks into the queue. */
864 if (no_loops)
866 for (j = 0; j < n_basic_blocks; j++)
867 /* Leaf nodes have only a single successor which must
868 be EXIT_BLOCK. */
869 if (BASIC_BLOCK (j)->succ
870 && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
871 && BASIC_BLOCK (j)->succ->succ_next == NULL)
873 queue[++tail] = j;
874 SET_BIT (in_queue, j);
876 if (too_large (j, &num_bbs, &num_insns))
878 too_large_failure = 1;
879 break;
883 else
885 edge e;
887 for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
889 if (e->src == ENTRY_BLOCK_PTR)
890 continue;
892 node = e->src->index;
894 if (max_hdr[node] == loop_head && node != i)
896 /* This is a loop latch. */
897 queue[++tail] = node;
898 SET_BIT (in_queue, node);
900 if (too_large (node, &num_bbs, &num_insns))
902 too_large_failure = 1;
903 break;
909 /* Now add all the blocks in the loop to the queue.
911 We know the loop is a natural loop; however the algorithm
912 above will not always mark certain blocks as being in the
913 loop. Consider:
 914           node              children
 915            a                  b,c
 916            b                  c
 917            c                  a,d
 918            d                  b
 920         The algorithm in the DFS traversal may not mark B & D as part
 921         of the loop (i.e. they will not have max_hdr set to A).
923 We know they can not be loop latches (else they would have
924 had max_hdr set since they'd have a backedge to a dominator
925 block). So we don't need them on the initial queue.
927 We know they are part of the loop because they are dominated
928 by the loop header and can be reached by a backwards walk of
929 the edges starting with nodes on the initial queue.
931 It is safe and desirable to include those nodes in the
932 loop/scheduling region. To do so we would need to decrease
933 the degree of a node if it is the target of a backedge
934 within the loop itself as the node is placed in the queue.
936 We do not do this because I'm not sure that the actual
937 scheduling code will properly handle this case. ?!? */
939 while (head < tail && !too_large_failure)
941 edge e;
942 child = queue[++head];
944 for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
946 node = e->src->index;
948 /* See discussion above about nodes not marked as in
949 this loop during the initial DFS traversal. */
950 if (e->src == ENTRY_BLOCK_PTR
951 || max_hdr[node] != loop_head)
953 tail = -1;
954 break;
956 else if (!TEST_BIT (in_queue, node) && node != i)
958 queue[++tail] = node;
959 SET_BIT (in_queue, node);
961 if (too_large (node, &num_bbs, &num_insns))
963 too_large_failure = 1;
964 break;
970 if (tail >= 0 && !too_large_failure)
972 /* Place the loop header into list of region blocks. */
973 degree[i] = -1;
974 rgn_bb_table[idx] = i;
975 RGN_NR_BLOCKS (nr_regions) = num_bbs;
976 RGN_BLOCKS (nr_regions) = idx++;
977 CONTAINING_RGN (i) = nr_regions;
978 BLOCK_TO_BB (i) = count = 0;
980 /* Remove blocks from queue[] when their in degree
981 becomes zero. Repeat until no blocks are left on the
982 list. This produces a topological list of blocks in
983 the region. */
984 while (tail >= 0)
986 if (head < 0)
987 head = tail;
988 child = queue[head];
989 if (degree[child] == 0)
991 edge e;
993 degree[child] = -1;
994 rgn_bb_table[idx++] = child;
995 BLOCK_TO_BB (child) = ++count;
996 CONTAINING_RGN (child) = nr_regions;
997 queue[head] = queue[tail--];
 999 for (e = BASIC_BLOCK (child)->succ;
 1000 e;
 1001 e = e->succ_next)
1002 if (e->dest != EXIT_BLOCK_PTR)
1003 --degree[e->dest->index];
1005 else
1006 --head;
1008 ++nr_regions;
1012 free (queue);
1015 /* Any block that did not end up in a region is placed into a region
1016 by itself. */
1017 for (i = 0; i < n_basic_blocks; i++)
1018 if (degree[i] >= 0)
1020 rgn_bb_table[idx] = i;
1021 RGN_NR_BLOCKS (nr_regions) = 1;
1022 RGN_BLOCKS (nr_regions) = idx++;
1023 CONTAINING_RGN (i) = nr_regions++;
1024 BLOCK_TO_BB (i) = 0;
1027 free (max_hdr);
1028 free (dfs_nr);
1029 free (stack);
1030 free (passed);
1031 free (header);
1032 free (inner);
1033 free (in_queue);
1034 free (in_stack);
1037 /* Functions for regions scheduling information. */
1039 /* Compute dominators, probability, and potential-split-edges of bb.
1040 Assume that these values were already computed for bb's predecessors. */
1042 static void
1043 compute_dom_prob_ps (bb)
1044 int bb;
1046 int nxt_in_edge, fst_in_edge, pred;
1047 int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
1049 prob[bb] = 0.0;
1050 if (IS_RGN_ENTRY (bb))
1052 SET_BIT (dom[bb], 0);
1053 prob[bb] = 1.0;
1054 return;
1057 fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
1059 /* Initialize dom[bb] to '111..1'. */
1060 sbitmap_ones (dom[bb]);
1064 pred = FROM_BLOCK (nxt_in_edge);
1065 sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]);
1066 sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)]);
1068 SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge));
1070 nr_out_edges = 1;
1071 nr_rgn_out_edges = 0;
1072 fst_out_edge = OUT_EDGES (pred);
1073 nxt_out_edge = NEXT_OUT (fst_out_edge);
1075 sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[BLOCK_TO_BB (pred)]);
1077 SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge));
1079 /* The successor doesn't belong in the region? */
1080 if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
1081 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1082 ++nr_rgn_out_edges;
1084 while (fst_out_edge != nxt_out_edge)
1086 ++nr_out_edges;
1087 /* The successor doesn't belong in the region? */
1088 if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
1089 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1090 ++nr_rgn_out_edges;
1091 SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
1092 nxt_out_edge = NEXT_OUT (nxt_out_edge);
1096 /* Now nr_rgn_out_edges is the number of region-exit edges from
1097 pred, and nr_out_edges will be the number of pred out edges
1098 not leaving the region. */
1099 nr_out_edges -= nr_rgn_out_edges;
1100 if (nr_rgn_out_edges > 0)
1101 prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1102 else
1103 prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1104 nxt_in_edge = NEXT_IN (nxt_in_edge);
1106 while (fst_in_edge != nxt_in_edge);
1108 SET_BIT (dom[bb], bb);
1109 sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
1111 if (sched_verbose >= 2)
1112 fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1113 (int) (100.0 * prob[bb]));
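
/* Worked example (illustrative, block names hypothetical): if the region
   entry A has two out edges inside the region, to B and to C, and no
   region-exit edges, the computation above gives prob[A] = 1.0 and
   prob[B] = prob[C] = 1.0 / 2 = 0.5.  If B and C both fall through to D,
   then prob[D] = prob[B] + prob[C] = 1.0, and GET_SRC_PROB (D, A) == 100.  */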
1116 /* Functions for target info. */
 1118 /* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1119 Note that bb_trg dominates bb_src. */
1121 static void
1122 split_edges (bb_src, bb_trg, bl)
1123 int bb_src;
1124 int bb_trg;
1125 edgelst *bl;
1127 sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
1128 sbitmap_copy (src, pot_split[bb_src]);
1130 sbitmap_difference (src, src, pot_split[bb_trg]);
1131 extract_bitlst (src, bl);
1132 sbitmap_free (src);
1135 /* Find the valid candidate-source-blocks for the target block TRG, compute
1136 their probability, and check if they are speculative or not.
1137 For speculative sources, compute their update-blocks and split-blocks. */
1139 static void
1140 compute_trg_info (trg)
1141 int trg;
1143 candidate *sp;
1144 edgelst el;
1145 int check_block, update_idx;
1146 int i, j, k, fst_edge, nxt_edge;
1148 /* Define some of the fields for the target bb as well. */
1149 sp = candidate_table + trg;
1150 sp->is_valid = 1;
1151 sp->is_speculative = 0;
1152 sp->src_prob = 100;
1154 for (i = trg + 1; i < current_nr_blocks; i++)
1156 sp = candidate_table + i;
1158 sp->is_valid = IS_DOMINATED (i, trg);
1159 if (sp->is_valid)
1161 sp->src_prob = GET_SRC_PROB (i, trg);
1162 sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
1165 if (sp->is_valid)
1167 split_edges (i, trg, &el);
1168 sp->is_speculative = (el.nr_members) ? 1 : 0;
1169 if (sp->is_speculative && !flag_schedule_speculative)
1170 sp->is_valid = 0;
1173 if (sp->is_valid)
1175 char *update_blocks;
1177 /* Compute split blocks and store them in bblst_table.
1178 The TO block of every split edge is a split block. */
1179 sp->split_bbs.first_member = &bblst_table[bblst_last];
1180 sp->split_bbs.nr_members = el.nr_members;
1181 for (j = 0; j < el.nr_members; bblst_last++, j++)
1182 bblst_table[bblst_last] =
1183 TO_BLOCK (rgn_edges[el.first_member[j]]);
1184 sp->update_bbs.first_member = &bblst_table[bblst_last];
1186 /* Compute update blocks and store them in bblst_table.
1187 For every split edge, look at the FROM block, and check
1188 all out edges. For each out edge that is not a split edge,
1189 add the TO block to the update block list. This list can end
1190 up with a lot of duplicates. We need to weed them out to avoid
1191 overrunning the end of the bblst_table. */
1192 update_blocks = (char *) alloca (n_basic_blocks);
1193 memset (update_blocks, 0, n_basic_blocks);
1195 update_idx = 0;
1196 for (j = 0; j < el.nr_members; j++)
1198 check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
1199 fst_edge = nxt_edge = OUT_EDGES (check_block);
1202 if (! update_blocks[TO_BLOCK (nxt_edge)])
1204 for (k = 0; k < el.nr_members; k++)
1205 if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
1206 break;
1208 if (k >= el.nr_members)
1210 bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
1211 update_blocks[TO_BLOCK (nxt_edge)] = 1;
1212 update_idx++;
1216 nxt_edge = NEXT_OUT (nxt_edge);
1218 while (fst_edge != nxt_edge);
1220 sp->update_bbs.nr_members = update_idx;
1222 /* Make sure we didn't overrun the end of bblst_table. */
1223 if (bblst_last > bblst_size)
1224 abort ();
1226 else
1228 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
1230 sp->is_speculative = 0;
1231 sp->src_prob = 0;
1236 /* Print candidates info, for debugging purposes. Callable from debugger. */
1238 void
1239 debug_candidate (i)
1240 int i;
1242 if (!candidate_table[i].is_valid)
1243 return;
1245 if (candidate_table[i].is_speculative)
1247 int j;
1248 fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
1250 fprintf (sched_dump, "split path: ");
1251 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
1253 int b = candidate_table[i].split_bbs.first_member[j];
1255 fprintf (sched_dump, " %d ", b);
1257 fprintf (sched_dump, "\n");
1259 fprintf (sched_dump, "update path: ");
1260 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
1262 int b = candidate_table[i].update_bbs.first_member[j];
1264 fprintf (sched_dump, " %d ", b);
1266 fprintf (sched_dump, "\n");
1268 else
1270 fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1274 /* Print candidates info, for debugging purposes. Callable from debugger. */
1276 void
1277 debug_candidates (trg)
1278 int trg;
1280 int i;
1282 fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1283 BB_TO_BLOCK (trg), trg);
1284 for (i = trg + 1; i < current_nr_blocks; i++)
1285 debug_candidate (i);
 1288 /* Functions for speculative scheduling. */
1290 /* Return 0 if x is a set of a register alive in the beginning of one
1291 of the split-blocks of src, otherwise return 1. */
1293 static int
1294 check_live_1 (src, x)
1295 int src;
1296 rtx x;
1298 int i;
1299 int regno;
1300 rtx reg = SET_DEST (x);
1302 if (reg == 0)
1303 return 1;
1305 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
1306 || GET_CODE (reg) == SIGN_EXTRACT
1307 || GET_CODE (reg) == STRICT_LOW_PART)
1308 reg = XEXP (reg, 0);
1310 if (GET_CODE (reg) == PARALLEL)
1312 int i;
1314 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1315 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1316 if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1317 return 1;
1319 return 0;
1322 if (GET_CODE (reg) != REG)
1323 return 1;
1325 regno = REGNO (reg);
1327 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1329 /* Global registers are assumed live. */
1330 return 0;
1332 else
1334 if (regno < FIRST_PSEUDO_REGISTER)
1336 /* Check for hard registers. */
1337 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1338 while (--j >= 0)
1340 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1342 int b = candidate_table[src].split_bbs.first_member[i];
1344 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
1345 regno + j))
1347 return 0;
1352 else
 1354 /* Check for pseudo registers. */
1355 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1357 int b = candidate_table[src].split_bbs.first_member[i];
1359 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
1361 return 0;
1367 return 1;
1370 /* If x is a set of a register R, mark that R is alive in the beginning
1371 of every update-block of src. */
1373 static void
1374 update_live_1 (src, x)
1375 int src;
1376 rtx x;
1378 int i;
1379 int regno;
1380 rtx reg = SET_DEST (x);
1382 if (reg == 0)
1383 return;
1385 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
1386 || GET_CODE (reg) == SIGN_EXTRACT
1387 || GET_CODE (reg) == STRICT_LOW_PART)
1388 reg = XEXP (reg, 0);
1390 if (GET_CODE (reg) == PARALLEL)
1392 int i;
1394 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1395 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1396 update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1398 return;
1401 if (GET_CODE (reg) != REG)
1402 return;
1404 /* Global registers are always live, so the code below does not apply
1405 to them. */
1407 regno = REGNO (reg);
1409 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
1411 if (regno < FIRST_PSEUDO_REGISTER)
1413 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1414 while (--j >= 0)
1416 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1418 int b = candidate_table[src].update_bbs.first_member[i];
1420 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
1421 regno + j);
1425 else
1427 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1429 int b = candidate_table[src].update_bbs.first_member[i];
1431 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
1437 /* Return 1 if insn can be speculatively moved from block src to trg,
1438 otherwise return 0. Called before first insertion of insn to
1439 ready-list or before the scheduling. */
1441 static int
1442 check_live (insn, src)
1443 rtx insn;
1444 int src;
1446 /* Find the registers set by instruction. */
1447 if (GET_CODE (PATTERN (insn)) == SET
1448 || GET_CODE (PATTERN (insn)) == CLOBBER)
1449 return check_live_1 (src, PATTERN (insn));
1450 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1452 int j;
1453 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1454 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1455 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1456 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1457 return 0;
1459 return 1;
1462 return 1;
1465 /* Update the live registers info after insn was moved speculatively from
1466 block src to trg. */
1468 static void
1469 update_live (insn, src)
1470 rtx insn;
1471 int src;
1473 /* Find the registers set by instruction. */
1474 if (GET_CODE (PATTERN (insn)) == SET
1475 || GET_CODE (PATTERN (insn)) == CLOBBER)
1476 update_live_1 (src, PATTERN (insn));
1477 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1479 int j;
1480 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1481 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1482 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1483 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1487 /* Exception Free Loads:
1489 We define five classes of speculative loads: IFREE, IRISKY,
1490 PFREE, PRISKY, and MFREE.
1492 IFREE loads are loads that are proved to be exception-free, just
1493 by examining the load insn. Examples for such loads are loads
1494 from TOC and loads of global data.
1496 IRISKY loads are loads that are proved to be exception-risky,
1497 just by examining the load insn. Examples for such loads are
1498 volatile loads and loads from shared memory.
1500 PFREE loads are loads for which we can prove, by examining other
1501 insns, that they are exception-free. Currently, this class consists
1502 of loads for which we are able to find a "similar load", either in
1503 the target block, or, if only one split-block exists, in that split
1504 block. Load2 is similar to load1 if both have same single base
1505 register. We identify only part of the similar loads, by finding
1506 an insn upon which both load1 and load2 have a DEF-USE dependence.
1508 PRISKY loads are loads for which we can prove, by examining other
1509 insns, that they are exception-risky. Currently we have two proofs for
1510 such loads. The first proof detects loads that are probably guarded by a
1511 test on the memory address. This proof is based on the
1512 backward and forward data dependence information for the region.
1513 Let load-insn be the examined load.
1514 Load-insn is PRISKY iff ALL the following hold:
1516 - insn1 is not in the same block as load-insn
1517 - there is a DEF-USE dependence chain (insn1, ..., load-insn)
1518 - test-insn is either a compare or a branch, not in the same block
1519 as load-insn
1520 - load-insn is reachable from test-insn
1521 - there is a DEF-USE dependence chain (insn1, ..., test-insn)
1523 This proof might fail when the compare and the load are fed
1524 by an insn not in the region. To solve this, we will add to this
1525 group all loads that have no input DEF-USE dependence.
1527 The second proof detects loads that are directly or indirectly
1528 fed by a speculative load. This proof is affected by the
1529 scheduling process. We will use the flag fed_by_spec_load.
1530 Initially, all insns have this flag reset. After a speculative
1531 motion of an insn, if insn is either a load, or marked as
1532 fed_by_spec_load, we will also mark as fed_by_spec_load every
1533 insn1 for which a DEF-USE dependence (insn, insn1) exists. A
1534 load which is fed_by_spec_load is also PRISKY.
1536 MFREE (maybe-free) loads are all the remaining loads. They may be
1537 exception-free, but we cannot prove it.
1539 Now, all loads in IFREE and PFREE classes are considered
1540 exception-free, while all loads in IRISKY and PRISKY classes are
1541 considered exception-risky. As for loads in the MFREE class,
1542 these are considered either exception-free or exception-risky,
1543 depending on whether we are pessimistic or optimistic. We have
1544 to take the pessimistic approach to assure the safety of
1545 speculative scheduling, but we can take the optimistic approach
 1546 by invoking the -fsched-spec-load-dangerous option. */
1548 enum INSN_TRAP_CLASS
1550 TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
1551 PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
1554 #define WORST_CLASS(class1, class2) \
1555 ((class1 > class2) ? class1 : class2)
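
/* Worked example (illustrative): the trap classes above are ordered by
   increasing risk, so WORST_CLASS (IFREE, PRISKY_CANDIDATE) is
   PRISKY_CANDIDATE and WORST_CLASS (TRAP_FREE, TRAP_RISKY) is TRAP_RISKY;
   may_trap_exp and haifa_classify_insn below rely on this ordering when
   merging the classes of an insn's sub-expressions.  */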
1557 /* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
1558 #define IS_REACHABLE(bb_from, bb_to) \
1559 (bb_from == bb_to \
1560 || IS_RGN_ENTRY (bb_from) \
1561 || (TEST_BIT (ancestor_edges[bb_to], \
1562 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
 1564 /* Non-zero iff the address is composed of at most one register. */
1565 #define CONST_BASED_ADDRESS_P(x) \
1566 (GET_CODE (x) == REG \
1567 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
1568 || (GET_CODE (x) == LO_SUM)) \
1569 && (GET_CODE (XEXP (x, 0)) == CONST_INT \
1570 || GET_CODE (XEXP (x, 1)) == CONST_INT)))
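
/* Illustrative examples (RTL shapes hypothetical): (reg 65),
   (plus (reg 65) (const_int 8)) and (lo_sum (reg 65) (const_int 8))
   satisfy CONST_BASED_ADDRESS_P, while (plus (reg 65) (reg 66)) and
   (symbol_ref "x") do not match the macro, since they are not a single
   register optionally combined with a constant.  */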
1572 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1574 static void
1575 set_spec_fed (load_insn)
1576 rtx load_insn;
1578 rtx link;
1580 for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
1581 if (GET_MODE (link) == VOIDmode)
1582 FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
1583 } /* set_spec_fed */
1585 /* On the path from the insn to load_insn_bb, find a conditional
1586 branch depending on insn, that guards the speculative load. */
1588 static int
1589 find_conditional_protection (insn, load_insn_bb)
1590 rtx insn;
1591 int load_insn_bb;
1593 rtx link;
1595 /* Iterate through DEF-USE forward dependences. */
1596 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
1598 rtx next = XEXP (link, 0);
1599 if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1600 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1601 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1602 && load_insn_bb != INSN_BB (next)
1603 && GET_MODE (link) == VOIDmode
1604 && (GET_CODE (next) == JUMP_INSN
1605 || find_conditional_protection (next, load_insn_bb)))
1606 return 1;
1608 return 0;
1609 } /* find_conditional_protection */
1611 /* Returns 1 if the same insn1 that participates in the computation
1612 of load_insn's address is feeding a conditional branch that is
 1613 guarding load_insn. This is true if we find the two DEF-USE
1614 chains:
1615 insn1 -> ... -> conditional-branch
1616 insn1 -> ... -> load_insn,
1617 and if a flow path exist:
1618 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1619 and if insn1 is on the path
1620 region-entry -> ... -> bb_trg -> ... load_insn.
1622 Locate insn1 by climbing on LOG_LINKS from load_insn.
1623 Locate the branch by following INSN_DEPEND from insn1. */
1625 static int
1626 is_conditionally_protected (load_insn, bb_src, bb_trg)
1627 rtx load_insn;
1628 int bb_src, bb_trg;
1630 rtx link;
1632 for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
1634 rtx insn1 = XEXP (link, 0);
1636 /* Must be a DEF-USE dependence upon non-branch. */
1637 if (GET_MODE (link) != VOIDmode
1638 || GET_CODE (insn1) == JUMP_INSN)
1639 continue;
1641 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1642 if (INSN_BB (insn1) == bb_src
1643 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1644 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1645 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1646 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1647 continue;
1649 /* Now search for the conditional-branch. */
1650 if (find_conditional_protection (insn1, bb_src))
1651 return 1;
1653 /* Recursive step: search another insn1, "above" current insn1. */
1654 return is_conditionally_protected (insn1, bb_src, bb_trg);
1657 /* The chain does not exist. */
1658 return 0;
1659 } /* is_conditionally_protected */
1661 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1662 load_insn can move speculatively from bb_src to bb_trg. All the
1663 following must hold:
1665 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1666 (2) load_insn and load1 have a def-use dependence upon
1667 the same insn 'insn1'.
1668 (3) either load2 is in bb_trg, or:
1669 - there's only one split-block, and
1670 - load1 is on the escape path, and
1672 From all these we can conclude that the two loads access memory
1673 addresses that differ at most by a constant, and hence if moving
1674 load_insn would cause an exception, it would have been caused by
1675 load2 anyhow. */
1677 static int
1678 is_pfree (load_insn, bb_src, bb_trg)
1679 rtx load_insn;
1680 int bb_src, bb_trg;
1682 rtx back_link;
1683 candidate *candp = candidate_table + bb_src;
1685 if (candp->split_bbs.nr_members != 1)
1686 /* Must have exactly one escape block. */
1687 return 0;
1689 for (back_link = LOG_LINKS (load_insn);
1690 back_link; back_link = XEXP (back_link, 1))
1692 rtx insn1 = XEXP (back_link, 0);
1694 if (GET_MODE (back_link) == VOIDmode)
1696 /* Found a DEF-USE dependence (insn1, load_insn). */
1697 rtx fore_link;
1699 for (fore_link = INSN_DEPEND (insn1);
1700 fore_link; fore_link = XEXP (fore_link, 1))
1702 rtx insn2 = XEXP (fore_link, 0);
1703 if (GET_MODE (fore_link) == VOIDmode)
1705 /* Found a DEF-USE dependence (insn1, insn2). */
1706 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1707 /* insn2 not guaranteed to be a 1 base reg load. */
1708 continue;
1710 if (INSN_BB (insn2) == bb_trg)
1711 /* insn2 is the similar load, in the target block. */
1712 return 1;
1714 if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2))
1715 /* insn2 is a similar load, in a split-block. */
1716 return 1;
1722 /* Couldn't find a similar load. */
1723 return 0;
1724 } /* is_pfree */
1726 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
1727 as found by analyzing insn's expression. */
1729 static int
1730 may_trap_exp (x, is_store)
1731 rtx x;
1732 int is_store;
1734 enum rtx_code code;
1736 if (x == 0)
1737 return TRAP_FREE;
1738 code = GET_CODE (x);
1739 if (is_store)
1741 if (code == MEM)
1742 return TRAP_RISKY;
1743 else
1744 return TRAP_FREE;
1746 if (code == MEM)
1748 /* The insn uses memory: a volatile load. */
1749 if (MEM_VOLATILE_P (x))
1750 return IRISKY;
1751 /* An exception-free load. */
1752 if (!may_trap_p (x))
1753 return IFREE;
1754 /* A load with 1 base register, to be further checked. */
1755 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
1756 return PFREE_CANDIDATE;
1757 /* No info on the load, to be further checked. */
1758 return PRISKY_CANDIDATE;
1760 else
1762 const char *fmt;
1763 int i, insn_class = TRAP_FREE;
1765 /* Neither store nor load, check if it may cause a trap. */
1766 if (may_trap_p (x))
1767 return TRAP_RISKY;
1768 /* Recursive step: walk the insn... */
1769 fmt = GET_RTX_FORMAT (code);
1770 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1772 if (fmt[i] == 'e')
1774 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
1775 insn_class = WORST_CLASS (insn_class, tmp_class);
1777 else if (fmt[i] == 'E')
1779 int j;
1780 for (j = 0; j < XVECLEN (x, i); j++)
1782 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
1783 insn_class = WORST_CLASS (insn_class, tmp_class);
1784 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
1785 break;
1788 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
1789 break;
1791 return insn_class;
1795 /* Classifies insn for the purpose of verifying that it can be
 1796 moved speculatively, by examining its pattern, returning:
1797 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
1798 TRAP_FREE: non-load insn.
 1799 IFREE: load from a globally safe location.
1800 IRISKY: volatile load.
 1801 PFREE_CANDIDATE, PRISKY_CANDIDATE: loads that need to be checked for
1802 being either PFREE or PRISKY. */
1804 static int
1805 haifa_classify_insn (insn)
1806 rtx insn;
1808 rtx pat = PATTERN (insn);
1809 int tmp_class = TRAP_FREE;
1810 int insn_class = TRAP_FREE;
1811 enum rtx_code code;
1813 if (GET_CODE (pat) == PARALLEL)
1815 int i, len = XVECLEN (pat, 0);
1817 for (i = len - 1; i >= 0; i--)
1819 code = GET_CODE (XVECEXP (pat, 0, i));
1820 switch (code)
1822 case CLOBBER:
1823 /* Test if it is a 'store'. */
1824 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
1825 break;
1826 case SET:
1827 /* Test if it is a store. */
1828 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
1829 if (tmp_class == TRAP_RISKY)
1830 break;
1831 /* Test if it is a load. */
1832 tmp_class
1833 = WORST_CLASS (tmp_class,
1834 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
1835 0));
1836 break;
1837 case COND_EXEC:
1838 case TRAP_IF:
1839 tmp_class = TRAP_RISKY;
1840 break;
1841 default:
1844 insn_class = WORST_CLASS (insn_class, tmp_class);
1845 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
1846 break;
1849 else
1851 code = GET_CODE (pat);
1852 switch (code)
1854 case CLOBBER:
1855 /* Test if it is a 'store'. */
1856 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
1857 break;
1858 case SET:
1859 /* Test if it is a store. */
1860 tmp_class = may_trap_exp (SET_DEST (pat), 1);
1861 if (tmp_class == TRAP_RISKY)
1862 break;
1863 /* Test if it is a load. */
1864 tmp_class =
1865 WORST_CLASS (tmp_class,
1866 may_trap_exp (SET_SRC (pat), 0));
1867 break;
1868 case COND_EXEC:
1869 case TRAP_IF:
1870 tmp_class = TRAP_RISKY;
1871 break;
1872 default:;
1874 insn_class = tmp_class;
1877 return insn_class;
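
/* Illustrative examples of the classification (RTL shapes hypothetical):
   a SET whose SET_DEST is a MEM classifies as TRAP_RISKY (a store); a SET
   whose SET_SRC is a volatile MEM is IRISKY; a load from
   (mem (plus (reg) (const_int 8))) is PFREE_CANDIDATE, to be refined by
   is_pfree/is_prisky below; and a register-to-register SET is TRAP_FREE.  */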
1880 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
1881 a load moved speculatively, or if load_insn is protected by
1882 a compare on load_insn's address). */
1884 static int
1885 is_prisky (load_insn, bb_src, bb_trg)
1886 rtx load_insn;
1887 int bb_src, bb_trg;
1889 if (FED_BY_SPEC_LOAD (load_insn))
1890 return 1;
1892 if (LOG_LINKS (load_insn) == NULL)
1893 /* Dependence may 'hide' out of the region. */
1894 return 1;
1896 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
1897 return 1;
1899 return 0;
1902 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
1903 Return 1 if insn is exception-free (and the motion is valid)
1904 and 0 otherwise. */
1906 static int
1907 is_exception_free (insn, bb_src, bb_trg)
1908 rtx insn;
1909 int bb_src, bb_trg;
1911 int insn_class = haifa_classify_insn (insn);
1913 /* Handle non-load insns. */
1914 switch (insn_class)
1916 case TRAP_FREE:
1917 return 1;
1918 case TRAP_RISKY:
1919 return 0;
1920 default:;
1923 /* Handle loads. */
1924 if (!flag_schedule_speculative_load)
1925 return 0;
1926 IS_LOAD_INSN (insn) = 1;
1927 switch (insn_class)
1929 case IFREE:
1930 return (1);
1931 case IRISKY:
1932 return 0;
1933 case PFREE_CANDIDATE:
1934 if (is_pfree (insn, bb_src, bb_trg))
1935 return 1;
1936 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
1937 case PRISKY_CANDIDATE:
1938 if (!flag_schedule_speculative_load_dangerous
1939 || is_prisky (insn, bb_src, bb_trg))
1940 return 0;
1941 break;
1942 default:;
1945 return flag_schedule_speculative_load_dangerous;
1948 /* The number of insns from the current block scheduled so far. */
1949 static int sched_target_n_insns;
1950 /* The number of insns from the current block to be scheduled in total. */
1951 static int target_n_insns;
1952 /* The number of insns from the entire region scheduled so far. */
1953 static int sched_n_insns;
1954 /* Nonzero if the last scheduled insn was a jump. */
1955 static int last_was_jump;
1957 /* Implementations of the sched_info functions for region scheduling. */
1958 static void init_ready_list PARAMS ((struct ready_list *));
1959 static int can_schedule_ready_p PARAMS ((rtx));
1960 static int new_ready PARAMS ((rtx));
1961 static int schedule_more_p PARAMS ((void));
1962 static const char *rgn_print_insn PARAMS ((rtx, int));
1963 static int rgn_rank PARAMS ((rtx, rtx));
1964 static int contributes_to_priority PARAMS ((rtx, rtx));
1965 static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
1967 /* Return nonzero if there are more insns that should be scheduled. */
1969 static int
1970 schedule_more_p ()
1972 return ! last_was_jump && sched_target_n_insns < target_n_insns;
1975 /* Add all insns that are initially ready to the ready list READY. Called
1976 once before scheduling a set of insns. */
1978 static void
1979 init_ready_list (ready)
1980 struct ready_list *ready;
1982 rtx prev_head = current_sched_info->prev_head;
1983 rtx next_tail = current_sched_info->next_tail;
1984 int bb_src;
1985 rtx insn;
1987 target_n_insns = 0;
1988 sched_target_n_insns = 0;
1989 sched_n_insns = 0;
1990 last_was_jump = 0;
1992 /* Print debugging information. */
1993 if (sched_verbose >= 5)
1994 debug_dependencies ();
1996 /* Prepare current target block info. */
1997 if (current_nr_blocks > 1)
1999 candidate_table = (candidate *) xmalloc (current_nr_blocks
2000 * sizeof (candidate));
2002 bblst_last = 0;
2003 /* bblst_table holds split blocks and update blocks for each block after
2004 the current one in the region. split blocks and update blocks are
2005 the TO blocks of region edges, so there can be at most rgn_nr_edges
2006 of them. */
2007 bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
2008 bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
2010 bitlst_table_last = 0;
2011 bitlst_table_size = rgn_nr_edges;
2012 bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
2014 compute_trg_info (target_bb);
2017 /* Initialize ready list with all 'ready' insns in target block.
2018 Count number of insns in the target block being scheduled. */
2019 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2021 rtx next;
2023 if (! INSN_P (insn))
2024 continue;
2025 next = NEXT_INSN (insn);
2027 if (INSN_DEP_COUNT (insn) == 0
2028 && (SCHED_GROUP_P (next) == 0 || ! INSN_P (next)))
2029 ready_add (ready, insn);
2030 if (!(SCHED_GROUP_P (insn)))
2031 target_n_insns++;
2034 /* Add to ready list all 'ready' insns in valid source blocks.
2035 For speculative insns, check-live, exception-free, and
2036 issue-delay. */
2037 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2038 if (IS_VALID (bb_src))
2040 rtx src_head;
2041 rtx src_next_tail;
2042 rtx tail, head;
2044 get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail);
2045 src_next_tail = NEXT_INSN (tail);
2046 src_head = head;
2048 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2050 if (! INSN_P (insn))
2051 continue;
2053 if (!CANT_MOVE (insn)
2054 && (!IS_SPECULATIVE_INSN (insn)
2055 || (insn_issue_delay (insn) <= 3
2056 && check_live (insn, bb_src)
2057 && is_exception_free (insn, bb_src, target_bb))))
2059 rtx next;
2061 /* Note that we haven't squirreled away the notes for
2062 blocks other than the current. So if this is a
2063 speculative insn, NEXT might otherwise be a note. */
2064 next = next_nonnote_insn (insn);
2065 if (INSN_DEP_COUNT (insn) == 0
2066 && (! next
2067 || SCHED_GROUP_P (next) == 0
2068 || ! INSN_P (next)))
2069 ready_add (ready, insn);
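/* Editorial sketch of the seeding rule used in the loops above: an insn
   enters the initial ready list when it has no unresolved backward
   dependences and it is not followed by an insn of its own scheduling
   group (whole groups are represented by a single member).  The struct and
   function names are hypothetical, for illustration only.  */

struct ex_seed_insn
{
  int dep_count;                /* unresolved backward dependences  */
  int next_in_same_group;       /* nonzero if the following insn has
                                   SCHED_GROUP_P set  */
};

static int
ex_initially_ready_p (const struct ex_seed_insn *insn)
{
  return insn->dep_count == 0 && !insn->next_in_same_group;
}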
2075 /* Called after taking INSN from the ready list. Returns nonzero if this
2076 insn can be scheduled, zero if we should silently discard it. */
2078 static int
2079 can_schedule_ready_p (insn)
2080 rtx insn;
2082 if (GET_CODE (insn) == JUMP_INSN)
2083 last_was_jump = 1;
2085 /* An interblock motion? */
2086 if (INSN_BB (insn) != target_bb)
2088 rtx temp;
2089 basic_block b1;
2091 if (IS_SPECULATIVE_INSN (insn))
2093 if (!check_live (insn, INSN_BB (insn)))
2094 return 0;
2095 update_live (insn, INSN_BB (insn));
2097 /* For speculative load, mark insns fed by it. */
2098 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2099 set_spec_fed (insn);
2101 nr_spec++;
2103 nr_inter++;
2105 /* Find the beginning of the scheduling group. */
2106 /* ??? Ought to update basic block here, but later bits of
2107 schedule_block assume the original insn block is
2108 still intact. */
2110 temp = insn;
2111 while (SCHED_GROUP_P (temp))
2112 temp = PREV_INSN (temp);
2114 /* Update source block boundaries. */
2115 b1 = BLOCK_FOR_INSN (temp);
2116 if (temp == b1->head && insn == b1->end)
2118 /* We moved all the insns in the basic block.
2119 Emit a note after the last insn and update the
2120 begin/end boundaries to point to the note. */
2121 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
2122 b1->head = note;
2123 b1->end = note;
2125 else if (insn == b1->end)
2127 /* We took insns from the end of the basic block,
2128 so update the end of block boundary so that it
2129 points to the first insn we did not move. */
2130 b1->end = PREV_INSN (temp);
2132 else if (temp == b1->head)
2134 /* We took insns from the start of the basic block,
2135 so update the start of block boundary so that
2136 it points to the first insn we did not move. */
2137 b1->head = NEXT_INSN (insn);
2140 else
2142 /* In block motion. */
2143 sched_target_n_insns++;
2145 sched_n_insns++;
2147 return 1;
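/* Editorial sketch of the boundary bookkeeping above, over a hypothetical
   doubly linked insn chain: FIRST/LAST delimit the insns that were moved
   out of the source block, HEAD/END are that block's boundaries, and
   DELETED_NOTE is the placeholder left behind when the block is emptied.
   All of these names are invented for illustration.  */

struct ex_block { struct ex_chain *head, *end; };
struct ex_chain { struct ex_chain *prev, *next; };

static void
ex_update_source_block (struct ex_block *bb, struct ex_chain *first,
                        struct ex_chain *last, struct ex_chain *deleted_note)
{
  if (first == bb->head && last == bb->end)
    /* Every insn in the block moved away; point both boundaries at the
       placeholder note.  */
    bb->head = bb->end = deleted_note;
  else if (last == bb->end)
    /* Insns were taken from the end; the new end is the insn just before
       the first one that moved.  */
    bb->end = first->prev;
  else if (first == bb->head)
    /* Insns were taken from the start; the new head is the insn just after
       the last one that moved.  */
    bb->head = last->next;
}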
2150 /* Called after INSN has all its dependencies resolved. Return nonzero
2151 if it should be moved to the ready list or the queue, or zero if we
2152 should silently discard it. */
2153 static int
2154 new_ready (next)
2155 rtx next;
2157 /* For speculative insns, before inserting to ready/queue,
2158 check live, exception-free, and issue-delay. */
2159 if (INSN_BB (next) != target_bb
2160 && (!IS_VALID (INSN_BB (next))
2161 || CANT_MOVE (next)
2162 || (IS_SPECULATIVE_INSN (next)
2163 && (insn_issue_delay (next) > 3
2164 || !check_live (next, INSN_BB (next))
2165 || !is_exception_free (next, INSN_BB (next), target_bb)))))
2166 return 0;
2167 return 1;
2170 /* Return a string that contains the insn uid and optionally anything else
2171 necessary to identify this insn in an output. It's valid to use a
2172 static buffer for this. The ALIGNED parameter should cause the string
2173 to be formatted so that multiple output lines will line up nicely. */
2175 static const char *
2176 rgn_print_insn (insn, aligned)
2177 rtx insn;
2178 int aligned;
2180 static char tmp[80];
2182 if (aligned)
2183 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2184 else
2186 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2187 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2188 else
2189 sprintf (tmp, "%d", INSN_UID (insn));
2191 return tmp;
2194 /* Compare priority of two insns. Return a positive number if the second
2195 insn is to be preferred for scheduling, and a negative one if the first
2196 is to be preferred. Zero if they are equally good. */
2198 static int
2199 rgn_rank (insn1, insn2)
2200 rtx insn1, insn2;
2202 /* Some comparisons make sense in interblock scheduling only. */
2203 if (INSN_BB (insn1) != INSN_BB (insn2))
2205 int spec_val, prob_val;
2207 /* Prefer an in-block motion over an interblock motion. */
2208 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2209 return 1;
2210 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2211 return -1;
2213 /* Prefer a useful motion over a speculative one. */
2214 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2215 if (spec_val)
2216 return spec_val;
2218 /* Prefer a more probable (speculative) insn. */
2219 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2220 if (prob_val)
2221 return prob_val;
2223 return 0;
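/* Editorial sketch: the ranking above read as a plain three-key comparator
   over hypothetical insn descriptors.  As in rgn_rank, a negative result
   prefers A, a positive result prefers B, and zero is a tie; the real
   function applies these keys only when the two insns live in different
   blocks.  */

struct ex_rank_key
{
  int in_target_bb;             /* 1 if the insn is in the target block  */
  int speculative;              /* 1 for a speculative interblock motion  */
  int probability;              /* probability of reaching the insn's block  */
};

static int
ex_rgn_rank (const struct ex_rank_key *a, const struct ex_rank_key *b)
{
  /* 1. Prefer an in-block motion over an interblock motion.  */
  if (a->in_target_bb != b->in_target_bb)
    return b->in_target_bb - a->in_target_bb;
  /* 2. Prefer a useful motion over a speculative one.  */
  if (a->speculative != b->speculative)
    return a->speculative - b->speculative;
  /* 3. Prefer the more probable (speculative) insn.  */
  return b->probability - a->probability;
}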
2226 /* NEXT is an instruction that depends on INSN (a backward dependence);
2227 return nonzero if we should include this dependence in priority
2228 calculations. */
2230 static int
2231 contributes_to_priority (next, insn)
2232 rtx next, insn;
2234 return BLOCK_NUM (next) == BLOCK_NUM (insn);
2237 /* INSN is a JUMP_INSN. Store the set of registers that must be considered
2238 to be set by this jump in SET. */
2240 static void
2241 compute_jump_reg_dependencies (insn, set)
2242 rtx insn ATTRIBUTE_UNUSED;
2243 regset set ATTRIBUTE_UNUSED;
2245 /* Nothing to do here, since we postprocess jumps in
2246 add_branch_dependences. */
2249 /* Used in schedule_insns to initialize current_sched_info for scheduling
2250 regions (or single basic blocks). */
2252 static struct sched_info region_sched_info =
2254 init_ready_list,
2255 can_schedule_ready_p,
2256 schedule_more_p,
2257 new_ready,
2258 rgn_rank,
2259 rgn_print_insn,
2260 contributes_to_priority,
2261 compute_jump_reg_dependencies,
2263 NULL, NULL,
2264 NULL, NULL,
2265 0, 0
2268 /* Add dependences so that branches are scheduled to run last in their
2269 block. */
2271 static void
2272 add_branch_dependences (head, tail)
2273 rtx head, tail;
2275 rtx insn, last;
2277 /* For all branches, calls, uses, clobbers, and cc0 setters, force them
2278 to remain in order at the end of the block by adding dependencies and
2279 giving the last a high priority. There may be notes present, and
2280 prev_head may also be a note.
2282 Branches must obviously remain at the end. Calls should remain at the
2283 end since moving them results in worse register allocation. Uses remain
2284 at the end to ensure proper register allocation. cc0 setters remain
2285 at the end because they can't be moved away from their cc0 user. */
2286 insn = tail;
2287 last = 0;
2288 while (GET_CODE (insn) == CALL_INSN
2289 || GET_CODE (insn) == JUMP_INSN
2290 || (GET_CODE (insn) == INSN
2291 && (GET_CODE (PATTERN (insn)) == USE
2292 || GET_CODE (PATTERN (insn)) == CLOBBER
2293 #ifdef HAVE_cc0
2294 || sets_cc0_p (PATTERN (insn))
2295 #endif
2297 || GET_CODE (insn) == NOTE)
2299 if (GET_CODE (insn) != NOTE)
2301 if (last != 0
2302 && !find_insn_list (insn, LOG_LINKS (last)))
2304 add_dependence (last, insn, REG_DEP_ANTI);
2305 INSN_REF_COUNT (insn)++;
2308 CANT_MOVE (insn) = 1;
2310 last = insn;
2311 /* Skip over insns that are part of a group.
2312 Make each insn explicitly depend on the previous insn.
2313 This ensures that only the group header will ever enter
2314 the ready queue (and, when scheduled, will automatically
2315 schedule the SCHED_GROUP_P block). */
2316 while (SCHED_GROUP_P (insn))
2318 rtx temp = prev_nonnote_insn (insn);
2319 add_dependence (insn, temp, REG_DEP_ANTI);
2320 insn = temp;
2324 /* Don't overrun the bounds of the basic block. */
2325 if (insn == head)
2326 break;
2328 insn = PREV_INSN (insn);
2331 /* Make sure these insns are scheduled last in their block. */
2332 insn = last;
2333 if (insn != 0)
2334 while (insn != head)
2336 insn = prev_nonnote_insn (insn);
2338 if (INSN_REF_COUNT (insn) != 0)
2339 continue;
2341 add_dependence (last, insn, REG_DEP_ANTI);
2342 INSN_REF_COUNT (insn) = 1;
2344 /* Skip over insns that are part of a group. */
2345 while (SCHED_GROUP_P (insn))
2346 insn = prev_nonnote_insn (insn);
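/* Editorial sketch of the pinning idea above, with a plain reference count
   standing in for the real add_dependence () calls on LOG_LINKS: walking
   the block backwards, every insn that must stay at the end gains one
   consumer (the insn pinned just after it), mirroring the
   INSN_REF_COUNT bookkeeping in the first loop of add_branch_dependences.
   The struct and function names are hypothetical.  */

struct ex_tail_insn
{
  struct ex_tail_insn *prev;
  int must_stay_at_end;         /* branch, call, USE, CLOBBER, cc0 setter  */
  int ref_count;                /* consumers recorded by this sketch  */
};

static void
ex_pin_block_tail (struct ex_tail_insn *tail)
{
  struct ex_tail_insn *insn, *last = 0;

  for (insn = tail; insn && insn->must_stay_at_end; insn = insn->prev)
    {
      if (last)
        /* The insn pinned after INSN now (anti-)depends on it, so the
           trailing group keeps its relative order.  */
        insn->ref_count++;
      last = insn;
    }
}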
2350 /* Data structures for the computation of data dependences in a region. We
2351 keep one `deps' structure for every basic block. Before analyzing the
2352 data dependences for a bb, its variables are initialized as a function of
2353 the variables of its predecessors. When the analysis for a bb completes,
2354 we save the contents to the corresponding bb_deps[bb] variable. */
2356 static struct deps *bb_deps;
2358 /* After computing the dependencies for block BB, propagate the dependencies
2359 found in TMP_DEPS to the successors of the block. */
2360 static void
2361 propagate_deps (bb, tmp_deps)
2362 int bb;
2363 struct deps *tmp_deps;
2365 int b = BB_TO_BLOCK (bb);
2366 int e, first_edge;
2367 int reg;
2368 rtx link_insn, link_mem;
2369 rtx u;
2371 /* These lists should point to the right place, for correct
2372 freeing later. */
2373 bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns;
2374 bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems;
2375 bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns;
2376 bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems;
2378 /* bb's structures are inherited by its successors. */
2379 first_edge = e = OUT_EDGES (b);
2380 if (e <= 0)
2381 return;
2385 rtx x;
2386 int b_succ = TO_BLOCK (e);
2387 int bb_succ = BLOCK_TO_BB (b_succ);
2388 struct deps *succ_deps = bb_deps + bb_succ;
2390 /* Only bbs "below" bb, in the same region, are interesting. */
2391 if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
2392 || bb_succ <= bb)
2394 e = NEXT_OUT (e);
2395 continue;
2398 /* The reg_last lists are inherited by bb_succ. */
2399 EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg,
2401 struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg];
2402 struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg];
2404 for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1))
2405 if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses))
2406 succ_deps_reg->uses
2407 = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses);
2409 for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1))
2410 if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets))
2411 succ_deps_reg->sets
2412 = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets);
2414 for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1))
2415 if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers))
2416 succ_deps_reg->clobbers
2417 = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers);
2419 IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use);
2421 /* Mem read/write lists are inherited by bb_succ. */
2422 link_insn = tmp_deps->pending_read_insns;
2423 link_mem = tmp_deps->pending_read_mems;
2424 while (link_insn)
2426 if (!(find_insn_mem_list (XEXP (link_insn, 0),
2427 XEXP (link_mem, 0),
2428 succ_deps->pending_read_insns,
2429 succ_deps->pending_read_mems)))
2430 add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns,
2431 &succ_deps->pending_read_mems,
2432 XEXP (link_insn, 0), XEXP (link_mem, 0));
2433 link_insn = XEXP (link_insn, 1);
2434 link_mem = XEXP (link_mem, 1);
2437 link_insn = tmp_deps->pending_write_insns;
2438 link_mem = tmp_deps->pending_write_mems;
2439 while (link_insn)
2441 if (!(find_insn_mem_list (XEXP (link_insn, 0),
2442 XEXP (link_mem, 0),
2443 succ_deps->pending_write_insns,
2444 succ_deps->pending_write_mems)))
2445 add_insn_mem_dependence (succ_deps,
2446 &succ_deps->pending_write_insns,
2447 &succ_deps->pending_write_mems,
2448 XEXP (link_insn, 0), XEXP (link_mem, 0));
2450 link_insn = XEXP (link_insn, 1);
2451 link_mem = XEXP (link_mem, 1);
2454 /* last_function_call is inherited by bb_succ. */
2455 for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1))
2456 if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call))
2457 succ_deps->last_function_call
2458 = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call);
2460 /* last_pending_memory_flush is inherited by bb_succ. */
2461 for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2462 if (! find_insn_list (XEXP (u, 0),
2463 succ_deps->last_pending_memory_flush))
2464 succ_deps->last_pending_memory_flush
2465 = alloc_INSN_LIST (XEXP (u, 0),
2466 succ_deps->last_pending_memory_flush);
2468 /* sched_before_next_call is inherited by bb_succ. */
2469 x = LOG_LINKS (tmp_deps->sched_before_next_call);
2470 for (; x; x = XEXP (x, 1))
2471 add_dependence (succ_deps->sched_before_next_call,
2472 XEXP (x, 0), REG_DEP_ANTI);
2474 e = NEXT_OUT (e);
2476 while (e != first_edge);
2479 /* Compute backward dependences inside bb. In a multiple blocks region:
2480 (1) a bb is analyzed after its predecessors, and (2) the lists in
2481 effect at the end of bb (after analyzing for bb) are inherited by
2482 bb's successors.
2484 Specifically for reg-reg data dependences, the block insns are
2485 scanned by sched_analyze () top-to-bottom. Two lists are
2486 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2487 and reg_last[].uses for register USEs.
2489 When analysis is completed for bb, we update for its successors:
2490 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2491 ; - USES[succ] = Union (USES [succ], USES [bb])
2493 The mechanism for computing mem-mem data dependence is very
2494 similar, and the result is interblock dependences in the region. */
2496 static void
2497 compute_block_backward_dependences (bb)
2498 int bb;
2500 rtx head, tail;
2501 struct deps tmp_deps;
2503 tmp_deps = bb_deps[bb];
2505 /* Do the analysis for this block. */
2506 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2507 sched_analyze (&tmp_deps, head, tail);
2508 add_branch_dependences (head, tail);
2510 if (current_nr_blocks > 1)
2511 propagate_deps (bb, &tmp_deps);
2513 /* Free up the INSN_LISTs. */
2514 free_deps (&tmp_deps);
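/* Editorial sketch of the propagation described above, with word-sized
   bitsets standing in for the per-register INSN_LISTs: once a block is
   analyzed, its DEF and USE sets are OR-ed into every successor that comes
   later in the same region.  Types and names are hypothetical.  */

struct ex_bb_sets
{
  unsigned long defs;           /* registers defined up to the block end  */
  unsigned long uses;           /* registers used up to the block end  */
};

static void
ex_propagate_sets (struct ex_bb_sets *bbs, int bb,
                   const int *succs, int n_succs)
{
  int i;

  for (i = 0; i < n_succs; i++)
    {
      int s = succs[i];

      if (s <= bb)
        continue;               /* only blocks "below" bb in the region  */
      bbs[s].defs |= bbs[bb].defs;      /* DEFS[succ] U= DEFS[bb]  */
      bbs[s].uses |= bbs[bb].uses;      /* USES[succ] U= USES[bb]  */
    }
}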
2517 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2518 them to the unused_*_list variables, so that they can be reused. */
2520 static void
2521 free_pending_lists ()
2523 int bb;
2525 for (bb = 0; bb < current_nr_blocks; bb++)
2527 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2528 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2529 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2530 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2534 /* Print dependences for debugging, callable from debugger. */
2536 void
2537 debug_dependencies ()
2539 int bb;
2541 fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
2542 for (bb = 0; bb < current_nr_blocks; bb++)
2544 if (1)
2546 rtx head, tail;
2547 rtx next_tail;
2548 rtx insn;
2550 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2551 next_tail = NEXT_INSN (tail);
2552 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2553 BB_TO_BLOCK (bb), bb);
2555 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
2556 "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
2557 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
2558 "----", "----", "--", "---", "----", "----", "--------", "-----");
2559 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2561 rtx link;
2562 int unit, range;
2564 if (! INSN_P (insn))
2566 int n;
2567 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2568 if (GET_CODE (insn) == NOTE)
2570 n = NOTE_LINE_NUMBER (insn);
2571 if (n < 0)
2572 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2573 else
2574 fprintf (sched_dump, "line %d, file %s\n", n,
2575 NOTE_SOURCE_FILE (insn));
2577 else
2578 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2579 continue;
2582 unit = insn_unit (insn);
2583 range = (unit < 0
2584 || function_units[unit].blockage_range_function == 0) ? 0 :
2585 function_units[unit].blockage_range_function (insn);
2586 fprintf (sched_dump,
2587 ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
2588 (SCHED_GROUP_P (insn) ? "+" : " "),
2589 INSN_UID (insn),
2590 INSN_CODE (insn),
2591 INSN_BB (insn),
2592 INSN_DEP_COUNT (insn),
2593 INSN_PRIORITY (insn),
2594 insn_cost (insn, 0, 0),
2595 (int) MIN_BLOCKAGE_COST (range),
2596 (int) MAX_BLOCKAGE_COST (range));
2597 insn_print_units (insn);
2598 fprintf (sched_dump, "\t: ");
2599 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
2600 fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
2601 fprintf (sched_dump, "\n");
2605 fprintf (sched_dump, "\n");
2608 /* Schedule a region. A region is either an inner loop, a loop-free
2609 subroutine, or a single basic block. Each bb in the region is
2610 scheduled after its flow predecessors. */
2612 static void
2613 schedule_region (rgn)
2614 int rgn;
2616 int bb;
2617 int rgn_n_insns = 0;
2618 int sched_rgn_n_insns = 0;
2620 /* Set variables for the current region. */
2621 current_nr_blocks = RGN_NR_BLOCKS (rgn);
2622 current_blocks = RGN_BLOCKS (rgn);
2624 init_deps_global ();
2626 /* Initializations for region data dependence analysis. */
2627 bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
2628 for (bb = 0; bb < current_nr_blocks; bb++)
2629 init_deps (bb_deps + bb);
2631 /* Compute LOG_LINKS. */
2632 for (bb = 0; bb < current_nr_blocks; bb++)
2633 compute_block_backward_dependences (bb);
2635 /* Compute INSN_DEPEND. */
2636 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
2638 rtx head, tail;
2639 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2641 compute_forward_dependences (head, tail);
2644 /* Set priorities. */
2645 for (bb = 0; bb < current_nr_blocks; bb++)
2647 rtx head, tail;
2648 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2650 rgn_n_insns += set_priorities (head, tail);
2653 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
2654 if (current_nr_blocks > 1)
2656 int i;
2658 prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
2660 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
2661 sbitmap_vector_zero (dom, current_nr_blocks);
2662 /* Edge to bit. */
2663 rgn_nr_edges = 0;
2664 edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
2665 for (i = 1; i < nr_edges; i++)
2666 if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
2667 EDGE_TO_BIT (i) = rgn_nr_edges++;
2668 rgn_edges = (int *) xmalloc (rgn_nr_edges * sizeof (int));
2670 rgn_nr_edges = 0;
2671 for (i = 1; i < nr_edges; i++)
2672 if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
2673 rgn_edges[rgn_nr_edges++] = i;
2675 /* Split edges. */
2676 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
2677 sbitmap_vector_zero (pot_split, current_nr_blocks);
2678 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
2679 sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
2681 /* Compute probabilities, dominators, split_edges. */
2682 for (bb = 0; bb < current_nr_blocks; bb++)
2683 compute_dom_prob_ps (bb);
2686 /* Now we can schedule all blocks. */
2687 for (bb = 0; bb < current_nr_blocks; bb++)
2689 rtx head, tail;
2690 int b = BB_TO_BLOCK (bb);
2692 get_block_head_tail (b, &head, &tail);
2694 if (no_real_insns_p (head, tail))
2695 continue;
2697 current_sched_info->prev_head = PREV_INSN (head);
2698 current_sched_info->next_tail = NEXT_INSN (tail);
2700 if (write_symbols != NO_DEBUG)
2702 save_line_notes (b, head, tail);
2703 rm_line_notes (head, tail);
2706 /* rm_other_notes only removes notes which are _inside_ the
2707 block---that is, it won't remove notes before the first real insn
2708 or after the last real insn of the block. So if the first insn
2709 has a REG_SAVE_NOTE which would otherwise be emitted before the
2710 insn, it is redundant with the note before the start of the
2711 block, and so we have to take it out. */
2712 if (INSN_P (head))
2714 rtx note;
2716 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
2717 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2719 remove_note (head, note);
2720 note = XEXP (note, 1);
2721 remove_note (head, note);
2725 /* Remove remaining note insns from the block, save them in
2726 note_list. These notes are restored at the end of
2727 schedule_block (). */
2728 rm_other_notes (head, tail);
2730 target_bb = bb;
2732 current_sched_info->queue_must_finish_empty
2733 = current_nr_blocks > 1 && !flag_schedule_interblock;
2735 schedule_block (b, rgn_n_insns);
2736 sched_rgn_n_insns += sched_n_insns;
2738 /* Update target block boundaries. */
2739 if (head == BLOCK_HEAD (b))
2740 BLOCK_HEAD (b) = current_sched_info->head;
2741 if (tail == BLOCK_END (b))
2742 BLOCK_END (b) = current_sched_info->tail;
2744 /* Clean up. */
2745 if (current_nr_blocks > 1)
2747 free (candidate_table);
2748 free (bblst_table);
2749 free (bitlst_table);
2753 /* Sanity check: verify that all region insns were scheduled. */
2754 if (sched_rgn_n_insns != rgn_n_insns)
2755 abort ();
2757 /* Restore line notes. */
2758 if (write_symbols != NO_DEBUG)
2760 for (bb = 0; bb < current_nr_blocks; bb++)
2762 rtx head, tail;
2763 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2764 restore_line_notes (head, tail);
2768 /* Done with this region. */
2769 free_pending_lists ();
2771 finish_deps_global ();
2773 free (bb_deps);
2775 if (current_nr_blocks > 1)
2777 free (prob);
2778 sbitmap_vector_free (dom);
2779 sbitmap_vector_free (pot_split);
2780 sbitmap_vector_free (ancestor_edges);
2781 free (edge_to_bit);
2782 free (rgn_edges);
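/* Editorial sketch: the phases of schedule_region above, condensed into a
   skeleton with hypothetical callbacks so the ordering is easy to see.
   None of these names are the scheduler's real interfaces.  */

struct ex_region_hooks
{
  int n_blocks;
  void (*backward_deps) (int bb);       /* LOG_LINKS  */
  void (*forward_deps) (int bb);        /* INSN_DEPEND  */
  int (*set_priorities) (int bb);
  void (*interblock_info) (void);       /* probabilities, dominators, splits  */
  void (*schedule_block) (int bb, int n_insns);
};

static void
ex_schedule_region (const struct ex_region_hooks *h)
{
  int bb, n_insns = 0;

  for (bb = 0; bb < h->n_blocks; bb++)
    h->backward_deps (bb);
  for (bb = h->n_blocks - 1; bb >= 0; bb--)
    h->forward_deps (bb);
  for (bb = 0; bb < h->n_blocks; bb++)
    n_insns += h->set_priorities (bb);
  /* Interblock data is only needed when the region has several blocks.  */
  if (h->n_blocks > 1)
    h->interblock_info ();
  for (bb = 0; bb < h->n_blocks; bb++)
    h->schedule_block (bb, n_insns);
}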
2786 /* Indexed by region, holds the number of death notes found in that region.
2787 Used for consistency checks. */
2788 static int *deaths_in_region;
2790 /* Initialize data structures for region scheduling. */
2792 static void
2793 init_regions ()
2795 sbitmap blocks;
2796 int rgn;
2798 nr_regions = 0;
2799 rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
2800 rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
2801 block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
2802 containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
2804 /* Compute regions for scheduling. */
2805 if (reload_completed
2806 || n_basic_blocks == 1
2807 || !flag_schedule_interblock)
2809 find_single_block_region ();
2811 else
2813 /* Verify that a 'good' control flow graph can be built. */
2814 if (is_cfg_nonregular ())
2816 find_single_block_region ();
2818 else
2820 sbitmap *dom;
2821 struct edge_list *edge_list;
2823 dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
2825 /* The scheduler runs after flow; therefore, we can't blindly call
2826 back into find_basic_blocks since doing so could invalidate the
2827 info in global_live_at_start.
2829 Consider a block consisting entirely of dead stores; after life
2830 analysis it would be a block of NOTE_INSN_DELETED notes. If
2831 we call find_basic_blocks again, then the block would be removed
2832 entirely and invalidate our register live information.
2834 We could (should?) recompute register live information. Doing
2835 so may even be beneficial. */
2836 edge_list = create_edge_list ();
2838 /* Compute the dominators and post dominators. */
2839 calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
2841 /* build_control_flow will return nonzero if it detects unreachable
2842 blocks or any other irregularity with the cfg which prevents
2843 cross block scheduling. */
2844 if (build_control_flow (edge_list) != 0)
2845 find_single_block_region ();
2846 else
2847 find_rgns (edge_list, dom);
2849 if (sched_verbose >= 3)
2850 debug_regions ();
2852 /* We are done with flow's edge list. */
2853 free_edge_list (edge_list);
2855 /* For now. This will move as more and more of haifa is converted
2856 to using the cfg code in flow.c. */
2857 free (dom);
2862 if (CHECK_DEAD_NOTES)
2864 blocks = sbitmap_alloc (n_basic_blocks);
2865 deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
2866 /* Remove all death notes from the subroutine. */
2867 for (rgn = 0; rgn < nr_regions; rgn++)
2869 int b;
2871 sbitmap_zero (blocks);
2872 for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
2873 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
2875 deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
2877 sbitmap_free (blocks);
2879 else
2880 count_or_remove_death_notes (NULL, 1);
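/* Editorial sketch of the region-formation decision above, as a single
   predicate: multi-block regions are attempted only before reload, when
   there is more than one basic block, when interblock scheduling is
   enabled, and when the control flow graph is regular enough for the edge
   tables to be built.  All parameters are hypothetical stand-ins for the
   corresponding conditions in init_regions.  */

static int
ex_use_multiblock_regions (int reload_done, int n_blocks,
                           int interblock_enabled, int cfg_is_regular,
                           int control_flow_ok)
{
  if (reload_done || n_blocks == 1 || !interblock_enabled)
    return 0;
  if (!cfg_is_regular)
    return 0;
  /* Nonzero when build_control_flow () succeeded (returned zero).  */
  return control_flow_ok;
}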
2883 /* The one entry point in this file. DUMP_FILE is the dump file for
2884 this pass. */
2886 void
2887 schedule_insns (dump_file)
2888 FILE *dump_file;
2890 sbitmap large_region_blocks, blocks;
2891 int rgn;
2892 int any_large_regions;
2894 /* Taking care of this degenerate case makes the rest of
2895 this code simpler. */
2896 if (n_basic_blocks == 0)
2897 return;
2899 nr_inter = 0;
2900 nr_spec = 0;
2902 sched_init (dump_file);
2904 init_regions ();
2906 current_sched_info = &region_sched_info;
2908 /* Schedule every region in the subroutine. */
2909 for (rgn = 0; rgn < nr_regions; rgn++)
2910 schedule_region (rgn);
2912 /* Update life analysis for the subroutine. Do single block regions
2913 first so that we can verify that live_at_start didn't change. Then
2914 do all other blocks. */
2915 /* ??? There is an outside possibility that update_life_info, or more
2916 to the point propagate_block, could get called with non-zero flags
2917 more than once for one basic block. This would be kinda bad if it
2918 were to happen, since REG_INFO would be accumulated twice for the
2919 block, and we'd have twice the REG_DEAD notes.
2921 I'm fairly certain that this _shouldn't_ happen, since I don't think
2922 that live_at_start should change at region heads. Not sure what the
2923 best way to test for this kind of thing... */
2925 allocate_reg_life_data ();
2926 compute_bb_for_insn (get_max_uid ());
2928 any_large_regions = 0;
2929 large_region_blocks = sbitmap_alloc (n_basic_blocks);
2930 sbitmap_ones (large_region_blocks);
2932 blocks = sbitmap_alloc (n_basic_blocks);
2933 sbitmap_zero (blocks);
2935 /* Update life information. For regions consisting of multiple blocks
2936 we've possibly done interblock scheduling that affects global liveness.
2937 For regions consisting of single blocks we need to do only local
2938 liveness. */
2939 for (rgn = 0; rgn < nr_regions; rgn++)
2940 if (RGN_NR_BLOCKS (rgn) > 1)
2941 any_large_regions = 1;
2942 else
2944 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2945 RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2948 /* Don't update reg info after reload, since that affects
2949 regs_ever_live, which should not change after reload. */
2950 update_life_info (blocks, UPDATE_LIFE_LOCAL,
2951 (reload_completed ? PROP_DEATH_NOTES
2952 : PROP_DEATH_NOTES | PROP_REG_INFO));
2953 if (any_large_regions)
2955 update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
2956 PROP_DEATH_NOTES | PROP_REG_INFO);
2959 if (CHECK_DEAD_NOTES)
2961 /* Verify the counts of death notes in the single basic block
2962 regions. */
2963 for (rgn = 0; rgn < nr_regions; rgn++)
2964 if (RGN_NR_BLOCKS (rgn) == 1)
2966 sbitmap_zero (blocks);
2967 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2969 if (deaths_in_region[rgn]
2970 != count_or_remove_death_notes (blocks, 0))
2971 abort ();
2973 free (deaths_in_region);
2976 /* Reposition the prologue and epilogue notes in case we moved the
2977 prologue/epilogue insns. */
2978 if (reload_completed)
2979 reposition_prologue_and_epilogue_notes (get_insns ());
2981 /* Delete redundant line notes. */
2982 if (write_symbols != NO_DEBUG)
2983 rm_redundant_line_notes ();
2985 if (sched_verbose)
2987 if (reload_completed == 0 && flag_schedule_interblock)
2989 fprintf (sched_dump,
2990 "\n;; Procedure interblock/speculative motions == %d/%d \n",
2991 nr_inter, nr_spec);
2993 else
2995 if (nr_inter > 0)
2996 abort ();
2998 fprintf (sched_dump, "\n\n");
3001 /* Clean up. */
3002 free (rgn_table);
3003 free (rgn_bb_table);
3004 free (block_to_bb);
3005 free (containing_rgn);
3007 sched_finish ();
3009 if (edge_table)
3011 free (edge_table);
3012 edge_table = NULL;
3015 if (in_edges)
3017 free (in_edges);
3018 in_edges = NULL;
3020 if (out_edges)
3022 free (out_edges);
3023 out_edges = NULL;
3026 sbitmap_free (blocks);
3027 sbitmap_free (large_region_blocks);
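/* Editorial sketch of the CHECK_DEAD_NOTES consistency check used above:
   the per-region death-note counts recorded in init_regions are compared,
   for the single-block regions, against a fresh count taken after
   scheduling; any mismatch indicates broken liveness bookkeeping.  The
   callback and parameter names are hypothetical.  */

static int
ex_count_death_note_mismatches (const int *counts_before, int n_regions,
                                const int *blocks_per_region,
                                int (*count_now) (int rgn))
{
  int rgn, mismatches = 0;

  for (rgn = 0; rgn < n_regions; rgn++)
    if (blocks_per_region[rgn] == 1
        && counts_before[rgn] != count_now (rgn))
      mismatches++;

  return mismatches;
}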
3029 #endif