Daily bump.
[official-gcc.git] / gcc / sched-rgn.c
blobab65e2cec5e04b8c3c664e7334deb94441f2ac57
1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
22 02111-1307, USA. */
24 /* This pass implements list scheduling within basic blocks. It is
25 run twice: (1) after flow analysis, but before register allocation,
26 and (2) after register allocation.
28 The first run performs interblock scheduling, moving insns between
29 different blocks in the same "region", and the second runs only
30 basic block scheduling.
32 Interblock motions performed are useful motions and speculative
33 motions, including speculative loads. Motions requiring code
34 duplication are not supported. The identification of motion type
35 and the check for validity of speculative motions requires
36 construction and analysis of the function's control flow graph.
38 The main entry point for this pass is schedule_insns(), called for
39 each function. The work of the scheduler is organized in three
40 levels: (1) function level: insns are subject to splitting,
41 control-flow-graph is constructed, regions are computed (after
42 reload, each region is of one block), (2) region level: control
43 flow graph attributes required for interblock scheduling are
44 computed (dominators, reachability, etc.), data dependences and
45 priorities are computed, and (3) block level: insns in the block
46 are actually scheduled. */
48 #include "config.h"
49 #include "system.h"
50 #include "toplev.h"
51 #include "rtl.h"
52 #include "tm_p.h"
53 #include "hard-reg-set.h"
54 #include "basic-block.h"
55 #include "regs.h"
56 #include "function.h"
57 #include "flags.h"
58 #include "insn-config.h"
59 #include "insn-attr.h"
60 #include "except.h"
61 #include "toplev.h"
62 #include "recog.h"
63 #include "cfglayout.h"
64 #include "sched-int.h"
66 /* Define when we want to do count REG_DEAD notes before and after scheduling
67 for sanity checking. We can't do that when conditional execution is used,
68 as REG_DEAD exist only for unconditional deaths. */
70 #if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
71 #define CHECK_DEAD_NOTES 1
72 #else
73 #define CHECK_DEAD_NOTES 0
74 #endif
77 #ifdef INSN_SCHEDULING
78 /* Some accessor macros for h_i_d members only used within this file. */
79 #define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count)
80 #define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
81 #define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
83 #define MAX_RGN_BLOCKS 10
84 #define MAX_RGN_INSNS 100
86 /* nr_inter/spec counts interblock/speculative motion for the function. */
87 static int nr_inter, nr_spec;
89 /* Control flow graph edges are kept in circular lists. */
90 typedef struct
92 int from_block;
93 int to_block;
94 int next_in;
95 int next_out;
97 haifa_edge;
98 static haifa_edge *edge_table;
100 #define NEXT_IN(edge) (edge_table[edge].next_in)
101 #define NEXT_OUT(edge) (edge_table[edge].next_out)
102 #define FROM_BLOCK(edge) (edge_table[edge].from_block)
103 #define TO_BLOCK(edge) (edge_table[edge].to_block)
105 /* Number of edges in the control flow graph. (In fact, larger than
106 that by 1, since edge 0 is unused.) */
107 static int nr_edges;
109 /* Circular list of incoming/outgoing edges of a block. */
110 static int *in_edges;
111 static int *out_edges;
113 #define IN_EDGES(block) (in_edges[block])
114 #define OUT_EDGES(block) (out_edges[block])
116 static int is_cfg_nonregular PARAMS ((void));
117 static int build_control_flow PARAMS ((struct edge_list *));
118 static void new_edge PARAMS ((int, int));
120 /* A region is the main entity for interblock scheduling: insns
121 are allowed to move between blocks in the same region, along
122 control flow graph edges, in the 'up' direction. */
123 typedef struct
125 int rgn_nr_blocks; /* Number of blocks in region. */
126 int rgn_blocks; /* cblocks in the region (actually index in rgn_bb_table). */
128 region;
130 /* Number of regions in the procedure. */
131 static int nr_regions;
133 /* Table of region descriptions. */
134 static region *rgn_table;
136 /* Array of lists of regions' blocks. */
137 static int *rgn_bb_table;
139 /* Topological order of blocks in the region (if b2 is reachable from
140 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
141 always referred to by either block or b, while its topological
142 order name (in the region) is refered to by bb. */
143 static int *block_to_bb;
145 /* The number of the region containing a block. */
146 static int *containing_rgn;
148 #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
149 #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
150 #define BLOCK_TO_BB(block) (block_to_bb[block])
151 #define CONTAINING_RGN(block) (containing_rgn[block])
153 void debug_regions PARAMS ((void));
154 static void find_single_block_region PARAMS ((void));
155 static void find_rgns PARAMS ((struct edge_list *, sbitmap *));
156 static int too_large PARAMS ((int, int *, int *));
158 extern void debug_live PARAMS ((int, int));
160 /* Blocks of the current region being scheduled. */
161 static int current_nr_blocks;
162 static int current_blocks;
164 /* The mapping from bb to block. */
165 #define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
167 typedef struct
169 int *first_member; /* Pointer to the list start in bitlst_table. */
170 int nr_members; /* The number of members of the bit list. */
172 bitlst;
174 static int bitlst_table_last;
175 static int bitlst_table_size;
176 static int *bitlst_table;
178 static void extract_bitlst PARAMS ((sbitmap, bitlst *));
180 /* Target info declarations.
182 The block currently being scheduled is referred to as the "target" block,
183 while other blocks in the region from which insns can be moved to the
184 target are called "source" blocks. The candidate structure holds info
185 about such sources: are they valid? Speculative? Etc. */
186 typedef bitlst bblst;
187 typedef struct
189 char is_valid;
190 char is_speculative;
191 int src_prob;
192 bblst split_bbs;
193 bblst update_bbs;
195 candidate;
197 static candidate *candidate_table;
199 /* A speculative motion requires checking live information on the path
200 from 'source' to 'target'. The split blocks are those to be checked.
201 After a speculative motion, live information should be modified in
202 the 'update' blocks.
204 Lists of split and update blocks for each candidate of the current
205 target are in array bblst_table. */
206 static int *bblst_table, bblst_size, bblst_last;
208 #define IS_VALID(src) ( candidate_table[src].is_valid )
209 #define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
210 #define SRC_PROB(src) ( candidate_table[src].src_prob )
212 /* The bb being currently scheduled. */
213 static int target_bb;
215 /* List of edges. */
216 typedef bitlst edgelst;
218 /* Target info functions. */
219 static void split_edges PARAMS ((int, int, edgelst *));
220 static void compute_trg_info PARAMS ((int));
221 void debug_candidate PARAMS ((int));
222 void debug_candidates PARAMS ((int));
224 /* Dominators array: dom[i] contains the sbitmap of dominators of
225 bb i in the region. */
226 static sbitmap *dom;
228 /* bb 0 is the only region entry. */
229 #define IS_RGN_ENTRY(bb) (!bb)
231 /* Is bb_src dominated by bb_trg. */
232 #define IS_DOMINATED(bb_src, bb_trg) \
233 ( TEST_BIT (dom[bb_src], bb_trg) )
235 /* Probability: Prob[i] is a float in [0, 1] which is the probability
236 of bb i relative to the region entry. */
237 static float *prob;
239 /* The probability of bb_src, relative to bb_trg. Note, that while the
240 'prob[bb]' is a float in [0, 1], this macro returns an integer
241 in [0, 100]. */
242 #define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
243 prob[bb_trg])))
245 /* Bit-set of edges, where bit i stands for edge i. */
246 typedef sbitmap edgeset;
248 /* Number of edges in the region. */
249 static int rgn_nr_edges;
251 /* Array of size rgn_nr_edges. */
252 static int *rgn_edges;
255 /* Mapping from each edge in the graph to its number in the rgn. */
256 static int *edge_to_bit;
257 #define EDGE_TO_BIT(edge) (edge_to_bit[edge])
259 /* The split edges of a source bb is different for each target
260 bb. In order to compute this efficiently, the 'potential-split edges'
261 are computed for each bb prior to scheduling a region. This is actually
262 the split edges of each bb relative to the region entry.
264 pot_split[bb] is the set of potential split edges of bb. */
265 static edgeset *pot_split;
267 /* For every bb, a set of its ancestor edges. */
268 static edgeset *ancestor_edges;
270 static void compute_dom_prob_ps PARAMS ((int));
272 #define ABS_VALUE(x) (((x)<0)?(-(x)):(x))
273 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
274 #define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
275 #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
277 /* Parameters affecting the decision of rank_for_schedule().
278 ??? Nope. But MIN_PROBABILITY is used in copmute_trg_info. */
279 #define MIN_DIFF_PRIORITY 2
280 #define MIN_PROBABILITY 40
281 #define MIN_PROB_DIFF 10
283 /* Speculative scheduling functions. */
284 static int check_live_1 PARAMS ((int, rtx));
285 static void update_live_1 PARAMS ((int, rtx));
286 static int check_live PARAMS ((rtx, int));
287 static void update_live PARAMS ((rtx, int));
288 static void set_spec_fed PARAMS ((rtx));
289 static int is_pfree PARAMS ((rtx, int, int));
290 static int find_conditional_protection PARAMS ((rtx, int));
291 static int is_conditionally_protected PARAMS ((rtx, int, int));
292 static int may_trap_exp PARAMS ((rtx, int));
293 static int haifa_classify_insn PARAMS ((rtx));
294 static int is_prisky PARAMS ((rtx, int, int));
295 static int is_exception_free PARAMS ((rtx, int, int));
297 static bool sets_likely_spilled PARAMS ((rtx));
298 static void sets_likely_spilled_1 PARAMS ((rtx, rtx, void *));
299 static void add_branch_dependences PARAMS ((rtx, rtx));
300 static void compute_block_backward_dependences PARAMS ((int));
301 void debug_dependencies PARAMS ((void));
303 static void init_regions PARAMS ((void));
304 static void schedule_region PARAMS ((int));
305 static rtx concat_INSN_LIST PARAMS ((rtx, rtx));
306 static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *));
307 static void propagate_deps PARAMS ((int, struct deps *));
308 static void free_pending_lists PARAMS ((void));
310 /* Functions for construction of the control flow graph. */
312 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
314 We decide not to build the control flow graph if there is possibly more
315 than one entry to the function, if computed branches exist, of if we
316 have nonlocal gotos. */
318 static int
319 is_cfg_nonregular ()
321 int b;
322 rtx insn;
323 RTX_CODE code;
325 /* If we have a label that could be the target of a nonlocal goto, then
326 the cfg is not well structured. */
327 if (nonlocal_goto_handler_labels)
328 return 1;
330 /* If we have any forced labels, then the cfg is not well structured. */
331 if (forced_labels)
332 return 1;
334 /* If this function has a computed jump, then we consider the cfg
335 not well structured. */
336 if (current_function_has_computed_jump)
337 return 1;
339 /* If we have exception handlers, then we consider the cfg not well
340 structured. ?!? We should be able to handle this now that flow.c
341 computes an accurate cfg for EH. */
342 if (current_function_has_exception_handlers ())
343 return 1;
345 /* If we have non-jumping insns which refer to labels, then we consider
346 the cfg not well structured. */
347 /* Check for labels referred to other thn by jumps. */
348 for (b = 0; b < n_basic_blocks; b++)
349 for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
351 code = GET_CODE (insn);
352 if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
354 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
356 if (note
357 && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
358 && find_reg_note (NEXT_INSN (insn), REG_LABEL,
359 XEXP (note, 0))))
360 return 1;
363 if (insn == BLOCK_END (b))
364 break;
367 /* All the tests passed. Consider the cfg well structured. */
368 return 0;
371 /* Build the control flow graph and set nr_edges.
373 Instead of trying to build a cfg ourselves, we rely on flow to
374 do it for us. Stamp out useless code (and bug) duplication.
376 Return nonzero if an irregularity in the cfg is found which would
377 prevent cross block scheduling. */
379 static int
380 build_control_flow (edge_list)
381 struct edge_list *edge_list;
383 int i, unreachable, num_edges;
385 /* This already accounts for entry/exit edges. */
386 num_edges = NUM_EDGES (edge_list);
388 /* Unreachable loops with more than one basic block are detected
389 during the DFS traversal in find_rgns.
391 Unreachable loops with a single block are detected here. This
392 test is redundant with the one in find_rgns, but it's much
393 cheaper to go ahead and catch the trivial case here. */
394 unreachable = 0;
395 for (i = 0; i < n_basic_blocks; i++)
397 basic_block b = BASIC_BLOCK (i);
399 if (b->pred == NULL
400 || (b->pred->src == b
401 && b->pred->pred_next == NULL))
402 unreachable = 1;
405 /* ??? We can kill these soon. */
406 in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
407 out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
408 edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
410 nr_edges = 0;
411 for (i = 0; i < num_edges; i++)
413 edge e = INDEX_EDGE (edge_list, i);
415 if (e->dest != EXIT_BLOCK_PTR
416 && e->src != ENTRY_BLOCK_PTR)
417 new_edge (e->src->index, e->dest->index);
420 /* Increment by 1, since edge 0 is unused. */
421 nr_edges++;
423 return unreachable;
426 /* Record an edge in the control flow graph from SOURCE to TARGET.
428 In theory, this is redundant with the s_succs computed above, but
429 we have not converted all of haifa to use information from the
430 integer lists. */
432 static void
433 new_edge (source, target)
434 int source, target;
436 int e, next_edge;
437 int curr_edge, fst_edge;
439 /* Check for duplicates. */
440 fst_edge = curr_edge = OUT_EDGES (source);
441 while (curr_edge)
443 if (FROM_BLOCK (curr_edge) == source
444 && TO_BLOCK (curr_edge) == target)
446 return;
449 curr_edge = NEXT_OUT (curr_edge);
451 if (fst_edge == curr_edge)
452 break;
455 e = ++nr_edges;
457 FROM_BLOCK (e) = source;
458 TO_BLOCK (e) = target;
460 if (OUT_EDGES (source))
462 next_edge = NEXT_OUT (OUT_EDGES (source));
463 NEXT_OUT (OUT_EDGES (source)) = e;
464 NEXT_OUT (e) = next_edge;
466 else
468 OUT_EDGES (source) = e;
469 NEXT_OUT (e) = e;
472 if (IN_EDGES (target))
474 next_edge = NEXT_IN (IN_EDGES (target));
475 NEXT_IN (IN_EDGES (target)) = e;
476 NEXT_IN (e) = next_edge;
478 else
480 IN_EDGES (target) = e;
481 NEXT_IN (e) = e;
485 /* Translate a bit-set SET to a list BL of the bit-set members. */
487 static void
488 extract_bitlst (set, bl)
489 sbitmap set;
490 bitlst *bl;
492 int i;
494 /* bblst table space is reused in each call to extract_bitlst. */
495 bitlst_table_last = 0;
497 bl->first_member = &bitlst_table[bitlst_table_last];
498 bl->nr_members = 0;
500 /* Iterate over each word in the bitset. */
501 EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
503 bitlst_table[bitlst_table_last++] = i;
504 (bl->nr_members)++;
509 /* Functions for the construction of regions. */
511 /* Print the regions, for debugging purposes. Callable from debugger. */
513 void
514 debug_regions ()
516 int rgn, bb;
518 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
519 for (rgn = 0; rgn < nr_regions; rgn++)
521 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
522 rgn_table[rgn].rgn_nr_blocks);
523 fprintf (sched_dump, ";;\tbb/block: ");
525 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
527 current_blocks = RGN_BLOCKS (rgn);
529 if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
530 abort ();
532 fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
535 fprintf (sched_dump, "\n\n");
539 /* Build a single block region for each basic block in the function.
540 This allows for using the same code for interblock and basic block
541 scheduling. */
543 static void
544 find_single_block_region ()
546 int i;
548 for (i = 0; i < n_basic_blocks; i++)
550 rgn_bb_table[i] = i;
551 RGN_NR_BLOCKS (i) = 1;
552 RGN_BLOCKS (i) = i;
553 CONTAINING_RGN (i) = i;
554 BLOCK_TO_BB (i) = 0;
556 nr_regions = n_basic_blocks;
559 /* Update number of blocks and the estimate for number of insns
560 in the region. Return 1 if the region is "too large" for interblock
561 scheduling (compile time considerations), otherwise return 0. */
563 static int
564 too_large (block, num_bbs, num_insns)
565 int block, *num_bbs, *num_insns;
567 (*num_bbs)++;
568 (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
569 INSN_LUID (BLOCK_HEAD (block)));
570 if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
571 return 1;
572 else
573 return 0;
576 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
577 is still an inner loop. Put in max_hdr[blk] the header of the most inner
578 loop containing blk. */
579 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
581 if (max_hdr[blk] == -1) \
582 max_hdr[blk] = hdr; \
583 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
584 RESET_BIT (inner, hdr); \
585 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
587 RESET_BIT (inner,max_hdr[blk]); \
588 max_hdr[blk] = hdr; \
592 /* Find regions for interblock scheduling.
594 A region for scheduling can be:
596 * A loop-free procedure, or
598 * A reducible inner loop, or
600 * A basic block not contained in any other region.
602 ?!? In theory we could build other regions based on extended basic
603 blocks or reverse extended basic blocks. Is it worth the trouble?
605 Loop blocks that form a region are put into the region's block list
606 in topological order.
608 This procedure stores its results into the following global (ick) variables
610 * rgn_nr
611 * rgn_table
612 * rgn_bb_table
613 * block_to_bb
614 * containing region
616 We use dominator relationships to avoid making regions out of non-reducible
617 loops.
619 This procedure needs to be converted to work on pred/succ lists instead
620 of edge tables. That would simplify it somewhat. */
622 static void
623 find_rgns (edge_list, dom)
624 struct edge_list *edge_list;
625 sbitmap *dom;
627 int *max_hdr, *dfs_nr, *stack, *degree;
628 char no_loops = 1;
629 int node, child, loop_head, i, head, tail;
630 int count = 0, sp, idx = 0, current_edge = out_edges[0];
631 int num_bbs, num_insns, unreachable;
632 int too_large_failure;
634 /* Note if an edge has been passed. */
635 sbitmap passed;
637 /* Note if a block is a natural loop header. */
638 sbitmap header;
640 /* Note if a block is an natural inner loop header. */
641 sbitmap inner;
643 /* Note if a block is in the block queue. */
644 sbitmap in_queue;
646 /* Note if a block is in the block queue. */
647 sbitmap in_stack;
649 int num_edges = NUM_EDGES (edge_list);
651 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
652 and a mapping from block to its loop header (if the block is contained
653 in a loop, else -1).
655 Store results in HEADER, INNER, and MAX_HDR respectively, these will
656 be used as inputs to the second traversal.
658 STACK, SP and DFS_NR are only used during the first traversal. */
660 /* Allocate and initialize variables for the first traversal. */
661 max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
662 dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
663 stack = (int *) xmalloc (nr_edges * sizeof (int));
665 inner = sbitmap_alloc (n_basic_blocks);
666 sbitmap_ones (inner);
668 header = sbitmap_alloc (n_basic_blocks);
669 sbitmap_zero (header);
671 passed = sbitmap_alloc (nr_edges);
672 sbitmap_zero (passed);
674 in_queue = sbitmap_alloc (n_basic_blocks);
675 sbitmap_zero (in_queue);
677 in_stack = sbitmap_alloc (n_basic_blocks);
678 sbitmap_zero (in_stack);
680 for (i = 0; i < n_basic_blocks; i++)
681 max_hdr[i] = -1;
683 /* DFS traversal to find inner loops in the cfg. */
685 sp = -1;
686 while (1)
688 if (current_edge == 0 || TEST_BIT (passed, current_edge))
690 /* We have reached a leaf node or a node that was already
691 processed. Pop edges off the stack until we find
692 an edge that has not yet been processed. */
693 while (sp >= 0
694 && (current_edge == 0 || TEST_BIT (passed, current_edge)))
696 /* Pop entry off the stack. */
697 current_edge = stack[sp--];
698 node = FROM_BLOCK (current_edge);
699 child = TO_BLOCK (current_edge);
700 RESET_BIT (in_stack, child);
701 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
702 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
703 current_edge = NEXT_OUT (current_edge);
706 /* See if have finished the DFS tree traversal. */
707 if (sp < 0 && TEST_BIT (passed, current_edge))
708 break;
710 /* Nope, continue the traversal with the popped node. */
711 continue;
714 /* Process a node. */
715 node = FROM_BLOCK (current_edge);
716 child = TO_BLOCK (current_edge);
717 SET_BIT (in_stack, node);
718 dfs_nr[node] = ++count;
720 /* If the successor is in the stack, then we've found a loop.
721 Mark the loop, if it is not a natural loop, then it will
722 be rejected during the second traversal. */
723 if (TEST_BIT (in_stack, child))
725 no_loops = 0;
726 SET_BIT (header, child);
727 UPDATE_LOOP_RELATIONS (node, child);
728 SET_BIT (passed, current_edge);
729 current_edge = NEXT_OUT (current_edge);
730 continue;
733 /* If the child was already visited, then there is no need to visit
734 it again. Just update the loop relationships and restart
735 with a new edge. */
736 if (dfs_nr[child])
738 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
739 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
740 SET_BIT (passed, current_edge);
741 current_edge = NEXT_OUT (current_edge);
742 continue;
745 /* Push an entry on the stack and continue DFS traversal. */
746 stack[++sp] = current_edge;
747 SET_BIT (passed, current_edge);
748 current_edge = OUT_EDGES (child);
750 /* This is temporary until haifa is converted to use rth's new
751 cfg routines which have true entry/exit blocks and the
752 appropriate edges from/to those blocks.
754 Generally we update dfs_nr for a node when we process its
755 out edge. However, if the node has no out edge then we will
756 not set dfs_nr for that node. This can confuse the scheduler
757 into thinking that we have unreachable blocks, which in turn
758 disables cross block scheduling.
760 So, if we have a node with no out edges, go ahead and mark it
761 as reachable now. */
762 if (current_edge == 0)
763 dfs_nr[child] = ++count;
766 /* Another check for unreachable blocks. The earlier test in
767 is_cfg_nonregular only finds unreachable blocks that do not
768 form a loop.
770 The DFS traversal will mark every block that is reachable from
771 the entry node by placing a nonzero value in dfs_nr. Thus if
772 dfs_nr is zero for any block, then it must be unreachable. */
773 unreachable = 0;
774 for (i = 0; i < n_basic_blocks; i++)
775 if (dfs_nr[i] == 0)
777 unreachable = 1;
778 break;
781 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
782 to hold degree counts. */
783 degree = dfs_nr;
785 for (i = 0; i < n_basic_blocks; i++)
786 degree[i] = 0;
787 for (i = 0; i < num_edges; i++)
789 edge e = INDEX_EDGE (edge_list, i);
791 if (e->dest != EXIT_BLOCK_PTR)
792 degree[e->dest->index]++;
795 /* Do not perform region scheduling if there are any unreachable
796 blocks. */
797 if (!unreachable)
799 int *queue;
801 if (no_loops)
802 SET_BIT (header, 0);
804 /* Second travsersal:find reducible inner loops and topologically sort
805 block of each region. */
807 queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
809 /* Find blocks which are inner loop headers. We still have non-reducible
810 loops to consider at this point. */
811 for (i = 0; i < n_basic_blocks; i++)
813 if (TEST_BIT (header, i) && TEST_BIT (inner, i))
815 edge e;
816 int j;
818 /* Now check that the loop is reducible. We do this separate
819 from finding inner loops so that we do not find a reducible
820 loop which contains an inner non-reducible loop.
822 A simple way to find reducible/natural loops is to verify
823 that each block in the loop is dominated by the loop
824 header.
826 If there exists a block that is not dominated by the loop
827 header, then the block is reachable from outside the loop
828 and thus the loop is not a natural loop. */
829 for (j = 0; j < n_basic_blocks; j++)
831 /* First identify blocks in the loop, except for the loop
832 entry block. */
833 if (i == max_hdr[j] && i != j)
835 /* Now verify that the block is dominated by the loop
836 header. */
837 if (!TEST_BIT (dom[j], i))
838 break;
842 /* If we exited the loop early, then I is the header of
843 a non-reducible loop and we should quit processing it
844 now. */
845 if (j != n_basic_blocks)
846 continue;
848 /* I is a header of an inner loop, or block 0 in a subroutine
849 with no loops at all. */
850 head = tail = -1;
851 too_large_failure = 0;
852 loop_head = max_hdr[i];
854 /* Decrease degree of all I's successors for topological
855 ordering. */
856 for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
857 if (e->dest != EXIT_BLOCK_PTR)
858 --degree[e->dest->index];
860 /* Estimate # insns, and count # blocks in the region. */
861 num_bbs = 1;
862 num_insns = (INSN_LUID (BLOCK_END (i))
863 - INSN_LUID (BLOCK_HEAD (i)));
865 /* Find all loop latches (blocks with back edges to the loop
866 header) or all the leaf blocks in the cfg has no loops.
868 Place those blocks into the queue. */
869 if (no_loops)
871 for (j = 0; j < n_basic_blocks; j++)
872 /* Leaf nodes have only a single successor which must
873 be EXIT_BLOCK. */
874 if (BASIC_BLOCK (j)->succ
875 && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
876 && BASIC_BLOCK (j)->succ->succ_next == NULL)
878 queue[++tail] = j;
879 SET_BIT (in_queue, j);
881 if (too_large (j, &num_bbs, &num_insns))
883 too_large_failure = 1;
884 break;
888 else
890 edge e;
892 for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
894 if (e->src == ENTRY_BLOCK_PTR)
895 continue;
897 node = e->src->index;
899 if (max_hdr[node] == loop_head && node != i)
901 /* This is a loop latch. */
902 queue[++tail] = node;
903 SET_BIT (in_queue, node);
905 if (too_large (node, &num_bbs, &num_insns))
907 too_large_failure = 1;
908 break;
914 /* Now add all the blocks in the loop to the queue.
916 We know the loop is a natural loop; however the algorithm
917 above will not always mark certain blocks as being in the
918 loop. Consider:
919 node children
920 a b,c
922 c a,d
925 The algorithm in the DFS traversal may not mark B & D as part
926 of the loop (ie they will not have max_hdr set to A).
928 We know they can not be loop latches (else they would have
929 had max_hdr set since they'd have a backedge to a dominator
930 block). So we don't need them on the initial queue.
932 We know they are part of the loop because they are dominated
933 by the loop header and can be reached by a backwards walk of
934 the edges starting with nodes on the initial queue.
936 It is safe and desirable to include those nodes in the
937 loop/scheduling region. To do so we would need to decrease
938 the degree of a node if it is the target of a backedge
939 within the loop itself as the node is placed in the queue.
941 We do not do this because I'm not sure that the actual
942 scheduling code will properly handle this case. ?!? */
944 while (head < tail && !too_large_failure)
946 edge e;
947 child = queue[++head];
949 for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
951 node = e->src->index;
953 /* See discussion above about nodes not marked as in
954 this loop during the initial DFS traversal. */
955 if (e->src == ENTRY_BLOCK_PTR
956 || max_hdr[node] != loop_head)
958 tail = -1;
959 break;
961 else if (!TEST_BIT (in_queue, node) && node != i)
963 queue[++tail] = node;
964 SET_BIT (in_queue, node);
966 if (too_large (node, &num_bbs, &num_insns))
968 too_large_failure = 1;
969 break;
975 if (tail >= 0 && !too_large_failure)
977 /* Place the loop header into list of region blocks. */
978 degree[i] = -1;
979 rgn_bb_table[idx] = i;
980 RGN_NR_BLOCKS (nr_regions) = num_bbs;
981 RGN_BLOCKS (nr_regions) = idx++;
982 CONTAINING_RGN (i) = nr_regions;
983 BLOCK_TO_BB (i) = count = 0;
985 /* Remove blocks from queue[] when their in degree
986 becomes zero. Repeat until no blocks are left on the
987 list. This produces a topological list of blocks in
988 the region. */
989 while (tail >= 0)
991 if (head < 0)
992 head = tail;
993 child = queue[head];
994 if (degree[child] == 0)
996 edge e;
998 degree[child] = -1;
999 rgn_bb_table[idx++] = child;
1000 BLOCK_TO_BB (child) = ++count;
1001 CONTAINING_RGN (child) = nr_regions;
1002 queue[head] = queue[tail--];
1004 for (e = BASIC_BLOCK (child)->succ;
1006 e = e->succ_next)
1007 if (e->dest != EXIT_BLOCK_PTR)
1008 --degree[e->dest->index];
1010 else
1011 --head;
1013 ++nr_regions;
1017 free (queue);
1020 /* Any block that did not end up in a region is placed into a region
1021 by itself. */
1022 for (i = 0; i < n_basic_blocks; i++)
1023 if (degree[i] >= 0)
1025 rgn_bb_table[idx] = i;
1026 RGN_NR_BLOCKS (nr_regions) = 1;
1027 RGN_BLOCKS (nr_regions) = idx++;
1028 CONTAINING_RGN (i) = nr_regions++;
1029 BLOCK_TO_BB (i) = 0;
1032 free (max_hdr);
1033 free (dfs_nr);
1034 free (stack);
1035 sbitmap_free (passed);
1036 sbitmap_free (header);
1037 sbitmap_free (inner);
1038 sbitmap_free (in_queue);
1039 sbitmap_free (in_stack);
1042 /* Functions for regions scheduling information. */
1044 /* Compute dominators, probability, and potential-split-edges of bb.
1045 Assume that these values were already computed for bb's predecessors. */
1047 static void
1048 compute_dom_prob_ps (bb)
1049 int bb;
1051 int nxt_in_edge, fst_in_edge, pred;
1052 int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
1054 prob[bb] = 0.0;
1055 if (IS_RGN_ENTRY (bb))
1057 SET_BIT (dom[bb], 0);
1058 prob[bb] = 1.0;
1059 return;
1062 fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));
1064 /* Initialize dom[bb] to '111..1'. */
1065 sbitmap_ones (dom[bb]);
1069 pred = FROM_BLOCK (nxt_in_edge);
1070 sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]);
1071 sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb], ancestor_edges[BLOCK_TO_BB (pred)]);
1073 SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge));
1075 nr_out_edges = 1;
1076 nr_rgn_out_edges = 0;
1077 fst_out_edge = OUT_EDGES (pred);
1078 nxt_out_edge = NEXT_OUT (fst_out_edge);
1080 sbitmap_a_or_b (pot_split[bb], pot_split[bb], pot_split[BLOCK_TO_BB (pred)]);
1082 SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge));
1084 /* The successor doesn't belong in the region? */
1085 if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
1086 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1087 ++nr_rgn_out_edges;
1089 while (fst_out_edge != nxt_out_edge)
1091 ++nr_out_edges;
1092 /* The successor doesn't belong in the region? */
1093 if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
1094 CONTAINING_RGN (BB_TO_BLOCK (bb)))
1095 ++nr_rgn_out_edges;
1096 SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
1097 nxt_out_edge = NEXT_OUT (nxt_out_edge);
1101 /* Now nr_rgn_out_edges is the number of region-exit edges from
1102 pred, and nr_out_edges will be the number of pred out edges
1103 not leaving the region. */
1104 nr_out_edges -= nr_rgn_out_edges;
1105 if (nr_rgn_out_edges > 0)
1106 prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1107 else
1108 prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;
1109 nxt_in_edge = NEXT_IN (nxt_in_edge);
1111 while (fst_in_edge != nxt_in_edge);
1113 SET_BIT (dom[bb], bb);
1114 sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);
1116 if (sched_verbose >= 2)
1117 fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1118 (int) (100.0 * prob[bb]));
1121 /* Functions for target info. */
1123 /* Compute in BL the list of split-edges of bb_src relatively to bb_trg.
1124 Note that bb_trg dominates bb_src. */
1126 static void
1127 split_edges (bb_src, bb_trg, bl)
1128 int bb_src;
1129 int bb_trg;
1130 edgelst *bl;
1132 sbitmap src = (edgeset) sbitmap_alloc (pot_split[bb_src]->n_bits);
1133 sbitmap_copy (src, pot_split[bb_src]);
1135 sbitmap_difference (src, src, pot_split[bb_trg]);
1136 extract_bitlst (src, bl);
1137 sbitmap_free (src);
1140 /* Find the valid candidate-source-blocks for the target block TRG, compute
1141 their probability, and check if they are speculative or not.
1142 For speculative sources, compute their update-blocks and split-blocks. */
1144 static void
1145 compute_trg_info (trg)
1146 int trg;
1148 candidate *sp;
1149 edgelst el;
1150 int check_block, update_idx;
1151 int i, j, k, fst_edge, nxt_edge;
1153 /* Define some of the fields for the target bb as well. */
1154 sp = candidate_table + trg;
1155 sp->is_valid = 1;
1156 sp->is_speculative = 0;
1157 sp->src_prob = 100;
1159 for (i = trg + 1; i < current_nr_blocks; i++)
1161 sp = candidate_table + i;
1163 sp->is_valid = IS_DOMINATED (i, trg);
1164 if (sp->is_valid)
1166 sp->src_prob = GET_SRC_PROB (i, trg);
1167 sp->is_valid = (sp->src_prob >= MIN_PROBABILITY);
1170 if (sp->is_valid)
1172 split_edges (i, trg, &el);
1173 sp->is_speculative = (el.nr_members) ? 1 : 0;
1174 if (sp->is_speculative && !flag_schedule_speculative)
1175 sp->is_valid = 0;
1178 if (sp->is_valid)
1180 char *update_blocks;
1182 /* Compute split blocks and store them in bblst_table.
1183 The TO block of every split edge is a split block. */
1184 sp->split_bbs.first_member = &bblst_table[bblst_last];
1185 sp->split_bbs.nr_members = el.nr_members;
1186 for (j = 0; j < el.nr_members; bblst_last++, j++)
1187 bblst_table[bblst_last] =
1188 TO_BLOCK (rgn_edges[el.first_member[j]]);
1189 sp->update_bbs.first_member = &bblst_table[bblst_last];
1191 /* Compute update blocks and store them in bblst_table.
1192 For every split edge, look at the FROM block, and check
1193 all out edges. For each out edge that is not a split edge,
1194 add the TO block to the update block list. This list can end
1195 up with a lot of duplicates. We need to weed them out to avoid
1196 overrunning the end of the bblst_table. */
1197 update_blocks = (char *) alloca (n_basic_blocks);
1198 memset (update_blocks, 0, n_basic_blocks);
1200 update_idx = 0;
1201 for (j = 0; j < el.nr_members; j++)
1203 check_block = FROM_BLOCK (rgn_edges[el.first_member[j]]);
1204 fst_edge = nxt_edge = OUT_EDGES (check_block);
1207 if (! update_blocks[TO_BLOCK (nxt_edge)])
1209 for (k = 0; k < el.nr_members; k++)
1210 if (EDGE_TO_BIT (nxt_edge) == el.first_member[k])
1211 break;
1213 if (k >= el.nr_members)
1215 bblst_table[bblst_last++] = TO_BLOCK (nxt_edge);
1216 update_blocks[TO_BLOCK (nxt_edge)] = 1;
1217 update_idx++;
1221 nxt_edge = NEXT_OUT (nxt_edge);
1223 while (fst_edge != nxt_edge);
1225 sp->update_bbs.nr_members = update_idx;
1227 /* Make sure we didn't overrun the end of bblst_table. */
1228 if (bblst_last > bblst_size)
1229 abort ();
1231 else
1233 sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0;
1235 sp->is_speculative = 0;
1236 sp->src_prob = 0;
1241 /* Print candidates info, for debugging purposes. Callable from debugger. */
1243 void
1244 debug_candidate (i)
1245 int i;
1247 if (!candidate_table[i].is_valid)
1248 return;
1250 if (candidate_table[i].is_speculative)
1252 int j;
1253 fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);
1255 fprintf (sched_dump, "split path: ");
1256 for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
1258 int b = candidate_table[i].split_bbs.first_member[j];
1260 fprintf (sched_dump, " %d ", b);
1262 fprintf (sched_dump, "\n");
1264 fprintf (sched_dump, "update path: ");
1265 for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
1267 int b = candidate_table[i].update_bbs.first_member[j];
1269 fprintf (sched_dump, " %d ", b);
1271 fprintf (sched_dump, "\n");
1273 else
1275 fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1279 /* Print candidates info, for debugging purposes. Callable from debugger. */
1281 void
1282 debug_candidates (trg)
1283 int trg;
1285 int i;
1287 fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1288 BB_TO_BLOCK (trg), trg);
1289 for (i = trg + 1; i < current_nr_blocks; i++)
1290 debug_candidate (i);
1293 /* Functions for speculative scheduing. */
1295 /* Return 0 if x is a set of a register alive in the beginning of one
1296 of the split-blocks of src, otherwise return 1. */
1298 static int
1299 check_live_1 (src, x)
1300 int src;
1301 rtx x;
1303 int i;
1304 int regno;
1305 rtx reg = SET_DEST (x);
1307 if (reg == 0)
1308 return 1;
1310 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
1311 || GET_CODE (reg) == SIGN_EXTRACT
1312 || GET_CODE (reg) == STRICT_LOW_PART)
1313 reg = XEXP (reg, 0);
1315 if (GET_CODE (reg) == PARALLEL)
1317 int i;
1319 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1320 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1321 if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)))
1322 return 1;
1324 return 0;
1327 if (GET_CODE (reg) != REG)
1328 return 1;
1330 regno = REGNO (reg);
1332 if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno])
1334 /* Global registers are assumed live. */
1335 return 0;
1337 else
1339 if (regno < FIRST_PSEUDO_REGISTER)
1341 /* Check for hard registers. */
1342 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1343 while (--j >= 0)
1345 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1347 int b = candidate_table[src].split_bbs.first_member[i];
1349 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start,
1350 regno + j))
1352 return 0;
1357 else
1359 /* Check for psuedo registers. */
1360 for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++)
1362 int b = candidate_table[src].split_bbs.first_member[i];
1364 if (REGNO_REG_SET_P (BASIC_BLOCK (b)->global_live_at_start, regno))
1366 return 0;
1372 return 1;
1375 /* If x is a set of a register R, mark that R is alive in the beginning
1376 of every update-block of src. */
1378 static void
1379 update_live_1 (src, x)
1380 int src;
1381 rtx x;
1383 int i;
1384 int regno;
1385 rtx reg = SET_DEST (x);
1387 if (reg == 0)
1388 return;
1390 while (GET_CODE (reg) == SUBREG || GET_CODE (reg) == ZERO_EXTRACT
1391 || GET_CODE (reg) == SIGN_EXTRACT
1392 || GET_CODE (reg) == STRICT_LOW_PART)
1393 reg = XEXP (reg, 0);
1395 if (GET_CODE (reg) == PARALLEL)
1397 int i;
1399 for (i = XVECLEN (reg, 0) - 1; i >= 0; i--)
1400 if (XEXP (XVECEXP (reg, 0, i), 0) != 0)
1401 update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0));
1403 return;
1406 if (GET_CODE (reg) != REG)
1407 return;
1409 /* Global registers are always live, so the code below does not apply
1410 to them. */
1412 regno = REGNO (reg);
1414 if (regno >= FIRST_PSEUDO_REGISTER || !global_regs[regno])
1416 if (regno < FIRST_PSEUDO_REGISTER)
1418 int j = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1419 while (--j >= 0)
1421 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1423 int b = candidate_table[src].update_bbs.first_member[i];
1425 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start,
1426 regno + j);
1430 else
1432 for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++)
1434 int b = candidate_table[src].update_bbs.first_member[i];
1436 SET_REGNO_REG_SET (BASIC_BLOCK (b)->global_live_at_start, regno);
1442 /* Return 1 if insn can be speculatively moved from block src to trg,
1443 otherwise return 0. Called before first insertion of insn to
1444 ready-list or before the scheduling. */
1446 static int
1447 check_live (insn, src)
1448 rtx insn;
1449 int src;
1451 /* Find the registers set by instruction. */
1452 if (GET_CODE (PATTERN (insn)) == SET
1453 || GET_CODE (PATTERN (insn)) == CLOBBER)
1454 return check_live_1 (src, PATTERN (insn));
1455 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1457 int j;
1458 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1459 if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1460 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1461 && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
1462 return 0;
1464 return 1;
1467 return 1;
1470 /* Update the live registers info after insn was moved speculatively from
1471 block src to trg. */
1473 static void
1474 update_live (insn, src)
1475 rtx insn;
1476 int src;
1478 /* Find the registers set by instruction. */
1479 if (GET_CODE (PATTERN (insn)) == SET
1480 || GET_CODE (PATTERN (insn)) == CLOBBER)
1481 update_live_1 (src, PATTERN (insn));
1482 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
1484 int j;
1485 for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
1486 if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
1487 || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
1488 update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
1492 /* Exception Free Loads:
1494 We define five classes of speculative loads: IFREE, IRISKY,
1495 PFREE, PRISKY, and MFREE.
1497 IFREE loads are loads that are proved to be exception-free, just
1498 by examining the load insn. Examples for such loads are loads
1499 from TOC and loads of global data.
1501 IRISKY loads are loads that are proved to be exception-risky,
1502 just by examining the load insn. Examples for such loads are
1503 volatile loads and loads from shared memory.
1505 PFREE loads are loads for which we can prove, by examining other
1506 insns, that they are exception-free. Currently, this class consists
1507 of loads for which we are able to find a "similar load", either in
1508 the target block, or, if only one split-block exists, in that split
1509 block. Load2 is similar to load1 if both have same single base
1510 register. We identify only part of the similar loads, by finding
1511 an insn upon which both load1 and load2 have a DEF-USE dependence.
1513 PRISKY loads are loads for which we can prove, by examining other
1514 insns, that they are exception-risky. Currently we have two proofs for
1515 such loads. The first proof detects loads that are probably guarded by a
1516 test on the memory address. This proof is based on the
1517 backward and forward data dependence information for the region.
1518 Let load-insn be the examined load.
1519 Load-insn is PRISKY iff ALL the following hold:
1521 - insn1 is not in the same block as load-insn
1522 - there is a DEF-USE dependence chain (insn1, ..., load-insn)
1523 - test-insn is either a compare or a branch, not in the same block
1524 as load-insn
1525 - load-insn is reachable from test-insn
1526 - there is a DEF-USE dependence chain (insn1, ..., test-insn)
1528 This proof might fail when the compare and the load are fed
1529 by an insn not in the region. To solve this, we will add to this
1530 group all loads that have no input DEF-USE dependence.
1532 The second proof detects loads that are directly or indirectly
1533 fed by a speculative load. This proof is affected by the
1534 scheduling process. We will use the flag fed_by_spec_load.
1535 Initially, all insns have this flag reset. After a speculative
1536 motion of an insn, if insn is either a load, or marked as
1537 fed_by_spec_load, we will also mark as fed_by_spec_load every
1538 insn1 for which a DEF-USE dependence (insn, insn1) exists. A
1539 load which is fed_by_spec_load is also PRISKY.
1541 MFREE (maybe-free) loads are all the remaining loads. They may be
1542 exception-free, but we cannot prove it.
1544 Now, all loads in IFREE and PFREE classes are considered
1545 exception-free, while all loads in IRISKY and PRISKY classes are
1546 considered exception-risky. As for loads in the MFREE class,
1547 these are considered either exception-free or exception-risky,
1548 depending on whether we are pessimistic or optimistic. We have
1549 to take the pessimistic approach to assure the safety of
1550 speculative scheduling, but we can take the optimistic approach
1551 by invoking the -fsched_spec_load_dangerous option. */
1553 enum INSN_TRAP_CLASS
1555 TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
1556 PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
1559 #define WORST_CLASS(class1, class2) \
1560 ((class1 > class2) ? class1 : class2)
1562 /* Non-zero if block bb_to is equal to, or reachable from block bb_from. */
1563 #define IS_REACHABLE(bb_from, bb_to) \
1564 (bb_from == bb_to \
1565 || IS_RGN_ENTRY (bb_from) \
1566 || (TEST_BIT (ancestor_edges[bb_to], \
1567 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
1569 /* Non-zero iff the address is comprised from at most 1 register. */
1570 #define CONST_BASED_ADDRESS_P(x) \
1571 (GET_CODE (x) == REG \
1572 || ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS \
1573 || (GET_CODE (x) == LO_SUM)) \
1574 && (CONSTANT_P (XEXP (x, 0)) \
1575 || CONSTANT_P (XEXP (x, 1)))))
1577 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1579 static void
1580 set_spec_fed (load_insn)
1581 rtx load_insn;
1583 rtx link;
1585 for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
1586 if (GET_MODE (link) == VOIDmode)
1587 FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
1588 } /* set_spec_fed */
1590 /* On the path from the insn to load_insn_bb, find a conditional
1591 branch depending on insn, that guards the speculative load. */
1593 static int
1594 find_conditional_protection (insn, load_insn_bb)
1595 rtx insn;
1596 int load_insn_bb;
1598 rtx link;
1600 /* Iterate through DEF-USE forward dependences. */
1601 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
1603 rtx next = XEXP (link, 0);
1604 if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
1605 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
1606 && IS_REACHABLE (INSN_BB (next), load_insn_bb)
1607 && load_insn_bb != INSN_BB (next)
1608 && GET_MODE (link) == VOIDmode
1609 && (GET_CODE (next) == JUMP_INSN
1610 || find_conditional_protection (next, load_insn_bb)))
1611 return 1;
1613 return 0;
1614 } /* find_conditional_protection */
1616 /* Returns 1 if the same insn1 that participates in the computation
1617 of load_insn's address is feeding a conditional branch that is
1618 guarding on load_insn. This is true if we find a the two DEF-USE
1619 chains:
1620 insn1 -> ... -> conditional-branch
1621 insn1 -> ... -> load_insn,
1622 and if a flow path exist:
1623 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1624 and if insn1 is on the path
1625 region-entry -> ... -> bb_trg -> ... load_insn.
1627 Locate insn1 by climbing on LOG_LINKS from load_insn.
1628 Locate the branch by following INSN_DEPEND from insn1. */
1630 static int
1631 is_conditionally_protected (load_insn, bb_src, bb_trg)
1632 rtx load_insn;
1633 int bb_src, bb_trg;
1635 rtx link;
1637 for (link = LOG_LINKS (load_insn); link; link = XEXP (link, 1))
1639 rtx insn1 = XEXP (link, 0);
1641 /* Must be a DEF-USE dependence upon non-branch. */
1642 if (GET_MODE (link) != VOIDmode
1643 || GET_CODE (insn1) == JUMP_INSN)
1644 continue;
1646 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1647 if (INSN_BB (insn1) == bb_src
1648 || (CONTAINING_RGN (BLOCK_NUM (insn1))
1649 != CONTAINING_RGN (BB_TO_BLOCK (bb_src)))
1650 || (!IS_REACHABLE (bb_trg, INSN_BB (insn1))
1651 && !IS_REACHABLE (INSN_BB (insn1), bb_trg)))
1652 continue;
1654 /* Now search for the conditional-branch. */
1655 if (find_conditional_protection (insn1, bb_src))
1656 return 1;
1658 /* Recursive step: search another insn1, "above" current insn1. */
1659 return is_conditionally_protected (insn1, bb_src, bb_trg);
1662 /* The chain does not exist. */
1663 return 0;
1664 } /* is_conditionally_protected */
1666 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1667 load_insn can move speculatively from bb_src to bb_trg. All the
1668 following must hold:
1670 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1671 (2) load_insn and load1 have a def-use dependence upon
1672 the same insn 'insn1'.
1673 (3) either load2 is in bb_trg, or:
1674 - there's only one split-block, and
1675 - load1 is on the escape path, and
1677 From all these we can conclude that the two loads access memory
1678 addresses that differ at most by a constant, and hence if moving
1679 load_insn would cause an exception, it would have been caused by
1680 load2 anyhow. */
1682 static int
1683 is_pfree (load_insn, bb_src, bb_trg)
1684 rtx load_insn;
1685 int bb_src, bb_trg;
1687 rtx back_link;
1688 candidate *candp = candidate_table + bb_src;
1690 if (candp->split_bbs.nr_members != 1)
1691 /* Must have exactly one escape block. */
1692 return 0;
1694 for (back_link = LOG_LINKS (load_insn);
1695 back_link; back_link = XEXP (back_link, 1))
1697 rtx insn1 = XEXP (back_link, 0);
1699 if (GET_MODE (back_link) == VOIDmode)
1701 /* Found a DEF-USE dependence (insn1, load_insn). */
1702 rtx fore_link;
1704 for (fore_link = INSN_DEPEND (insn1);
1705 fore_link; fore_link = XEXP (fore_link, 1))
1707 rtx insn2 = XEXP (fore_link, 0);
1708 if (GET_MODE (fore_link) == VOIDmode)
1710 /* Found a DEF-USE dependence (insn1, insn2). */
1711 if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
1712 /* insn2 not guaranteed to be a 1 base reg load. */
1713 continue;
1715 if (INSN_BB (insn2) == bb_trg)
1716 /* insn2 is the similar load, in the target block. */
1717 return 1;
1719 if (*(candp->split_bbs.first_member) == BLOCK_NUM (insn2))
1720 /* insn2 is a similar load, in a split-block. */
1721 return 1;
1727 /* Couldn't find a similar load. */
1728 return 0;
1729 } /* is_pfree */
1731 /* Returns a class that insn with GET_DEST(insn)=x may belong to,
1732 as found by analyzing insn's expression. */
1734 static int
1735 may_trap_exp (x, is_store)
1736 rtx x;
1737 int is_store;
1739 enum rtx_code code;
1741 if (x == 0)
1742 return TRAP_FREE;
1743 code = GET_CODE (x);
1744 if (is_store)
1746 if (code == MEM && may_trap_p (x))
1747 return TRAP_RISKY;
1748 else
1749 return TRAP_FREE;
1751 if (code == MEM)
1753 /* The insn uses memory: a volatile load. */
1754 if (MEM_VOLATILE_P (x))
1755 return IRISKY;
1756 /* An exception-free load. */
1757 if (!may_trap_p (x))
1758 return IFREE;
1759 /* A load with 1 base register, to be further checked. */
1760 if (CONST_BASED_ADDRESS_P (XEXP (x, 0)))
1761 return PFREE_CANDIDATE;
1762 /* No info on the load, to be further checked. */
1763 return PRISKY_CANDIDATE;
1765 else
1767 const char *fmt;
1768 int i, insn_class = TRAP_FREE;
1770 /* Neither store nor load, check if it may cause a trap. */
1771 if (may_trap_p (x))
1772 return TRAP_RISKY;
1773 /* Recursive step: walk the insn... */
1774 fmt = GET_RTX_FORMAT (code);
1775 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1777 if (fmt[i] == 'e')
1779 int tmp_class = may_trap_exp (XEXP (x, i), is_store);
1780 insn_class = WORST_CLASS (insn_class, tmp_class);
1782 else if (fmt[i] == 'E')
1784 int j;
1785 for (j = 0; j < XVECLEN (x, i); j++)
1787 int tmp_class = may_trap_exp (XVECEXP (x, i, j), is_store);
1788 insn_class = WORST_CLASS (insn_class, tmp_class);
1789 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
1790 break;
1793 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
1794 break;
1796 return insn_class;
1800 /* Classifies insn for the purpose of verifying that it can be
1801 moved speculatively, by examining it's patterns, returning:
1802 TRAP_RISKY: store, or risky non-load insn (e.g. division by variable).
1803 TRAP_FREE: non-load insn.
1804 IFREE: load from a globaly safe location.
1805 IRISKY: volatile load.
1806 PFREE_CANDIDATE, PRISKY_CANDIDATE: load that need to be checked for
1807 being either PFREE or PRISKY. */
1809 static int
1810 haifa_classify_insn (insn)
1811 rtx insn;
1813 rtx pat = PATTERN (insn);
1814 int tmp_class = TRAP_FREE;
1815 int insn_class = TRAP_FREE;
1816 enum rtx_code code;
1818 if (GET_CODE (pat) == PARALLEL)
1820 int i, len = XVECLEN (pat, 0);
1822 for (i = len - 1; i >= 0; i--)
1824 code = GET_CODE (XVECEXP (pat, 0, i));
1825 switch (code)
1827 case CLOBBER:
1828 /* Test if it is a 'store'. */
1829 tmp_class = may_trap_exp (XEXP (XVECEXP (pat, 0, i), 0), 1);
1830 break;
1831 case SET:
1832 /* Test if it is a store. */
1833 tmp_class = may_trap_exp (SET_DEST (XVECEXP (pat, 0, i)), 1);
1834 if (tmp_class == TRAP_RISKY)
1835 break;
1836 /* Test if it is a load. */
1837 tmp_class
1838 = WORST_CLASS (tmp_class,
1839 may_trap_exp (SET_SRC (XVECEXP (pat, 0, i)),
1840 0));
1841 break;
1842 case COND_EXEC:
1843 case TRAP_IF:
1844 tmp_class = TRAP_RISKY;
1845 break;
1846 default:
1849 insn_class = WORST_CLASS (insn_class, tmp_class);
1850 if (insn_class == TRAP_RISKY || insn_class == IRISKY)
1851 break;
1854 else
1856 code = GET_CODE (pat);
1857 switch (code)
1859 case CLOBBER:
1860 /* Test if it is a 'store'. */
1861 tmp_class = may_trap_exp (XEXP (pat, 0), 1);
1862 break;
1863 case SET:
1864 /* Test if it is a store. */
1865 tmp_class = may_trap_exp (SET_DEST (pat), 1);
1866 if (tmp_class == TRAP_RISKY)
1867 break;
1868 /* Test if it is a load. */
1869 tmp_class =
1870 WORST_CLASS (tmp_class,
1871 may_trap_exp (SET_SRC (pat), 0));
1872 break;
1873 case COND_EXEC:
1874 case TRAP_IF:
1875 tmp_class = TRAP_RISKY;
1876 break;
1877 default:;
1879 insn_class = tmp_class;
1882 return insn_class;
1885 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
1886 a load moved speculatively, or if load_insn is protected by
1887 a compare on load_insn's address). */
1889 static int
1890 is_prisky (load_insn, bb_src, bb_trg)
1891 rtx load_insn;
1892 int bb_src, bb_trg;
1894 if (FED_BY_SPEC_LOAD (load_insn))
1895 return 1;
1897 if (LOG_LINKS (load_insn) == NULL)
1898 /* Dependence may 'hide' out of the region. */
1899 return 1;
1901 if (is_conditionally_protected (load_insn, bb_src, bb_trg))
1902 return 1;
1904 return 0;
1907 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
1908 Return 1 if insn is exception-free (and the motion is valid)
1909 and 0 otherwise. */
1911 static int
1912 is_exception_free (insn, bb_src, bb_trg)
1913 rtx insn;
1914 int bb_src, bb_trg;
1916 int insn_class = haifa_classify_insn (insn);
1918 /* Handle non-load insns. */
1919 switch (insn_class)
1921 case TRAP_FREE:
1922 return 1;
1923 case TRAP_RISKY:
1924 return 0;
1925 default:;
1928 /* Handle loads. */
1929 if (!flag_schedule_speculative_load)
1930 return 0;
1931 IS_LOAD_INSN (insn) = 1;
1932 switch (insn_class)
1934 case IFREE:
1935 return (1);
1936 case IRISKY:
1937 return 0;
1938 case PFREE_CANDIDATE:
1939 if (is_pfree (insn, bb_src, bb_trg))
1940 return 1;
1941 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
1942 case PRISKY_CANDIDATE:
1943 if (!flag_schedule_speculative_load_dangerous
1944 || is_prisky (insn, bb_src, bb_trg))
1945 return 0;
1946 break;
1947 default:;
1950 return flag_schedule_speculative_load_dangerous;
1953 /* The number of insns from the current block scheduled so far. */
1954 static int sched_target_n_insns;
1955 /* The number of insns from the current block to be scheduled in total. */
1956 static int target_n_insns;
1957 /* The number of insns from the entire region scheduled so far. */
1958 static int sched_n_insns;
1959 /* Nonzero if the last scheduled insn was a jump. */
1960 static int last_was_jump;
1962 /* Implementations of the sched_info functions for region scheduling. */
1963 static void init_ready_list PARAMS ((struct ready_list *));
1964 static int can_schedule_ready_p PARAMS ((rtx));
1965 static int new_ready PARAMS ((rtx));
1966 static int schedule_more_p PARAMS ((void));
1967 static const char *rgn_print_insn PARAMS ((rtx, int));
1968 static int rgn_rank PARAMS ((rtx, rtx));
1969 static int contributes_to_priority PARAMS ((rtx, rtx));
1970 static void compute_jump_reg_dependencies PARAMS ((rtx, regset));
1972 /* Return nonzero if there are more insns that should be scheduled. */
1974 static int
1975 schedule_more_p ()
1977 return ! last_was_jump && sched_target_n_insns < target_n_insns;
1980 /* Add all insns that are initially ready to the ready list READY. Called
1981 once before scheduling a set of insns. */
1983 static void
1984 init_ready_list (ready)
1985 struct ready_list *ready;
1987 rtx prev_head = current_sched_info->prev_head;
1988 rtx next_tail = current_sched_info->next_tail;
1989 int bb_src;
1990 rtx insn;
1992 target_n_insns = 0;
1993 sched_target_n_insns = 0;
1994 sched_n_insns = 0;
1995 last_was_jump = 0;
1997 /* Print debugging information. */
1998 if (sched_verbose >= 5)
1999 debug_dependencies ();
2001 /* Prepare current target block info. */
2002 if (current_nr_blocks > 1)
2004 candidate_table = (candidate *) xmalloc (current_nr_blocks
2005 * sizeof (candidate));
2007 bblst_last = 0;
2008 /* bblst_table holds split blocks and update blocks for each block after
2009 the current one in the region. split blocks and update blocks are
2010 the TO blocks of region edges, so there can be at most rgn_nr_edges
2011 of them. */
2012 bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges;
2013 bblst_table = (int *) xmalloc (bblst_size * sizeof (int));
2015 bitlst_table_last = 0;
2016 bitlst_table_size = rgn_nr_edges;
2017 bitlst_table = (int *) xmalloc (rgn_nr_edges * sizeof (int));
2019 compute_trg_info (target_bb);
2022 /* Initialize ready list with all 'ready' insns in target block.
2023 Count number of insns in the target block being scheduled. */
2024 for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2026 rtx next;
2028 if (! INSN_P (insn))
2029 continue;
2030 next = NEXT_INSN (insn);
2032 if (INSN_DEP_COUNT (insn) == 0
2033 && (SCHED_GROUP_P (next) == 0 || ! INSN_P (next)))
2034 ready_add (ready, insn);
2035 if (!(SCHED_GROUP_P (insn)))
2036 target_n_insns++;
2039 /* Add to ready list all 'ready' insns in valid source blocks.
2040 For speculative insns, check-live, exception-free, and
2041 issue-delay. */
2042 for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++)
2043 if (IS_VALID (bb_src))
2045 rtx src_head;
2046 rtx src_next_tail;
2047 rtx tail, head;
2049 get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail);
2050 src_next_tail = NEXT_INSN (tail);
2051 src_head = head;
2053 for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn))
2055 if (! INSN_P (insn))
2056 continue;
2058 if (!CANT_MOVE (insn)
2059 && (!IS_SPECULATIVE_INSN (insn)
2060 || (insn_issue_delay (insn) <= 3
2061 && check_live (insn, bb_src)
2062 && is_exception_free (insn, bb_src, target_bb))))
2064 rtx next;
2066 /* Note that we haven't squirreled away the notes for
2067 blocks other than the current. So if this is a
2068 speculative insn, NEXT might otherwise be a note. */
2069 next = next_nonnote_insn (insn);
2070 if (INSN_DEP_COUNT (insn) == 0
2071 && (! next
2072 || SCHED_GROUP_P (next) == 0
2073 || ! INSN_P (next)))
2074 ready_add (ready, insn);
2080 /* Called after taking INSN from the ready list. Returns nonzero if this
2081 insn can be scheduled, nonzero if we should silently discard it. */
2083 static int
2084 can_schedule_ready_p (insn)
2085 rtx insn;
2087 if (GET_CODE (insn) == JUMP_INSN)
2088 last_was_jump = 1;
2090 /* An interblock motion? */
2091 if (INSN_BB (insn) != target_bb)
2093 rtx temp;
2094 basic_block b1;
2096 if (IS_SPECULATIVE_INSN (insn))
2098 if (!check_live (insn, INSN_BB (insn)))
2099 return 0;
2100 update_live (insn, INSN_BB (insn));
2102 /* For speculative load, mark insns fed by it. */
2103 if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn))
2104 set_spec_fed (insn);
2106 nr_spec++;
2108 nr_inter++;
2110 /* Find the beginning of the scheduling group. */
2111 /* ??? Ought to update basic block here, but later bits of
2112 schedule_block assumes the original insn block is
2113 still intact. */
2115 temp = insn;
2116 while (SCHED_GROUP_P (temp))
2117 temp = PREV_INSN (temp);
2119 /* Update source block boundaries. */
2120 b1 = BLOCK_FOR_INSN (temp);
2121 if (temp == b1->head && insn == b1->end)
2123 /* We moved all the insns in the basic block.
2124 Emit a note after the last insn and update the
2125 begin/end boundaries to point to the note. */
2126 rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
2127 b1->head = note;
2128 b1->end = note;
2130 else if (insn == b1->end)
2132 /* We took insns from the end of the basic block,
2133 so update the end of block boundary so that it
2134 points to the first insn we did not move. */
2135 b1->end = PREV_INSN (temp);
2137 else if (temp == b1->head)
2139 /* We took insns from the start of the basic block,
2140 so update the start of block boundary so that
2141 it points to the first insn we did not move. */
2142 b1->head = NEXT_INSN (insn);
2145 else
2147 /* In block motion. */
2148 sched_target_n_insns++;
2150 sched_n_insns++;
2152 return 1;
2155 /* Called after INSN has all its dependencies resolved. Return nonzero
2156 if it should be moved to the ready list or the queue, or zero if we
2157 should silently discard it. */
2158 static int
2159 new_ready (next)
2160 rtx next;
2162 /* For speculative insns, before inserting to ready/queue,
2163 check live, exception-free, and issue-delay. */
2164 if (INSN_BB (next) != target_bb
2165 && (!IS_VALID (INSN_BB (next))
2166 || CANT_MOVE (next)
2167 || (IS_SPECULATIVE_INSN (next)
2168 && (insn_issue_delay (next) > 3
2169 || !check_live (next, INSN_BB (next))
2170 || !is_exception_free (next, INSN_BB (next), target_bb)))))
2171 return 0;
2172 return 1;
2175 /* Return a string that contains the insn uid and optionally anything else
2176 necessary to identify this insn in an output. It's valid to use a
2177 static buffer for this. The ALIGNED parameter should cause the string
2178 to be formatted so that multiple output lines will line up nicely. */
2180 static const char *
2181 rgn_print_insn (insn, aligned)
2182 rtx insn;
2183 int aligned;
2185 static char tmp[80];
2187 if (aligned)
2188 sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
2189 else
2191 if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
2192 sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
2193 else
2194 sprintf (tmp, "%d", INSN_UID (insn));
2196 return tmp;
2199 /* Compare priority of two insns. Return a positive number if the second
2200 insn is to be preferred for scheduling, and a negative one if the first
2201 is to be preferred. Zero if they are equally good. */
2203 static int
2204 rgn_rank (insn1, insn2)
2205 rtx insn1, insn2;
2207 /* Some comparison make sense in interblock scheduling only. */
2208 if (INSN_BB (insn1) != INSN_BB (insn2))
2210 int spec_val, prob_val;
2212 /* Prefer an inblock motion on an interblock motion. */
2213 if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
2214 return 1;
2215 if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
2216 return -1;
2218 /* Prefer a useful motion on a speculative one. */
2219 spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2);
2220 if (spec_val)
2221 return spec_val;
2223 /* Prefer a more probable (speculative) insn. */
2224 prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1);
2225 if (prob_val)
2226 return prob_val;
2228 return 0;
2231 /* NEXT is an instruction that depends on INSN (a backward dependence);
2232 return nonzero if we should include this dependence in priority
2233 calculations. */
2235 static int
2236 contributes_to_priority (next, insn)
2237 rtx next, insn;
2239 return BLOCK_NUM (next) == BLOCK_NUM (insn);
2242 /* INSN is a JUMP_INSN. Store the set of registers that must be considered
2243 to be set by this jump in SET. */
2245 static void
2246 compute_jump_reg_dependencies (insn, set)
2247 rtx insn ATTRIBUTE_UNUSED;
2248 regset set ATTRIBUTE_UNUSED;
2250 /* Nothing to do here, since we postprocess jumps in
2251 add_branch_dependences. */
2254 /* Used in schedule_insns to initialize current_sched_info for scheduling
2255 regions (or single basic blocks). */
2257 static struct sched_info region_sched_info =
2259 init_ready_list,
2260 can_schedule_ready_p,
2261 schedule_more_p,
2262 new_ready,
2263 rgn_rank,
2264 rgn_print_insn,
2265 contributes_to_priority,
2266 compute_jump_reg_dependencies,
2268 NULL, NULL,
2269 NULL, NULL,
2270 0, 0
2273 /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
2275 static bool
2276 sets_likely_spilled (pat)
2277 rtx pat;
2279 bool ret = false;
2280 note_stores (pat, sets_likely_spilled_1, &ret);
2281 return ret;
2284 static void
2285 sets_likely_spilled_1 (x, pat, data)
2286 rtx x, pat;
2287 void *data;
2289 bool *ret = (bool *) data;
2291 if (GET_CODE (pat) == SET
2292 && REG_P (x)
2293 && REGNO (x) < FIRST_PSEUDO_REGISTER
2294 && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x))))
2295 *ret = true;
2298 /* Add dependences so that branches are scheduled to run last in their
2299 block. */
2301 static void
2302 add_branch_dependences (head, tail)
2303 rtx head, tail;
2305 rtx insn, last;
2307 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2308 that can throw exceptions, force them to remain in order at the end of
2309 the block by adding dependencies and giving the last a high priority.
2310 There may be notes present, and prev_head may also be a note.
2312 Branches must obviously remain at the end. Calls should remain at the
2313 end since moving them results in worse register allocation. Uses remain
2314 at the end to ensure proper register allocation.
2316 cc0 setters remaim at the end because they can't be moved away from
2317 their cc0 user.
2319 Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
2320 are not moved before reload because we can wind up with register
2321 allocation failures. */
2323 insn = tail;
2324 last = 0;
2325 while (GET_CODE (insn) == CALL_INSN
2326 || GET_CODE (insn) == JUMP_INSN
2327 || (GET_CODE (insn) == INSN
2328 && (GET_CODE (PATTERN (insn)) == USE
2329 || GET_CODE (PATTERN (insn)) == CLOBBER
2330 || can_throw_internal (insn)
2331 #ifdef HAVE_cc0
2332 || sets_cc0_p (PATTERN (insn))
2333 #endif
2334 || (!reload_completed
2335 && sets_likely_spilled (PATTERN (insn)))))
2336 || GET_CODE (insn) == NOTE)
2338 if (GET_CODE (insn) != NOTE)
2340 if (last != 0 && !find_insn_list (insn, LOG_LINKS (last)))
2342 add_dependence (last, insn, REG_DEP_ANTI);
2343 INSN_REF_COUNT (insn)++;
2346 CANT_MOVE (insn) = 1;
2348 last = insn;
2349 /* Skip over insns that are part of a group.
2350 Make each insn explicitly depend on the previous insn.
2351 This ensures that only the group header will ever enter
2352 the ready queue (and, when scheduled, will automatically
2353 schedule the SCHED_GROUP_P block). */
2354 while (SCHED_GROUP_P (insn))
2356 rtx temp = prev_nonnote_insn (insn);
2357 add_dependence (insn, temp, REG_DEP_ANTI);
2358 insn = temp;
2362 /* Don't overrun the bounds of the basic block. */
2363 if (insn == head)
2364 break;
2366 insn = PREV_INSN (insn);
2369 /* Make sure these insns are scheduled last in their block. */
2370 insn = last;
2371 if (insn != 0)
2372 while (insn != head)
2374 insn = prev_nonnote_insn (insn);
2376 if (INSN_REF_COUNT (insn) != 0)
2377 continue;
2379 add_dependence (last, insn, REG_DEP_ANTI);
2380 INSN_REF_COUNT (insn) = 1;
2382 /* Skip over insns that are part of a group. */
2383 while (SCHED_GROUP_P (insn))
2384 insn = prev_nonnote_insn (insn);
2388 /* Data structures for the computation of data dependences in a regions. We
2389 keep one `deps' structure for every basic block. Before analyzing the
2390 data dependences for a bb, its variables are initialized as a function of
2391 the variables of its predecessors. When the analysis for a bb completes,
2392 we save the contents to the corresponding bb_deps[bb] variable. */
2394 static struct deps *bb_deps;
2396 /* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
2398 static rtx
2399 concat_INSN_LIST (copy, old)
2400 rtx copy, old;
2402 rtx new = old;
2403 for (; copy ; copy = XEXP (copy, 1))
2404 new = alloc_INSN_LIST (XEXP (copy, 0), new);
2405 return new;
2408 static void
2409 concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p)
2410 rtx copy_insns, copy_mems;
2411 rtx *old_insns_p, *old_mems_p;
2413 rtx new_insns = *old_insns_p;
2414 rtx new_mems = *old_mems_p;
2416 while (copy_insns)
2418 new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns);
2419 new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems);
2420 copy_insns = XEXP (copy_insns, 1);
2421 copy_mems = XEXP (copy_mems, 1);
2424 *old_insns_p = new_insns;
2425 *old_mems_p = new_mems;
2428 /* After computing the dependencies for block BB, propagate the dependencies
2429 found in TMP_DEPS to the successors of the block. */
2430 static void
2431 propagate_deps (bb, pred_deps)
2432 int bb;
2433 struct deps *pred_deps;
2435 int b = BB_TO_BLOCK (bb);
2436 int e, first_edge;
2438 /* bb's structures are inherited by its successors. */
2439 first_edge = e = OUT_EDGES (b);
2440 if (e > 0)
2443 int b_succ = TO_BLOCK (e);
2444 int bb_succ = BLOCK_TO_BB (b_succ);
2445 struct deps *succ_deps = bb_deps + bb_succ;
2446 int reg;
2448 /* Only bbs "below" bb, in the same region, are interesting. */
2449 if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
2450 || bb_succ <= bb)
2452 e = NEXT_OUT (e);
2453 continue;
2456 /* The reg_last lists are inherited by bb_succ. */
2457 EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
2459 struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
2460 struct deps_reg *succ_rl = &succ_deps->reg_last[reg];
2462 succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
2463 succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
2464 succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
2465 succ_rl->clobbers);
2466 succ_rl->uses_length += pred_rl->uses_length;
2467 succ_rl->clobbers_length += pred_rl->clobbers_length;
2469 IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);
2471 /* Mem read/write lists are inherited by bb_succ. */
2472 concat_insn_mem_list (pred_deps->pending_read_insns,
2473 pred_deps->pending_read_mems,
2474 &succ_deps->pending_read_insns,
2475 &succ_deps->pending_read_mems);
2476 concat_insn_mem_list (pred_deps->pending_write_insns,
2477 pred_deps->pending_write_mems,
2478 &succ_deps->pending_write_insns,
2479 &succ_deps->pending_write_mems);
2481 succ_deps->last_pending_memory_flush
2482 = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
2483 succ_deps->last_pending_memory_flush);
2485 succ_deps->pending_lists_length += pred_deps->pending_lists_length;
2486 succ_deps->pending_flush_length += pred_deps->pending_flush_length;
2488 /* last_function_call is inherited by bb_succ. */
2489 succ_deps->last_function_call
2490 = concat_INSN_LIST (pred_deps->last_function_call,
2491 succ_deps->last_function_call);
2493 /* sched_before_next_call is inherited by bb_succ. */
2494 succ_deps->sched_before_next_call
2495 = concat_INSN_LIST (pred_deps->sched_before_next_call,
2496 succ_deps->sched_before_next_call);
2498 e = NEXT_OUT (e);
2500 while (e != first_edge);
2502 /* These lists should point to the right place, for correct
2503 freeing later. */
2504 bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
2505 bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
2506 bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
2507 bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;
2509 /* Can't allow these to be freed twice. */
2510 pred_deps->pending_read_insns = 0;
2511 pred_deps->pending_read_mems = 0;
2512 pred_deps->pending_write_insns = 0;
2513 pred_deps->pending_write_mems = 0;
2516 /* Compute backward dependences inside bb. In a multiple blocks region:
2517 (1) a bb is analyzed after its predecessors, and (2) the lists in
2518 effect at the end of bb (after analyzing for bb) are inherited by
2519 bb's successrs.
2521 Specifically for reg-reg data dependences, the block insns are
2522 scanned by sched_analyze () top-to-bottom. Two lists are
2523 maintained by sched_analyze (): reg_last[].sets for register DEFs,
2524 and reg_last[].uses for register USEs.
2526 When analysis is completed for bb, we update for its successors:
2527 ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
2528 ; - USES[succ] = Union (USES [succ], DEFS [bb])
2530 The mechanism for computing mem-mem data dependence is very
2531 similar, and the result is interblock dependences in the region. */
2533 static void
2534 compute_block_backward_dependences (bb)
2535 int bb;
2537 rtx head, tail;
2538 struct deps tmp_deps;
2540 tmp_deps = bb_deps[bb];
2542 /* Do the analysis for this block. */
2543 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2544 sched_analyze (&tmp_deps, head, tail);
2545 add_branch_dependences (head, tail);
2547 if (current_nr_blocks > 1)
2548 propagate_deps (bb, &tmp_deps);
2550 /* Free up the INSN_LISTs. */
2551 free_deps (&tmp_deps);
2554 /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
2555 them to the unused_*_list variables, so that they can be reused. */
2557 static void
2558 free_pending_lists ()
2560 int bb;
2562 for (bb = 0; bb < current_nr_blocks; bb++)
2564 free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
2565 free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
2566 free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
2567 free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
2571 /* Print dependences for debugging, callable from debugger. */
2573 void
2574 debug_dependencies ()
2576 int bb;
2578 fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
2579 for (bb = 0; bb < current_nr_blocks; bb++)
2581 if (1)
2583 rtx head, tail;
2584 rtx next_tail;
2585 rtx insn;
2587 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2588 next_tail = NEXT_INSN (tail);
2589 fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2590 BB_TO_BLOCK (bb), bb);
2592 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
2593 "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
2594 fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
2595 "----", "----", "--", "---", "----", "----", "--------", "-----");
2596 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
2598 rtx link;
2599 int unit, range;
2601 if (! INSN_P (insn))
2603 int n;
2604 fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
2605 if (GET_CODE (insn) == NOTE)
2607 n = NOTE_LINE_NUMBER (insn);
2608 if (n < 0)
2609 fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
2610 else
2611 fprintf (sched_dump, "line %d, file %s\n", n,
2612 NOTE_SOURCE_FILE (insn));
2614 else
2615 fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2616 continue;
2619 unit = insn_unit (insn);
2620 range = (unit < 0
2621 || function_units[unit].blockage_range_function == 0) ? 0 :
2622 function_units[unit].blockage_range_function (insn);
2623 fprintf (sched_dump,
2624 ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
2625 (SCHED_GROUP_P (insn) ? "+" : " "),
2626 INSN_UID (insn),
2627 INSN_CODE (insn),
2628 INSN_BB (insn),
2629 INSN_DEP_COUNT (insn),
2630 INSN_PRIORITY (insn),
2631 insn_cost (insn, 0, 0),
2632 (int) MIN_BLOCKAGE_COST (range),
2633 (int) MAX_BLOCKAGE_COST (range));
2634 insn_print_units (insn);
2635 fprintf (sched_dump, "\t: ");
2636 for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
2637 fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
2638 fprintf (sched_dump, "\n");
2642 fprintf (sched_dump, "\n");
2645 /* Schedule a region. A region is either an inner loop, a loop-free
2646 subroutine, or a single basic block. Each bb in the region is
2647 scheduled after its flow predecessors. */
2649 static void
2650 schedule_region (rgn)
2651 int rgn;
2653 int bb;
2654 int rgn_n_insns = 0;
2655 int sched_rgn_n_insns = 0;
2657 /* Set variables for the current region. */
2658 current_nr_blocks = RGN_NR_BLOCKS (rgn);
2659 current_blocks = RGN_BLOCKS (rgn);
2661 init_deps_global ();
2663 /* Initializations for region data dependence analyisis. */
2664 bb_deps = (struct deps *) xmalloc (sizeof (struct deps) * current_nr_blocks);
2665 for (bb = 0; bb < current_nr_blocks; bb++)
2666 init_deps (bb_deps + bb);
2668 /* Compute LOG_LINKS. */
2669 for (bb = 0; bb < current_nr_blocks; bb++)
2670 compute_block_backward_dependences (bb);
2672 /* Compute INSN_DEPEND. */
2673 for (bb = current_nr_blocks - 1; bb >= 0; bb--)
2675 rtx head, tail;
2676 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2678 compute_forward_dependences (head, tail);
2681 /* Set priorities. */
2682 for (bb = 0; bb < current_nr_blocks; bb++)
2684 rtx head, tail;
2685 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2687 rgn_n_insns += set_priorities (head, tail);
2690 /* Compute interblock info: probabilities, split-edges, dominators, etc. */
2691 if (current_nr_blocks > 1)
2693 int i;
2695 prob = (float *) xmalloc ((current_nr_blocks) * sizeof (float));
2697 dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
2698 sbitmap_vector_zero (dom, current_nr_blocks);
2699 /* Edge to bit. */
2700 rgn_nr_edges = 0;
2701 edge_to_bit = (int *) xmalloc (nr_edges * sizeof (int));
2702 for (i = 1; i < nr_edges; i++)
2703 if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
2704 EDGE_TO_BIT (i) = rgn_nr_edges++;
2705 rgn_edges = (int *) xmalloc (rgn_nr_edges * sizeof (int));
2707 rgn_nr_edges = 0;
2708 for (i = 1; i < nr_edges; i++)
2709 if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
2710 rgn_edges[rgn_nr_edges++] = i;
2712 /* Split edges. */
2713 pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
2714 sbitmap_vector_zero (pot_split, current_nr_blocks);
2715 ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
2716 sbitmap_vector_zero (ancestor_edges, current_nr_blocks);
2718 /* Compute probabilities, dominators, split_edges. */
2719 for (bb = 0; bb < current_nr_blocks; bb++)
2720 compute_dom_prob_ps (bb);
2723 /* Now we can schedule all blocks. */
2724 for (bb = 0; bb < current_nr_blocks; bb++)
2726 rtx head, tail;
2727 int b = BB_TO_BLOCK (bb);
2729 get_block_head_tail (b, &head, &tail);
2731 if (no_real_insns_p (head, tail))
2732 continue;
2734 current_sched_info->prev_head = PREV_INSN (head);
2735 current_sched_info->next_tail = NEXT_INSN (tail);
2737 if (write_symbols != NO_DEBUG)
2739 save_line_notes (b, head, tail);
2740 rm_line_notes (head, tail);
2743 /* rm_other_notes only removes notes which are _inside_ the
2744 block---that is, it won't remove notes before the first real insn
2745 or after the last real insn of the block. So if the first insn
2746 has a REG_SAVE_NOTE which would otherwise be emitted before the
2747 insn, it is redundant with the note before the start of the
2748 block, and so we have to take it out. */
2749 if (INSN_P (head))
2751 rtx note;
2753 for (note = REG_NOTES (head); note; note = XEXP (note, 1))
2754 if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
2756 remove_note (head, note);
2757 note = XEXP (note, 1);
2758 remove_note (head, note);
2762 /* Remove remaining note insns from the block, save them in
2763 note_list. These notes are restored at the end of
2764 schedule_block (). */
2765 rm_other_notes (head, tail);
2767 target_bb = bb;
2769 current_sched_info->queue_must_finish_empty
2770 = current_nr_blocks > 1 && !flag_schedule_interblock;
2772 schedule_block (b, rgn_n_insns);
2773 sched_rgn_n_insns += sched_n_insns;
2775 /* Update target block boundaries. */
2776 if (head == BLOCK_HEAD (b))
2777 BLOCK_HEAD (b) = current_sched_info->head;
2778 if (tail == BLOCK_END (b))
2779 BLOCK_END (b) = current_sched_info->tail;
2781 /* Clean up. */
2782 if (current_nr_blocks > 1)
2784 free (candidate_table);
2785 free (bblst_table);
2786 free (bitlst_table);
2790 /* Sanity check: verify that all region insns were scheduled. */
2791 if (sched_rgn_n_insns != rgn_n_insns)
2792 abort ();
2794 /* Restore line notes. */
2795 if (write_symbols != NO_DEBUG)
2797 for (bb = 0; bb < current_nr_blocks; bb++)
2799 rtx head, tail;
2800 get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
2801 restore_line_notes (head, tail);
2805 /* Done with this region. */
2806 free_pending_lists ();
2808 finish_deps_global ();
2810 free (bb_deps);
2812 if (current_nr_blocks > 1)
2814 free (prob);
2815 sbitmap_vector_free (dom);
2816 sbitmap_vector_free (pot_split);
2817 sbitmap_vector_free (ancestor_edges);
2818 free (edge_to_bit);
2819 free (rgn_edges);
2823 /* Indexed by region, holds the number of death notes found in that region.
2824 Used for consistency checks. */
2825 static int *deaths_in_region;
2827 /* Initialize data structures for region scheduling. */
2829 static void
2830 init_regions ()
2832 sbitmap blocks;
2833 int rgn;
2835 nr_regions = 0;
2836 rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
2837 rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
2838 block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
2839 containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
2841 /* Compute regions for scheduling. */
2842 if (reload_completed
2843 || n_basic_blocks == 1
2844 || !flag_schedule_interblock)
2846 find_single_block_region ();
2848 else
2850 /* Verify that a 'good' control flow graph can be built. */
2851 if (is_cfg_nonregular ())
2853 find_single_block_region ();
2855 else
2857 sbitmap *dom;
2858 struct edge_list *edge_list;
2860 dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
2862 /* The scheduler runs after flow; therefore, we can't blindly call
2863 back into find_basic_blocks since doing so could invalidate the
2864 info in global_live_at_start.
2866 Consider a block consisting entirely of dead stores; after life
2867 analysis it would be a block of NOTE_INSN_DELETED notes. If
2868 we call find_basic_blocks again, then the block would be removed
2869 entirely and invalidate our the register live information.
2871 We could (should?) recompute register live information. Doing
2872 so may even be beneficial. */
2873 edge_list = create_edge_list ();
2875 /* Compute the dominators and post dominators. */
2876 calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
2878 /* build_control_flow will return nonzero if it detects unreachable
2879 blocks or any other irregularity with the cfg which prevents
2880 cross block scheduling. */
2881 if (build_control_flow (edge_list) != 0)
2882 find_single_block_region ();
2883 else
2884 find_rgns (edge_list, dom);
2886 if (sched_verbose >= 3)
2887 debug_regions ();
2889 /* We are done with flow's edge list. */
2890 free_edge_list (edge_list);
2892 /* For now. This will move as more and more of haifa is converted
2893 to using the cfg code in flow.c. */
2894 free (dom);
2899 if (CHECK_DEAD_NOTES)
2901 blocks = sbitmap_alloc (n_basic_blocks);
2902 deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
2903 /* Remove all death notes from the subroutine. */
2904 for (rgn = 0; rgn < nr_regions; rgn++)
2906 int b;
2908 sbitmap_zero (blocks);
2909 for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
2910 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
2912 deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
2914 sbitmap_free (blocks);
2916 else
2917 count_or_remove_death_notes (NULL, 1);
2920 /* The one entry point in this file. DUMP_FILE is the dump file for
2921 this pass. */
2923 void
2924 schedule_insns (dump_file)
2925 FILE *dump_file;
2927 sbitmap large_region_blocks, blocks;
2928 int rgn;
2929 int any_large_regions;
2931 /* Taking care of this degenerate case makes the rest of
2932 this code simpler. */
2933 if (n_basic_blocks == 0)
2934 return;
2936 scope_to_insns_initialize ();
2938 nr_inter = 0;
2939 nr_spec = 0;
2941 sched_init (dump_file);
2943 init_regions ();
2945 current_sched_info = &region_sched_info;
2947 /* Schedule every region in the subroutine. */
2948 for (rgn = 0; rgn < nr_regions; rgn++)
2949 schedule_region (rgn);
2951 /* Update life analysis for the subroutine. Do single block regions
2952 first so that we can verify that live_at_start didn't change. Then
2953 do all other blocks. */
2954 /* ??? There is an outside possibility that update_life_info, or more
2955 to the point propagate_block, could get called with non-zero flags
2956 more than once for one basic block. This would be kinda bad if it
2957 were to happen, since REG_INFO would be accumulated twice for the
2958 block, and we'd have twice the REG_DEAD notes.
2960 I'm fairly certain that this _shouldn't_ happen, since I don't think
2961 that live_at_start should change at region heads. Not sure what the
2962 best way to test for this kind of thing... */
2964 allocate_reg_life_data ();
2965 compute_bb_for_insn (get_max_uid ());
2967 any_large_regions = 0;
2968 large_region_blocks = sbitmap_alloc (n_basic_blocks);
2969 sbitmap_ones (large_region_blocks);
2971 blocks = sbitmap_alloc (n_basic_blocks);
2972 sbitmap_zero (blocks);
2974 /* Update life information. For regions consisting of multiple blocks
2975 we've possibly done interblock scheduling that affects global liveness.
2976 For regions consisting of single blocks we need to do only local
2977 liveness. */
2978 for (rgn = 0; rgn < nr_regions; rgn++)
2979 if (RGN_NR_BLOCKS (rgn) > 1)
2980 any_large_regions = 1;
2981 else
2983 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2984 RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
2987 /* Don't update reg info after reload, since that affects
2988 regs_ever_live, which should not change after reload. */
2989 update_life_info (blocks, UPDATE_LIFE_LOCAL,
2990 (reload_completed ? PROP_DEATH_NOTES
2991 : PROP_DEATH_NOTES | PROP_REG_INFO));
2992 if (any_large_regions)
2994 update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
2995 PROP_DEATH_NOTES | PROP_REG_INFO);
2998 if (CHECK_DEAD_NOTES)
3000 /* Verify the counts of basic block notes in single the basic block
3001 regions. */
3002 for (rgn = 0; rgn < nr_regions; rgn++)
3003 if (RGN_NR_BLOCKS (rgn) == 1)
3005 sbitmap_zero (blocks);
3006 SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
3008 if (deaths_in_region[rgn]
3009 != count_or_remove_death_notes (blocks, 0))
3010 abort ();
3012 free (deaths_in_region);
3015 /* Reposition the prologue and epilogue notes in case we moved the
3016 prologue/epilogue insns. */
3017 if (reload_completed)
3018 reposition_prologue_and_epilogue_notes (get_insns ());
3020 /* Delete redundant line notes. */
3021 if (write_symbols != NO_DEBUG)
3022 rm_redundant_line_notes ();
3024 scope_to_insns_finalize ();
3026 if (sched_verbose)
3028 if (reload_completed == 0 && flag_schedule_interblock)
3030 fprintf (sched_dump,
3031 "\n;; Procedure interblock/speculative motions == %d/%d \n",
3032 nr_inter, nr_spec);
3034 else
3036 if (nr_inter > 0)
3037 abort ();
3039 fprintf (sched_dump, "\n\n");
3042 /* Clean up. */
3043 free (rgn_table);
3044 free (rgn_bb_table);
3045 free (block_to_bb);
3046 free (containing_rgn);
3048 sched_finish ();
3050 if (edge_table)
3052 free (edge_table);
3053 edge_table = NULL;
3056 if (in_edges)
3058 free (in_edges);
3059 in_edges = NULL;
3061 if (out_edges)
3063 free (out_edges);
3064 out_edges = NULL;
3067 sbitmap_free (blocks);
3068 sbitmap_free (large_region_blocks);
3070 #endif