1 /* Instruction scheduling pass.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
4 Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 and currently maintained by, Jim Wilson (wilson@cygnus.com)
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 2, or (at your option) any later
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to the Free
21 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
24 /* This pass implements list scheduling within basic blocks. It is
25 run twice: (1) after flow analysis, but before register allocation,
26 and (2) after register allocation.
28 The first run performs interblock scheduling, moving insns between
29 different blocks in the same "region", and the second runs only
30 basic block scheduling.
32 Interblock motions performed are useful motions and speculative
33 motions, including speculative loads. Motions requiring code
34 duplication are not supported. The identification of motion type
35 and the check for validity of speculative motions requires
36 construction and analysis of the function's control flow graph.
38 The main entry point for this pass is schedule_insns(), called for
39 each function. The work of the scheduler is organized in three
40 levels: (1) function level: insns are subject to splitting,
41 control-flow-graph is constructed, regions are computed (after
42 reload, each region is of one block), (2) region level: control
43 flow graph attributes required for interblock scheduling are
44 computed (dominators, reachability, etc.), data dependences and
45 priorities are computed, and (3) block level: insns in the block
46 are actually scheduled. */
50 #include "coretypes.h"
55 #include "hard-reg-set.h"
56 #include "basic-block.h"
60 #include "insn-config.h"
61 #include "insn-attr.h"
65 #include "cfglayout.h"
66 #include "sched-int.h"
69 /* Define when we want to count REG_DEAD notes before and after scheduling
70 for sanity checking.  We can't do that when conditional execution is used,
71 as REG_DEAD notes exist only for unconditional deaths.  */
73 #if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
74 #define CHECK_DEAD_NOTES 1
75 #else
76 #define CHECK_DEAD_NOTES 0
77 #endif
80 #ifdef INSN_SCHEDULING
81 /* Some accessor macros for h_i_d members only used within this file. */
82 #define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count)
83 #define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
84 #define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
86 #define MAX_RGN_BLOCKS 10
87 #define MAX_RGN_INSNS 100
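/* These two limits cap the size of a region: too_large () rejects a region
   once it would exceed MAX_RGN_BLOCKS blocks or roughly MAX_RGN_INSNS insns.  */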
89 /* nr_inter/spec counts interblock/speculative motion for the function. */
90 static int nr_inter, nr_spec;
92 /* Control flow graph edges are kept in circular lists. */
101 static haifa_edge *edge_table;
103 #define NEXT_IN(edge) (edge_table[edge].next_in)
104 #define NEXT_OUT(edge) (edge_table[edge].next_out)
105 #define FROM_BLOCK(edge) (edge_table[edge].from_block)
106 #define TO_BLOCK(edge) (edge_table[edge].to_block)
108 /* Number of edges in the control flow graph.  (In fact, larger than
109 that by 1, since edge 0 is unused.)  */
110 static int nr_edges;
112 /* Circular list of incoming/outgoing edges of a block. */
113 static int *in_edges;
114 static int *out_edges;
116 #define IN_EDGES(block) (in_edges[block])
117 #define OUT_EDGES(block) (out_edges[block])
119 static int is_cfg_nonregular (void);
120 static int build_control_flow (struct edge_list *);
121 static void new_edge (int, int);
123 /* A region is the main entity for interblock scheduling: insns
124 are allowed to move between blocks in the same region, along
125 control flow graph edges, in the 'up' direction. */
128 int rgn_nr_blocks;	/* Number of blocks in region.  */
129 int rgn_blocks;	/* Index in rgn_bb_table of the region's first block.  */
133 /* Number of regions in the procedure. */
134 static int nr_regions;
136 /* Table of region descriptions. */
137 static region *rgn_table;
139 /* Array of lists of regions' blocks. */
140 static int *rgn_bb_table;
142 /* Topological order of blocks in the region (if b2 is reachable from
143 b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is
144 always referred to by either block or b, while its topological
145 order name (in the region) is referred to by bb. */
146 static int *block_to_bb;
148 /* The number of the region containing a block. */
149 static int *containing_rgn;
151 #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
152 #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
153 #define BLOCK_TO_BB(block) (block_to_bb[block])
154 #define CONTAINING_RGN(block) (containing_rgn[block])
156 void debug_regions (void);
157 static void find_single_block_region (void);
158 static void find_rgns (struct edge_list *, dominance_info);
159 static int too_large (int, int *, int *);
161 extern void debug_live (int, int);
163 /* Blocks of the current region being scheduled. */
164 static int current_nr_blocks;
165 static int current_blocks;
167 /* The mapping from bb to block. */
168 #define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
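/* BB_TO_BLOCK is the inverse of BLOCK_TO_BB for the blocks of the region
   currently being scheduled: it maps a topological index bb back to a basic
   block number through rgn_bb_table.  */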
172 int *first_member;	/* Pointer to the list start in bitlst_table.  */
173 int nr_members;	/* The number of members of the bit list.  */
177 static int bitlst_table_last;
178 static int *bitlst_table;
180 static void extract_bitlst (sbitmap, bitlst *);
182 /* Target info declarations.
184 The block currently being scheduled is referred to as the "target" block,
185 while other blocks in the region from which insns can be moved to the
186 target are called "source" blocks. The candidate structure holds info
187 about such sources: are they valid? Speculative? Etc. */
188 typedef bitlst bblst;
199 static candidate *candidate_table;
201 /* A speculative motion requires checking live information on the path
202 from 'source' to 'target'. The split blocks are those to be checked.
203 After a speculative motion, live information should be modified in
204 the update blocks (this is done by update_live).
206 Lists of split and update blocks for each candidate of the current
207 target are in array bblst_table.  */
208 static int *bblst_table, bblst_size, bblst_last;
210 #define IS_VALID(src) ( candidate_table[src].is_valid )
211 #define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
212 #define SRC_PROB(src) ( candidate_table[src].src_prob )
214 /* The bb being currently scheduled. */
215 static int target_bb;
218 typedef bitlst edgelst;
220 /* Target info functions. */
221 static void split_edges (int, int, edgelst *);
222 static void compute_trg_info (int);
223 void debug_candidate (int);
224 void debug_candidates (int);
226 /* Dominators array: dom[i] contains the sbitmap of dominators of
227 bb i in the region.  */
228 static sbitmap *dom;
230 /* bb 0 is the only region entry. */
231 #define IS_RGN_ENTRY(bb) (!bb)
233 /* Is bb_src dominated by bb_trg. */
234 #define IS_DOMINATED(bb_src, bb_trg) \
235 ( TEST_BIT (dom[bb_src], bb_trg) )
237 /* Probability: Prob[i] is a float in [0, 1] which is the probability
238 of bb i relative to the region entry.  */
239 static float *prob;
241 /* The probability of bb_src, relative to bb_trg.  Note, that while the
242 'prob[bb]' is a float in [0, 1], this macro returns an integer
243 in [0, 100].  */
244 #define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
245 prob[bb_trg])))
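/* For example, with prob[bb_src] == 0.40 and prob[bb_trg] == 0.80 the macro
   evaluates to 50, i.e. bb_src is executed on about half of the paths that
   reach bb_trg.  */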
247 /* Bit-set of edges, where bit i stands for edge i. */
248 typedef sbitmap edgeset;
250 /* Number of edges in the region. */
251 static int rgn_nr_edges;
253 /* Array of size rgn_nr_edges. */
254 static int *rgn_edges;
257 /* Mapping from each edge in the graph to its number in the rgn. */
258 static int *edge_to_bit;
259 #define EDGE_TO_BIT(edge) (edge_to_bit[edge])
261 /* The split edges of a source bb is different for each target
262 bb. In order to compute this efficiently, the 'potential-split edges'
263 are computed for each bb prior to scheduling a region. This is actually
264 the split edges of each bb relative to the region entry.
266 pot_split[bb] is the set of potential split edges of bb. */
267 static edgeset *pot_split;
269 /* For every bb, a set of its ancestor edges. */
270 static edgeset *ancestor_edges;
272 static void compute_dom_prob_ps (int);
274 #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
275 #define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
276 #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
278 /* Parameters affecting the decision of rank_for_schedule().
279 ??? Nope. But MIN_PROBABILITY is used in compute_trg_info. */
280 #define MIN_PROBABILITY 40
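/* compute_trg_info marks a candidate source block as invalid when its
   GET_SRC_PROB relative to the target falls below this threshold.  */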
282 /* Speculative scheduling functions. */
283 static int check_live_1 (int, rtx);
284 static void update_live_1 (int, rtx);
285 static int check_live (rtx, int);
286 static void update_live (rtx, int);
287 static void set_spec_fed (rtx);
288 static int is_pfree (rtx, int, int);
289 static int find_conditional_protection (rtx, int);
290 static int is_conditionally_protected (rtx, int, int);
291 static int is_prisky (rtx, int, int);
292 static int is_exception_free (rtx, int, int);
294 static bool sets_likely_spilled (rtx);
295 static void sets_likely_spilled_1 (rtx, rtx, void *);
296 static void add_branch_dependences (rtx, rtx);
297 static void compute_block_backward_dependences (int);
298 void debug_dependencies (void);
300 static void init_regions (void);
301 static void schedule_region (int);
302 static rtx concat_INSN_LIST (rtx, rtx);
303 static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
304 static void propagate_deps (int, struct deps *);
305 static void free_pending_lists (void);
307 /* Functions for construction of the control flow graph. */
309 /* Return 1 if control flow graph should not be constructed, 0 otherwise.
311 We decide not to build the control flow graph if there is possibly more
312 than one entry to the function, if computed branches exist, or if we
313 have nonlocal gotos. */
316 is_cfg_nonregular (void)
322 /* If we have a label that could be the target of a nonlocal goto, then
323 the cfg is not well structured. */
324 if (nonlocal_goto_handler_labels)
327 /* If we have any forced labels, then the cfg is not well structured. */
331 /* If this function has a computed jump, then we consider the cfg
332 not well structured. */
333 if (current_function_has_computed_jump)
336 /* If we have exception handlers, then we consider the cfg not well
337 structured. ?!? We should be able to handle this now that flow.c
338 computes an accurate cfg for EH. */
339 if (current_function_has_exception_handlers ())
342 /* If we have non-jumping insns which refer to labels, then we consider
343 the cfg not well structured. */
344 /* Check for labels referred to other than by jumps.  */
346 for (insn = b->head;; insn = NEXT_INSN (insn))
348 code = GET_CODE (insn);
349 if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
351 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
354 && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
355 && find_reg_note (NEXT_INSN (insn), REG_LABEL,
364 /* All the tests passed. Consider the cfg well structured. */
368 /* Build the control flow graph and set nr_edges.
370 Instead of trying to build a cfg ourselves, we rely on flow to
371 do it for us. Stamp out useless code (and bug) duplication.
373 Return nonzero if an irregularity in the cfg is found which would
374 prevent cross block scheduling. */
377 build_control_flow (struct edge_list *edge_list)
379 int i, unreachable, num_edges;
382 /* This already accounts for entry/exit edges.  */
383 num_edges = NUM_EDGES (edge_list);
385 /* Unreachable loops with more than one basic block are detected
386 during the DFS traversal in find_rgns.
388 Unreachable loops with a single block are detected here. This
389 test is redundant with the one in find_rgns, but it's much
390 cheaper to go ahead and catch the trivial case here. */
395 || (b->pred->src == b
396 && b->pred->pred_next == NULL))
400 /* ??? We can kill these soon. */
401 in_edges = xcalloc (last_basic_block, sizeof (int));
402 out_edges = xcalloc (last_basic_block, sizeof (int));
403 edge_table = xcalloc (num_edges, sizeof (haifa_edge));
406 for (i = 0; i < num_edges; i++)
408 edge e = INDEX_EDGE (edge_list, i);
410 if (e->dest != EXIT_BLOCK_PTR
411 && e->src != ENTRY_BLOCK_PTR)
412 new_edge (e->src->index, e->dest->index);
415 /* Increment by 1, since edge 0 is unused. */
421 /* Record an edge in the control flow graph from SOURCE to TARGET.
423 In theory, this is redundant with the s_succs computed above, but
424 we have not converted all of haifa to use information from the
425 integer lists.  */
428 new_edge (int source, int target)
431 int curr_edge, fst_edge;
433 /* Check for duplicates. */
434 fst_edge = curr_edge = OUT_EDGES (source);
437 if (FROM_BLOCK (curr_edge) == source
438 && TO_BLOCK (curr_edge) == target)
443 curr_edge = NEXT_OUT (curr_edge);
445 if (fst_edge == curr_edge)
451 FROM_BLOCK (e) = source;
452 TO_BLOCK (e) = target;
454 if (OUT_EDGES (source))
456 next_edge = NEXT_OUT (OUT_EDGES (source));
457 NEXT_OUT (OUT_EDGES (source)) = e;
458 NEXT_OUT (e) = next_edge;
462 OUT_EDGES (source) = e;
466 if (IN_EDGES (target))
468 next_edge = NEXT_IN (IN_EDGES (target));
469 NEXT_IN (IN_EDGES (target)) = e;
470 NEXT_IN (e) = next_edge;
474 IN_EDGES (target) = e;
479 /* Translate a bit-set SET to a list BL of the bit-set members. */
482 extract_bitlst (sbitmap set, bitlst *bl)
486 /* bblst table space is reused in each call to extract_bitlst.  */
487 bitlst_table_last = 0;
489 bl->first_member = &bitlst_table[bitlst_table_last];
492 /* Iterate over each word in the bitset.  */
493 EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
495 bitlst_table[bitlst_table_last++] = i;
501 /* Functions for the construction of regions. */
503 /* Print the regions, for debugging purposes. Callable from debugger. */
510 fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
511 for (rgn = 0; rgn < nr_regions; rgn++)
513 fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
514 rgn_table[rgn].rgn_nr_blocks);
515 fprintf (sched_dump, ";;\tbb/block: ");
517 for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
519 current_blocks = RGN_BLOCKS (rgn);
521 if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
524 fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
527 fprintf (sched_dump, "\n\n");
531 /* Build a single block region for each basic block in the function.
532 This allows for using the same code for interblock and basic block
533 scheduling.  */
536 find_single_block_region (void)
544 rgn_bb_table[nr_regions] = bb->index;
545 RGN_NR_BLOCKS (nr_regions) = 1;
546 RGN_BLOCKS (nr_regions) = nr_regions;
547 CONTAINING_RGN (bb->index) = nr_regions;
548 BLOCK_TO_BB (bb->index) = 0;
553 /* Update number of blocks and the estimate for number of insns
554 in the region. Return 1 if the region is "too large" for interblock
555 scheduling (compile time considerations), otherwise return 0. */
558 too_large (int block, int *num_bbs, int *num_insns)
561 (*num_insns) += (INSN_LUID (BLOCK_END (block)) -
562 INSN_LUID (BLOCK_HEAD (block)));
563 if ((*num_bbs > MAX_RGN_BLOCKS) || (*num_insns > MAX_RGN_INSNS))
569 /* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
570 is still an inner loop. Put in max_hdr[blk] the header of the most inner
571 loop containing blk. */
572 #define UPDATE_LOOP_RELATIONS(blk, hdr) \
574 if (max_hdr[blk] == -1) \
575 max_hdr[blk] = hdr; \
576 else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \
577 RESET_BIT (inner, hdr); \
578 else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \
580 RESET_BIT (inner,max_hdr[blk]); \
581 max_hdr[blk] = hdr; \
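/* dfs_nr[] holds DFS discovery numbers, so a smaller dfs_nr means an
   earlier-discovered (outer) header: the macro keeps the deeper of the two
   headers in max_hdr[blk] and clears the 'inner' bit of the one it rejects.  */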
585 /* Find regions for interblock scheduling.
587 A region for scheduling can be:
589 * A loop-free procedure, or
591 * A reducible inner loop, or
593 * A basic block not contained in any other region.
595 ?!? In theory we could build other regions based on extended basic
596 blocks or reverse extended basic blocks. Is it worth the trouble?
598 Loop blocks that form a region are put into the region's block list
599 in topological order.
601 This procedure stores its results into the following global (ick) variables
609 We use dominator relationships to avoid making regions out of non-reducible
610 loops.
612 This procedure needs to be converted to work on pred/succ lists instead
613 of edge tables. That would simplify it somewhat. */
616 find_rgns (struct edge_list *edge_list, dominance_info dom)
618 int *max_hdr, *dfs_nr, *stack, *degree;
620 int node, child, loop_head, i, head, tail;
621 int count = 0, sp, idx = 0;
622 int current_edge = out_edges[ENTRY_BLOCK_PTR->succ->dest->index];
623 int num_bbs, num_insns, unreachable;
624 int too_large_failure;
627 /* Note if an edge has been passed. */
630 /* Note if a block is a natural loop header. */
633 /* Note if a block is a natural inner loop header. */
636 /* Note if a block is in the block queue. */
639 /* Note if a block is on the DFS stack.  */
642 int num_edges = NUM_EDGES (edge_list);
644 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
645 and a mapping from block to its loop header (if the block is contained
648 Store results in HEADER, INNER, and MAX_HDR respectively, these will
649 be used as inputs to the second traversal.
651 STACK, SP and DFS_NR are only used during the first traversal. */
653 /* Allocate and initialize variables for the first traversal. */
654 max_hdr = xmalloc (last_basic_block * sizeof (int));
655 dfs_nr = xcalloc (last_basic_block, sizeof (int));
656 stack = xmalloc (nr_edges * sizeof (int));
658 inner = sbitmap_alloc (last_basic_block);
659 sbitmap_ones (inner);
661 header = sbitmap_alloc (last_basic_block);
662 sbitmap_zero (header);
664 passed = sbitmap_alloc (nr_edges);
665 sbitmap_zero (passed);
667 in_queue = sbitmap_alloc (last_basic_block);
668 sbitmap_zero (in_queue);
670 in_stack = sbitmap_alloc (last_basic_block);
671 sbitmap_zero (in_stack);
673 for (i = 0; i < last_basic_block; i++)
676 /* DFS traversal to find inner loops in the cfg. */
681 if (current_edge == 0 || TEST_BIT (passed, current_edge))
683 /* We have reached a leaf node or a node that was already
684 processed.  Pop edges off the stack until we find
685 an edge that has not yet been processed.  */
687 && (current_edge == 0 || TEST_BIT (passed, current_edge)))
689 /* Pop entry off the stack.  */
690 current_edge = stack[sp--];
691 node = FROM_BLOCK (current_edge);
692 child = TO_BLOCK (current_edge);
693 RESET_BIT (in_stack, child);
694 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
695 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
696 current_edge = NEXT_OUT (current_edge);
699 /* See if we have finished the DFS tree traversal.  */
700 if (sp < 0 && TEST_BIT (passed, current_edge))
703 /* Nope, continue the traversal with the popped node. */
707 /* Process a node. */
708 node = FROM_BLOCK (current_edge);
709 child = TO_BLOCK (current_edge);
710 SET_BIT (in_stack, node);
711 dfs_nr[node] = ++count;
713 /* If the successor is in the stack, then we've found a loop.
714 Mark the loop, if it is not a natural loop, then it will
715 be rejected during the second traversal.  */
716 if (TEST_BIT (in_stack, child))
719 SET_BIT (header, child);
720 UPDATE_LOOP_RELATIONS (node, child);
721 SET_BIT (passed, current_edge);
722 current_edge = NEXT_OUT (current_edge);
726 /* If the child was already visited, then there is no need to visit
727 it again.  Just update the loop relationships and restart
728 the search.  */
731 if (max_hdr[child] >= 0 && TEST_BIT (in_stack, max_hdr[child]))
732 UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
733 SET_BIT (passed, current_edge);
734 current_edge = NEXT_OUT (current_edge);
738 /* Push an entry on the stack and continue DFS traversal. */
739 stack[++sp] = current_edge;
740 SET_BIT (passed, current_edge);
741 current_edge = OUT_EDGES (child);
743 /* This is temporary until haifa is converted to use rth's new
744 cfg routines which have true entry/exit blocks and the
745 appropriate edges from/to those blocks.
747 Generally we update dfs_nr for a node when we process its
748 out edge. However, if the node has no out edge then we will
749 not set dfs_nr for that node. This can confuse the scheduler
750 into thinking that we have unreachable blocks, which in turn
751 disables cross block scheduling.
753 So, if we have a node with no out edges, go ahead and mark it
754 as reachable now.  */
755 if (current_edge == 0)
756 dfs_nr[child] = ++count;
759 /* Another check for unreachable blocks. The earlier test in
760 is_cfg_nonregular only finds unreachable blocks that do not
761 form a loop.
763 The DFS traversal will mark every block that is reachable from
764 the entry node by placing a nonzero value in dfs_nr. Thus if
765 dfs_nr is zero for any block, then it must be unreachable. */
768 if (dfs_nr[bb->index] == 0)
774 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
775 to hold degree counts. */
779 degree[bb->index] = 0;
780 for (i = 0; i < num_edges; i++)
782 edge e = INDEX_EDGE (edge_list, i);
784 if (e->dest != EXIT_BLOCK_PTR)
785 degree[e->dest->index]++;
788 /* Do not perform region scheduling if there are any unreachable
789 blocks.  */
797 /* Second traversal: find reducible inner loops and topologically sort
798 blocks of each region.  */
800 queue = xmalloc (n_basic_blocks * sizeof (int));
802 /* Find blocks which are inner loop headers. We still have non-reducible
803 loops to consider at this point. */
806 if (TEST_BIT (header, bb->index) && TEST_BIT (inner, bb->index))
811 /* Now check that the loop is reducible. We do this separate
812 from finding inner loops so that we do not find a reducible
813 loop which contains an inner non-reducible loop.
815 A simple way to find reducible/natural loops is to verify
816 that each block in the loop is dominated by the loop
817 header.
819 If there exists a block that is not dominated by the loop
820 header, then the block is reachable from outside the loop
821 and thus the loop is not a natural loop. */
824 /* First identify blocks in the loop, except for the loop
825 entry block.  */
826 if (bb->index == max_hdr[jbb->index] && bb != jbb)
828 /* Now verify that the block is dominated by the loop
829 header.  */
830 if (!dominated_by_p (dom, jbb, bb))
835 /* If we exited the loop early, then I is the header of
836 a non-reducible loop and we should quit processing it
837 now.  */
838 if (jbb != EXIT_BLOCK_PTR)
841 /* I is a header of an inner loop, or block 0 in a subroutine
842 with no loops at all. */
844 too_large_failure = 0;
845 loop_head = max_hdr[bb->index];
847 /* Decrease degree of all I's successors for topological
848 ordering.  */
849 for (e = bb->succ; e; e = e->succ_next)
850 if (e->dest != EXIT_BLOCK_PTR)
851 --degree[e->dest->index];
853 /* Estimate # insns, and count # blocks in the region. */
855 num_insns = (INSN_LUID (bb->end)
856 - INSN_LUID (bb->head));
858 /* Find all loop latches (blocks with back edges to the loop
859 header), or all the leaf blocks if the cfg has no loops.
861 Place those blocks into the queue.  */
865 /* Leaf nodes have only a single successor which must
866 be EXIT_BLOCK.  */
868 && jbb->succ->dest == EXIT_BLOCK_PTR
869 && jbb->succ->succ_next == NULL)
871 queue[++tail] = jbb->index;
872 SET_BIT (in_queue, jbb->index);
874 if (too_large (jbb->index, &num_bbs, &num_insns))
876 too_large_failure = 1;
885 for (e = bb->pred; e; e = e->pred_next)
887 if (e->src == ENTRY_BLOCK_PTR)
890 node = e->src->index;
892 if (max_hdr[node] == loop_head && node != bb->index)
894 /* This is a loop latch.  */
895 queue[++tail] = node;
896 SET_BIT (in_queue, node);
898 if (too_large (node, &num_bbs, &num_insns))
900 too_large_failure = 1;
907 /* Now add all the blocks in the loop to the queue.
909 We know the loop is a natural loop; however the algorithm
910 above will not always mark certain blocks as being in the
918 The algorithm in the DFS traversal may not mark B & D as part
919 of the loop (ie they will not have max_hdr set to A).
921 We know they can not be loop latches (else they would have
922 had max_hdr set since they'd have a backedge to a dominator
923 block). So we don't need them on the initial queue.
925 We know they are part of the loop because they are dominated
926 by the loop header and can be reached by a backwards walk of
927 the edges starting with nodes on the initial queue.
929 It is safe and desirable to include those nodes in the
930 loop/scheduling region. To do so we would need to decrease
931 the degree of a node if it is the target of a backedge
932 within the loop itself as the node is placed in the queue.
934 We do not do this because I'm not sure that the actual
935 scheduling code will properly handle this case. ?!? */
937 while (head < tail && !too_large_failure)
940 child = queue[++head];
942 for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
944 node = e->src->index;
946 /* See discussion above about nodes not marked as in
947 this loop during the initial DFS traversal.  */
948 if (e->src == ENTRY_BLOCK_PTR
949 || max_hdr[node] != loop_head)
954 else if (!TEST_BIT (in_queue, node) && node != bb->index)
956 queue[++tail] = node;
957 SET_BIT (in_queue, node);
959 if (too_large (node, &num_bbs, &num_insns))
961 too_large_failure = 1;
968 if (tail >= 0 && !too_large_failure)
970 /* Place the loop header into list of region blocks. */
971 degree[bb->index] = -1;
972 rgn_bb_table[idx] = bb->index;
973 RGN_NR_BLOCKS (nr_regions) = num_bbs;
974 RGN_BLOCKS (nr_regions) = idx++;
975 CONTAINING_RGN (bb->index) = nr_regions;
976 BLOCK_TO_BB (bb->index) = count = 0;
978 /* Remove blocks from queue[] when their in degree
979 becomes zero. Repeat until no blocks are left on the
980 list.  This produces a topological list of blocks in
981 the region.  */
987 if (degree[child] == 0)
992 rgn_bb_table[idx++] = child;
993 BLOCK_TO_BB (child) = ++count;
994 CONTAINING_RGN (child) = nr_regions;
995 queue[head] = queue[tail--];
997 for (e = BASIC_BLOCK (child)->succ;
1000 if (e->dest != EXIT_BLOCK_PTR)
1001 --degree[e->dest->index];
1013 /* Any block that did not end up in a region is placed into a region
1014 by itself.  */
1016 if (degree[bb->index] >= 0)
1018 rgn_bb_table[idx] = bb->index;
1019 RGN_NR_BLOCKS (nr_regions) = 1;
1020 RGN_BLOCKS (nr_regions) = idx++;
1021 CONTAINING_RGN (bb->index) = nr_regions++;
1022 BLOCK_TO_BB (bb->index) = 0;
1028 sbitmap_free (passed);
1029 sbitmap_free (header);
1030 sbitmap_free (inner);
1031 sbitmap_free (in_queue);
1032 sbitmap_free (in_stack);
1035 /* Functions for regions scheduling information. */
1037 /* Compute dominators, probability, and potential-split-edges of bb.
1038 Assume that these values were already computed for bb's predecessors. */
1041 compute_dom_prob_ps (int bb)
1043 int nxt_in_edge, fst_in_edge, pred;
1044 int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;
1047 if (IS_RGN_ENTRY (bb
))
1049 SET_BIT (dom
[bb
], 0);
1054 fst_in_edge
= nxt_in_edge
= IN_EDGES (BB_TO_BLOCK (bb
));
1056 /* Initialize dom[bb] to '111..1'. */
1057 sbitmap_ones (dom
[bb
]);
1061 pred
= FROM_BLOCK (nxt_in_edge
);
1062 sbitmap_a_and_b (dom
[bb
], dom
[bb
], dom
[BLOCK_TO_BB (pred
)]);
1063 sbitmap_a_or_b (ancestor_edges
[bb
], ancestor_edges
[bb
], ancestor_edges
[BLOCK_TO_BB (pred
)]);
1065 SET_BIT (ancestor_edges
[bb
], EDGE_TO_BIT (nxt_in_edge
));
1068 nr_rgn_out_edges
= 0;
1069 fst_out_edge
= OUT_EDGES (pred
);
1070 nxt_out_edge
= NEXT_OUT (fst_out_edge
);
1072 sbitmap_a_or_b (pot_split
[bb
], pot_split
[bb
], pot_split
[BLOCK_TO_BB (pred
)]);
1074 SET_BIT (pot_split
[bb
], EDGE_TO_BIT (fst_out_edge
));
1076 /* The successor doesn't belong in the region? */
1077 if (CONTAINING_RGN (TO_BLOCK (fst_out_edge
)) !=
1078 CONTAINING_RGN (BB_TO_BLOCK (bb
)))
1081 while (fst_out_edge
!= nxt_out_edge
)
1084 /* The successor doesn't belong in the region? */
1085 if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge
)) !=
1086 CONTAINING_RGN (BB_TO_BLOCK (bb
)))
1088 SET_BIT (pot_split
[bb
], EDGE_TO_BIT (nxt_out_edge
));
1089 nxt_out_edge
= NEXT_OUT (nxt_out_edge
);
1093 /* Now nr_rgn_out_edges is the number of region-exit edges from
1094 pred, and nr_out_edges will be the number of pred out edges
1095 not leaving the region. */
1096 nr_out_edges
-= nr_rgn_out_edges
;
1097 if (nr_rgn_out_edges
> 0)
1098 prob
[bb
] += 0.9 * prob
[BLOCK_TO_BB (pred
)] / nr_out_edges
;
1100 prob
[bb
] += prob
[BLOCK_TO_BB (pred
)] / nr_out_edges
;
1101 nxt_in_edge
= NEXT_IN (nxt_in_edge
);
1103 while (fst_in_edge
!= nxt_in_edge
);
1105 SET_BIT (dom
[bb
], bb
);
1106 sbitmap_difference (pot_split
[bb
], pot_split
[bb
], ancestor_edges
[bb
]);
1108 if (sched_verbose
>= 2)
1109 fprintf (sched_dump
, ";; bb_prob(%d, %d) = %3d\n", bb
, BB_TO_BLOCK (bb
),
1110 (int) (100.0 * prob
[bb
]));
1113 /* Functions for target info. */
1115 /* Compute in BL the list of split-edges of bb_src relative to bb_trg.
1116 Note that bb_trg dominates bb_src. */
1119 split_edges (int bb_src, int bb_trg, edgelst *bl)
1121 sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
1122 sbitmap_copy (src, pot_split[bb_src]);
1124 sbitmap_difference (src, src, pot_split[bb_trg]);
1125 extract_bitlst (src, bl);
1129 /* Find the valid candidate-source-blocks for the target block TRG, compute
1130 their probability, and check if they are speculative or not.
1131 For speculative sources, compute their update-blocks and split-blocks. */
1134 compute_trg_info (int trg
)
1138 int check_block
, update_idx
;
1139 int i
, j
, k
, fst_edge
, nxt_edge
;
1141 /* Define some of the fields for the target bb as well. */
1142 sp
= candidate_table
+ trg
;
1144 sp
->is_speculative
= 0;
1147 for (i
= trg
+ 1; i
< current_nr_blocks
; i
++)
1149 sp
= candidate_table
+ i
;
1151 sp
->is_valid
= IS_DOMINATED (i
, trg
);
1154 sp
->src_prob
= GET_SRC_PROB (i
, trg
);
1155 sp
->is_valid
= (sp
->src_prob
>= MIN_PROBABILITY
);
1160 split_edges (i
, trg
, &el
);
1161 sp
->is_speculative
= (el
.nr_members
) ? 1 : 0;
1162 if (sp
->is_speculative
&& !flag_schedule_speculative
)
1168 char *update_blocks
;
1170 /* Compute split blocks and store them in bblst_table.
1171 The TO block of every split edge is a split block. */
1172 sp
->split_bbs
.first_member
= &bblst_table
[bblst_last
];
1173 sp
->split_bbs
.nr_members
= el
.nr_members
;
1174 for (j
= 0; j
< el
.nr_members
; bblst_last
++, j
++)
1175 bblst_table
[bblst_last
] =
1176 TO_BLOCK (rgn_edges
[el
.first_member
[j
]]);
1177 sp
->update_bbs
.first_member
= &bblst_table
[bblst_last
];
1179 /* Compute update blocks and store them in bblst_table.
1180 For every split edge, look at the FROM block, and check
1181 all out edges. For each out edge that is not a split edge,
1182 add the TO block to the update block list. This list can end
1183 up with a lot of duplicates. We need to weed them out to avoid
1184 overrunning the end of the bblst_table. */
1185 update_blocks
= alloca (last_basic_block
);
1186 memset (update_blocks
, 0, last_basic_block
);
1189 for (j
= 0; j
< el
.nr_members
; j
++)
1191 check_block
= FROM_BLOCK (rgn_edges
[el
.first_member
[j
]]);
1192 fst_edge
= nxt_edge
= OUT_EDGES (check_block
);
1195 if (! update_blocks
[TO_BLOCK (nxt_edge
)])
1197 for (k
= 0; k
< el
.nr_members
; k
++)
1198 if (EDGE_TO_BIT (nxt_edge
) == el
.first_member
[k
])
1201 if (k
>= el
.nr_members
)
1203 bblst_table
[bblst_last
++] = TO_BLOCK (nxt_edge
);
1204 update_blocks
[TO_BLOCK (nxt_edge
)] = 1;
1209 nxt_edge
= NEXT_OUT (nxt_edge
);
1211 while (fst_edge
!= nxt_edge
);
1213 sp
->update_bbs
.nr_members
= update_idx
;
1215 /* Make sure we didn't overrun the end of bblst_table. */
1216 if (bblst_last
> bblst_size
)
1221 sp
->split_bbs
.nr_members
= sp
->update_bbs
.nr_members
= 0;
1223 sp
->is_speculative
= 0;
1229 /* Print candidates info, for debugging purposes. Callable from debugger. */
1232 debug_candidate (int i
)
1234 if (!candidate_table
[i
].is_valid
)
1237 if (candidate_table
[i
].is_speculative
)
1240 fprintf (sched_dump
, "src b %d bb %d speculative \n", BB_TO_BLOCK (i
), i
);
1242 fprintf (sched_dump
, "split path: ");
1243 for (j
= 0; j
< candidate_table
[i
].split_bbs
.nr_members
; j
++)
1245 int b
= candidate_table
[i
].split_bbs
.first_member
[j
];
1247 fprintf (sched_dump
, " %d ", b
);
1249 fprintf (sched_dump
, "\n");
1251 fprintf (sched_dump
, "update path: ");
1252 for (j
= 0; j
< candidate_table
[i
].update_bbs
.nr_members
; j
++)
1254 int b
= candidate_table
[i
].update_bbs
.first_member
[j
];
1256 fprintf (sched_dump
, " %d ", b
);
1258 fprintf (sched_dump
, "\n");
1262 fprintf (sched_dump
, " src %d equivalent\n", BB_TO_BLOCK (i
));
1266 /* Print candidates info, for debugging purposes. Callable from debugger. */
1269 debug_candidates (int trg
)
1273 fprintf (sched_dump
, "----------- candidate table: target: b=%d bb=%d ---\n",
1274 BB_TO_BLOCK (trg
), trg
);
1275 for (i
= trg
+ 1; i
< current_nr_blocks
; i
++)
1276 debug_candidate (i
);
1279 /* Functions for speculative scheduling. */
1281 /* Return 0 if x is a set of a register alive in the beginning of one
1282 of the split-blocks of src, otherwise return 1. */
1285 check_live_1 (int src
, rtx x
)
1289 rtx reg
= SET_DEST (x
);
1294 while (GET_CODE (reg
) == SUBREG
|| GET_CODE (reg
) == ZERO_EXTRACT
1295 || GET_CODE (reg
) == SIGN_EXTRACT
1296 || GET_CODE (reg
) == STRICT_LOW_PART
)
1297 reg
= XEXP (reg
, 0);
1299 if (GET_CODE (reg
) == PARALLEL
)
1303 for (i
= XVECLEN (reg
, 0) - 1; i
>= 0; i
--)
1304 if (XEXP (XVECEXP (reg
, 0, i
), 0) != 0)
1305 if (check_live_1 (src
, XEXP (XVECEXP (reg
, 0, i
), 0)))
1311 if (GET_CODE (reg
) != REG
)
1314 regno
= REGNO (reg
);
1316 if (regno
< FIRST_PSEUDO_REGISTER
&& global_regs
[regno
])
1318 /* Global registers are assumed live. */
1323 if (regno
< FIRST_PSEUDO_REGISTER
)
1325 /* Check for hard registers. */
1326 int j
= HARD_REGNO_NREGS (regno
, GET_MODE (reg
));
1329 for (i
= 0; i
< candidate_table
[src
].split_bbs
.nr_members
; i
++)
1331 int b
= candidate_table
[src
].split_bbs
.first_member
[i
];
1333 if (REGNO_REG_SET_P (BASIC_BLOCK (b
)->global_live_at_start
,
1343 /* Check for pseudo registers.  */
1344 for (i
= 0; i
< candidate_table
[src
].split_bbs
.nr_members
; i
++)
1346 int b
= candidate_table
[src
].split_bbs
.first_member
[i
];
1348 if (REGNO_REG_SET_P (BASIC_BLOCK (b
)->global_live_at_start
, regno
))
1359 /* If x is a set of a register R, mark that R is alive in the beginning
1360 of every update-block of src. */
1363 update_live_1 (int src
, rtx x
)
1367 rtx reg
= SET_DEST (x
);
1372 while (GET_CODE (reg
) == SUBREG
|| GET_CODE (reg
) == ZERO_EXTRACT
1373 || GET_CODE (reg
) == SIGN_EXTRACT
1374 || GET_CODE (reg
) == STRICT_LOW_PART
)
1375 reg
= XEXP (reg
, 0);
1377 if (GET_CODE (reg
) == PARALLEL
)
1381 for (i
= XVECLEN (reg
, 0) - 1; i
>= 0; i
--)
1382 if (XEXP (XVECEXP (reg
, 0, i
), 0) != 0)
1383 update_live_1 (src
, XEXP (XVECEXP (reg
, 0, i
), 0));
1388 if (GET_CODE (reg
) != REG
)
1391 /* Global registers are always live, so the code below does not apply
1394 regno
= REGNO (reg
);
1396 if (regno
>= FIRST_PSEUDO_REGISTER
|| !global_regs
[regno
])
1398 if (regno
< FIRST_PSEUDO_REGISTER
)
1400 int j
= HARD_REGNO_NREGS (regno
, GET_MODE (reg
));
1403 for (i
= 0; i
< candidate_table
[src
].update_bbs
.nr_members
; i
++)
1405 int b
= candidate_table
[src
].update_bbs
.first_member
[i
];
1407 SET_REGNO_REG_SET (BASIC_BLOCK (b
)->global_live_at_start
,
1414 for (i
= 0; i
< candidate_table
[src
].update_bbs
.nr_members
; i
++)
1416 int b
= candidate_table
[src
].update_bbs
.first_member
[i
];
1418 SET_REGNO_REG_SET (BASIC_BLOCK (b
)->global_live_at_start
, regno
);
1424 /* Return 1 if insn can be speculatively moved from block src to trg,
1425 otherwise return 0. Called before first insertion of insn to
1426 ready-list or before the scheduling. */
1429 check_live (rtx insn
, int src
)
1431 /* Find the registers set by instruction. */
1432 if (GET_CODE (PATTERN (insn
)) == SET
1433 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
1434 return check_live_1 (src
, PATTERN (insn
));
1435 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
1438 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
1439 if ((GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == SET
1440 || GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == CLOBBER
)
1441 && !check_live_1 (src
, XVECEXP (PATTERN (insn
), 0, j
)))
1450 /* Update the live registers info after insn was moved speculatively from
1451 block src to trg. */
1454 update_live (rtx insn
, int src
)
1456 /* Find the registers set by instruction. */
1457 if (GET_CODE (PATTERN (insn
)) == SET
1458 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
1459 update_live_1 (src
, PATTERN (insn
));
1460 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
1463 for (j
= XVECLEN (PATTERN (insn
), 0) - 1; j
>= 0; j
--)
1464 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == SET
1465 || GET_CODE (XVECEXP (PATTERN (insn
), 0, j
)) == CLOBBER
)
1466 update_live_1 (src
, XVECEXP (PATTERN (insn
), 0, j
));
1470 /* Nonzero if block bb_to is equal to, or reachable from block bb_from. */
1471 #define IS_REACHABLE(bb_from, bb_to) \
1473 || IS_RGN_ENTRY (bb_from) \
1474 || (TEST_BIT (ancestor_edges[bb_to], \
1475 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
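/* In words: bb_to is reachable from bb_from when they are the same block,
   when bb_from is the region entry, or when the edge into bb_from is among
   the ancestor edges recorded for bb_to by compute_dom_prob_ps.  */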
1477 /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */
1480 set_spec_fed (rtx load_insn
)
1484 for (link
= INSN_DEPEND (load_insn
); link
; link
= XEXP (link
, 1))
1485 if (GET_MODE (link
) == VOIDmode
)
1486 FED_BY_SPEC_LOAD (XEXP (link
, 0)) = 1;
1487 } /* set_spec_fed */
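/* Note: the dependence type is encoded in the rtx mode of the INSN_LIST
   node, so GET_MODE (link) == VOIDmode selects true (data) dependences
   only.  */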
1489 /* On the path from the insn to load_insn_bb, find a conditional
1490 branch depending on insn, that guards the speculative load. */
1493 find_conditional_protection (rtx insn
, int load_insn_bb
)
1497 /* Iterate through DEF-USE forward dependences. */
1498 for (link
= INSN_DEPEND (insn
); link
; link
= XEXP (link
, 1))
1500 rtx next
= XEXP (link
, 0);
1501 if ((CONTAINING_RGN (BLOCK_NUM (next
)) ==
1502 CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb
)))
1503 && IS_REACHABLE (INSN_BB (next
), load_insn_bb
)
1504 && load_insn_bb
!= INSN_BB (next
)
1505 && GET_MODE (link
) == VOIDmode
1506 && (GET_CODE (next
) == JUMP_INSN
1507 || find_conditional_protection (next
, load_insn_bb
)))
1511 } /* find_conditional_protection */
1513 /* Returns 1 if the same insn1 that participates in the computation
1514 of load_insn's address is feeding a conditional branch that is
1515 guarding on load_insn.  This is true if we find the two DEF-USE
1517 insn1 -> ... -> conditional-branch
1518 insn1 -> ... -> load_insn,
1519 and if a flow path exists:
1520 insn1 -> ... -> conditional-branch -> ... -> load_insn,
1521 and if insn1 is on the path
1522 region-entry -> ... -> bb_trg -> ... load_insn.
1524 Locate insn1 by climbing on LOG_LINKS from load_insn.
1525 Locate the branch by following INSN_DEPEND from insn1. */
1528 is_conditionally_protected (rtx load_insn
, int bb_src
, int bb_trg
)
1532 for (link
= LOG_LINKS (load_insn
); link
; link
= XEXP (link
, 1))
1534 rtx insn1
= XEXP (link
, 0);
1536 /* Must be a DEF-USE dependence upon non-branch. */
1537 if (GET_MODE (link
) != VOIDmode
1538 || GET_CODE (insn1
) == JUMP_INSN
)
1541 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1542 if (INSN_BB (insn1
) == bb_src
1543 || (CONTAINING_RGN (BLOCK_NUM (insn1
))
1544 != CONTAINING_RGN (BB_TO_BLOCK (bb_src
)))
1545 || (!IS_REACHABLE (bb_trg
, INSN_BB (insn1
))
1546 && !IS_REACHABLE (INSN_BB (insn1
), bb_trg
)))
1549 /* Now search for the conditional-branch. */
1550 if (find_conditional_protection (insn1
, bb_src
))
1553 /* Recursive step: search another insn1, "above" current insn1. */
1554 return is_conditionally_protected (insn1
, bb_src
, bb_trg
);
1557 /* The chain does not exist. */
1559 } /* is_conditionally_protected */
1561 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1562 load_insn can move speculatively from bb_src to bb_trg. All the
1563 following must hold:
1565 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1566 (2) load_insn and load1 have a def-use dependence upon
1567 the same insn 'insn1'.
1568 (3) either load2 is in bb_trg, or:
1569 - there's only one split-block, and
1570 - load1 is on the escape path, and
1572 From all these we can conclude that the two loads access memory
1573 addresses that differ at most by a constant, and hence if moving
1574 load_insn would cause an exception, it would have been caused by
1575 load2 anyhow.  */
1578 is_pfree (rtx load_insn
, int bb_src
, int bb_trg
)
1581 candidate
*candp
= candidate_table
+ bb_src
;
1583 if (candp
->split_bbs
.nr_members
!= 1)
1584 /* Must have exactly one escape block. */
1587 for (back_link
= LOG_LINKS (load_insn
);
1588 back_link
; back_link
= XEXP (back_link
, 1))
1590 rtx insn1
= XEXP (back_link
, 0);
1592 if (GET_MODE (back_link
) == VOIDmode
)
1594 /* Found a DEF-USE dependence (insn1, load_insn). */
1597 for (fore_link
= INSN_DEPEND (insn1
);
1598 fore_link
; fore_link
= XEXP (fore_link
, 1))
1600 rtx insn2
= XEXP (fore_link
, 0);
1601 if (GET_MODE (fore_link
) == VOIDmode
)
1603 /* Found a DEF-USE dependence (insn1, insn2). */
1604 if (haifa_classify_insn (insn2
) != PFREE_CANDIDATE
)
1605 /* insn2 not guaranteed to be a 1 base reg load. */
1608 if (INSN_BB (insn2
) == bb_trg
)
1609 /* insn2 is the similar load, in the target block. */
1612 if (*(candp
->split_bbs
.first_member
) == BLOCK_NUM (insn2
))
1613 /* insn2 is a similar load, in a split-block. */
1620 /* Couldn't find a similar load. */
1624 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
1625 a load moved speculatively, or if load_insn is protected by
1626 a compare on load_insn's address). */
1629 is_prisky (rtx load_insn
, int bb_src
, int bb_trg
)
1631 if (FED_BY_SPEC_LOAD (load_insn
))
1634 if (LOG_LINKS (load_insn
) == NULL
)
1635 /* Dependence may 'hide' out of the region. */
1638 if (is_conditionally_protected (load_insn
, bb_src
, bb_trg
))
1644 /* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
1645 Return 1 if insn is exception-free (and the motion is valid)
1646 and 0 otherwise.  */
1649 is_exception_free (rtx insn
, int bb_src
, int bb_trg
)
1651 int insn_class
= haifa_classify_insn (insn
);
1653 /* Handle non-load insns. */
1664 if (!flag_schedule_speculative_load
)
1666 IS_LOAD_INSN (insn
) = 1;
1673 case PFREE_CANDIDATE
:
1674 if (is_pfree (insn
, bb_src
, bb_trg
))
1676 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
1677 case PRISKY_CANDIDATE
:
1678 if (!flag_schedule_speculative_load_dangerous
1679 || is_prisky (insn
, bb_src
, bb_trg
))
1685 return flag_schedule_speculative_load_dangerous
;
1688 /* The number of insns from the current block scheduled so far. */
1689 static int sched_target_n_insns;
1690 /* The number of insns from the current block to be scheduled in total.  */
1691 static int target_n_insns;
1692 /* The number of insns from the entire region scheduled so far.  */
1693 static int sched_n_insns;
1694 /* Nonzero if the last scheduled insn was a jump.  */
1695 static int last_was_jump;
1697 /* Implementations of the sched_info functions for region scheduling. */
1698 static void init_ready_list (struct ready_list *);
1699 static int can_schedule_ready_p (rtx);
1700 static int new_ready (rtx);
1701 static int schedule_more_p (void);
1702 static const char *rgn_print_insn (rtx, int);
1703 static int rgn_rank (rtx, rtx);
1704 static int contributes_to_priority (rtx, rtx);
1705 static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
1707 /* Return nonzero if there are more insns that should be scheduled. */
1710 schedule_more_p (void)
1712 return ! last_was_jump && sched_target_n_insns < target_n_insns;
1715 /* Add all insns that are initially ready to the ready list READY. Called
1716 once before scheduling a set of insns. */
1719 init_ready_list (struct ready_list
*ready
)
1721 rtx prev_head
= current_sched_info
->prev_head
;
1722 rtx next_tail
= current_sched_info
->next_tail
;
1727 sched_target_n_insns
= 0;
1731 /* Print debugging information. */
1732 if (sched_verbose
>= 5)
1733 debug_dependencies ();
1735 /* Prepare current target block info. */
1736 if (current_nr_blocks
> 1)
1738 candidate_table
= xmalloc (current_nr_blocks
* sizeof (candidate
));
1741 /* bblst_table holds split blocks and update blocks for each block after
1742 the current one in the region. split blocks and update blocks are
1743 the TO blocks of region edges, so there can be at most rgn_nr_edges
1745 bblst_size
= (current_nr_blocks
- target_bb
) * rgn_nr_edges
;
1746 bblst_table
= xmalloc (bblst_size
* sizeof (int));
1748 bitlst_table_last
= 0;
1749 bitlst_table
= xmalloc (rgn_nr_edges
* sizeof (int));
1751 compute_trg_info (target_bb
);
1754 /* Initialize ready list with all 'ready' insns in target block.
1755 Count number of insns in the target block being scheduled. */
1756 for (insn
= NEXT_INSN (prev_head
); insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1758 if (INSN_DEP_COUNT (insn
) == 0)
1759 ready_add (ready
, insn
);
1763 /* Add to ready list all 'ready' insns in valid source blocks.
1764 For speculative insns, check-live, exception-free, and
1766 for (bb_src
= target_bb
+ 1; bb_src
< current_nr_blocks
; bb_src
++)
1767 if (IS_VALID (bb_src
))
1773 get_block_head_tail (BB_TO_BLOCK (bb_src
), &head
, &tail
);
1774 src_next_tail
= NEXT_INSN (tail
);
1777 for (insn
= src_head
; insn
!= src_next_tail
; insn
= NEXT_INSN (insn
))
1779 if (! INSN_P (insn
))
1782 if (!CANT_MOVE (insn
)
1783 && (!IS_SPECULATIVE_INSN (insn
)
1784 || ((((!targetm
.sched
.use_dfa_pipeline_interface
1785 || !(*targetm
.sched
.use_dfa_pipeline_interface
) ())
1786 && insn_issue_delay (insn
) <= 3)
1787 || (targetm
.sched
.use_dfa_pipeline_interface
1788 && (*targetm
.sched
.use_dfa_pipeline_interface
) ()
1789 && (recog_memoized (insn
) < 0
1790 || min_insn_conflict_delay (curr_state
,
1792 && check_live (insn
, bb_src
)
1793 && is_exception_free (insn
, bb_src
, target_bb
))))
1794 if (INSN_DEP_COUNT (insn
) == 0)
1795 ready_add (ready
, insn
);
1800 /* Called after taking INSN from the ready list. Returns nonzero if this
1801 insn can be scheduled, zero if we should silently discard it.  */
1804 can_schedule_ready_p (rtx insn
)
1806 if (GET_CODE (insn
) == JUMP_INSN
)
1809 /* An interblock motion? */
1810 if (INSN_BB (insn
) != target_bb
)
1814 if (IS_SPECULATIVE_INSN (insn
))
1816 if (!check_live (insn
, INSN_BB (insn
)))
1818 update_live (insn
, INSN_BB (insn
));
1820 /* For speculative load, mark insns fed by it. */
1821 if (IS_LOAD_INSN (insn
) || FED_BY_SPEC_LOAD (insn
))
1822 set_spec_fed (insn
);
1828 /* Update source block boundaries. */
1829 b1
= BLOCK_FOR_INSN (insn
);
1830 if (insn
== b1
->head
&& insn
== b1
->end
)
1832 /* We moved all the insns in the basic block.
1833 Emit a note after the last insn and update the
1834 begin/end boundaries to point to the note. */
1835 rtx note
= emit_note_after (NOTE_INSN_DELETED
, insn
);
1839 else if (insn
== b1
->end
)
1841 /* We took insns from the end of the basic block,
1842 so update the end of block boundary so that it
1843 points to the first insn we did not move. */
1844 b1
->end
= PREV_INSN (insn
);
1846 else if (insn
== b1
->head
)
1848 /* We took insns from the start of the basic block,
1849 so update the start of block boundary so that
1850 it points to the first insn we did not move. */
1851 b1
->head
= NEXT_INSN (insn
);
1856 /* In block motion. */
1857 sched_target_n_insns
++;
1864 /* Called after INSN has all its dependencies resolved. Return nonzero
1865 if it should be moved to the ready list or the queue, or zero if we
1866 should silently discard it. */
1868 new_ready (rtx next
)
1870 /* For speculative insns, before inserting to ready/queue,
1871 check live, exception-free, and issue-delay. */
1872 if (INSN_BB (next
) != target_bb
1873 && (!IS_VALID (INSN_BB (next
))
1875 || (IS_SPECULATIVE_INSN (next
)
1877 || (targetm
.sched
.use_dfa_pipeline_interface
1878 && (*targetm
.sched
.use_dfa_pipeline_interface
) ()
1879 && recog_memoized (next
) >= 0
1880 && min_insn_conflict_delay (curr_state
, next
,
1882 || ((!targetm
.sched
.use_dfa_pipeline_interface
1883 || !(*targetm
.sched
.use_dfa_pipeline_interface
) ())
1884 && insn_issue_delay (next
) > 3)
1885 || !check_live (next
, INSN_BB (next
))
1886 || !is_exception_free (next
, INSN_BB (next
), target_bb
)))))
1891 /* Return a string that contains the insn uid and optionally anything else
1892 necessary to identify this insn in an output. It's valid to use a
1893 static buffer for this. The ALIGNED parameter should cause the string
1894 to be formatted so that multiple output lines will line up nicely. */
1897 rgn_print_insn (rtx insn
, int aligned
)
1899 static char tmp
[80];
1902 sprintf (tmp
, "b%3d: i%4d", INSN_BB (insn
), INSN_UID (insn
));
1905 if (current_nr_blocks
> 1 && INSN_BB (insn
) != target_bb
)
1906 sprintf (tmp
, "%d/b%d", INSN_UID (insn
), INSN_BB (insn
));
1908 sprintf (tmp
, "%d", INSN_UID (insn
));
1913 /* Compare priority of two insns. Return a positive number if the second
1914 insn is to be preferred for scheduling, and a negative one if the first
1915 is to be preferred. Zero if they are equally good. */
1918 rgn_rank (rtx insn1
, rtx insn2
)
1920 /* Some comparisons make sense only in interblock scheduling.  */
1921 if (INSN_BB (insn1
) != INSN_BB (insn2
))
1923 int spec_val
, prob_val
;
1925 /* Prefer an inblock motion over an interblock motion.  */
1926 if ((INSN_BB (insn2
) == target_bb
) && (INSN_BB (insn1
) != target_bb
))
1928 if ((INSN_BB (insn1
) == target_bb
) && (INSN_BB (insn2
) != target_bb
))
1931 /* Prefer a useful motion over a speculative one.  */
1932 spec_val
= IS_SPECULATIVE_INSN (insn1
) - IS_SPECULATIVE_INSN (insn2
);
1936 /* Prefer a more probable (speculative) insn. */
1937 prob_val
= INSN_PROBABILITY (insn2
) - INSN_PROBABILITY (insn1
);
1944 /* NEXT is an instruction that depends on INSN (a backward dependence);
1945 return nonzero if we should include this dependence in priority
1946 calculations.  */
1949 contributes_to_priority (rtx next
, rtx insn
)
1951 return BLOCK_NUM (next
) == BLOCK_NUM (insn
);
1954 /* INSN is a JUMP_INSN, COND_SET is the set of registers that are
1955 conditionally set before INSN. Store the set of registers that
1956 must be considered as used by this jump in USED and that of
1957 registers that must be considered as set in SET. */
1960 compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED
,
1961 regset cond_exec ATTRIBUTE_UNUSED
,
1962 regset used ATTRIBUTE_UNUSED
,
1963 regset set ATTRIBUTE_UNUSED
)
1965 /* Nothing to do here, since we postprocess jumps in
1966 add_branch_dependences. */
1969 /* Used in schedule_insns to initialize current_sched_info for scheduling
1970 regions (or single basic blocks). */
1972 static struct sched_info region_sched_info
=
1975 can_schedule_ready_p
,
1980 contributes_to_priority
,
1981 compute_jump_reg_dependencies
,
1988 /* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
1991 sets_likely_spilled (rtx pat
)
1994 note_stores (pat
, sets_likely_spilled_1
, &ret
);
1999 sets_likely_spilled_1 (rtx x
, rtx pat
, void *data
)
2001 bool *ret
= (bool *) data
;
2003 if (GET_CODE (pat
) == SET
2005 && REGNO (x
) < FIRST_PSEUDO_REGISTER
2006 && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x
))))
2010 /* Add dependences so that branches are scheduled to run last in their
2011 block.  */
2014 add_branch_dependences (rtx head, rtx tail)
2018 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2019 that can throw exceptions, force them to remain in order at the end of
2020 the block by adding dependencies and giving the last a high priority.
2021 There may be notes present, and prev_head may also be a note.
2023 Branches must obviously remain at the end. Calls should remain at the
2024 end since moving them results in worse register allocation. Uses remain
2025 at the end to ensure proper register allocation.
2027 cc0 setters remain at the end because they can't be moved away from
2030 Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
2031 are not moved before reload because we can wind up with register
2032 allocation failures. */
2036 while (GET_CODE (insn
) == CALL_INSN
2037 || GET_CODE (insn
) == JUMP_INSN
2038 || (GET_CODE (insn
) == INSN
2039 && (GET_CODE (PATTERN (insn
)) == USE
2040 || GET_CODE (PATTERN (insn
)) == CLOBBER
2041 || can_throw_internal (insn
)
2043 || sets_cc0_p (PATTERN (insn
))
2045 || (!reload_completed
2046 && sets_likely_spilled (PATTERN (insn
)))))
2047 || GET_CODE (insn
) == NOTE
)
2049 if (GET_CODE (insn
) != NOTE
)
2051 if (last
!= 0 && !find_insn_list (insn
, LOG_LINKS (last
)))
2053 add_dependence (last
, insn
, REG_DEP_ANTI
);
2054 INSN_REF_COUNT (insn
)++;
2057 CANT_MOVE (insn
) = 1;
2062 /* Don't overrun the bounds of the basic block. */
2066 insn
= PREV_INSN (insn
);
2069 /* Make sure these insns are scheduled last in their block. */
2072 while (insn
!= head
)
2074 insn
= prev_nonnote_insn (insn
);
2076 if (INSN_REF_COUNT (insn
) != 0)
2079 add_dependence (last
, insn
, REG_DEP_ANTI
);
2080 INSN_REF_COUNT (insn
) = 1;
2084 /* Data structures for the computation of data dependences in a region.  We
2085 keep one `deps' structure for every basic block. Before analyzing the
2086 data dependences for a bb, its variables are initialized as a function of
2087 the variables of its predecessors. When the analysis for a bb completes,
2088 we save the contents to the corresponding bb_deps[bb] variable. */
2090 static struct deps *bb_deps;
2092 /* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */
2095 concat_INSN_LIST (rtx copy, rtx old)
2098 for (; copy; copy = XEXP (copy, 1))
2099 new = alloc_INSN_LIST (XEXP (copy, 0), new);
2104 concat_insn_mem_list (rtx copy_insns
, rtx copy_mems
, rtx
*old_insns_p
,
2107 rtx new_insns
= *old_insns_p
;
2108 rtx new_mems
= *old_mems_p
;
2112 new_insns
= alloc_INSN_LIST (XEXP (copy_insns
, 0), new_insns
);
2113 new_mems
= alloc_EXPR_LIST (VOIDmode
, XEXP (copy_mems
, 0), new_mems
);
2114 copy_insns
= XEXP (copy_insns
, 1);
2115 copy_mems
= XEXP (copy_mems
, 1);
2118 *old_insns_p
= new_insns
;
2119 *old_mems_p
= new_mems
;
/* After computing the dependencies for block BB, propagate the dependencies
   found in TMP_DEPS to the successors of the block.  */

static void
propagate_deps (int bb, struct deps *pred_deps)
{
  int b = BB_TO_BLOCK (bb);
  int e, first_edge;

  /* bb's structures are inherited by its successors.  */
  first_edge = e = OUT_EDGES (b);
  if (e > 0)
    do
      {
	int b_succ = TO_BLOCK (e);
	int bb_succ = BLOCK_TO_BB (b_succ);
	struct deps *succ_deps = bb_deps + bb_succ;
	int reg;

	/* Only bbs "below" bb, in the same region, are interesting.  */
	if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
	    || bb_succ <= bb)
	  {
	    e = NEXT_OUT (e);
	    continue;
	  }

	/* The reg_last lists are inherited by bb_succ.  */
	EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
	  {
	    struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
	    struct deps_reg *succ_rl = &succ_deps->reg_last[reg];

	    succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
	    succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
	    succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
						  succ_rl->clobbers);
	    succ_rl->uses_length += pred_rl->uses_length;
	    succ_rl->clobbers_length += pred_rl->clobbers_length;
	  });
	IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use);

	/* Mem read/write lists are inherited by bb_succ.  */
	concat_insn_mem_list (pred_deps->pending_read_insns,
			      pred_deps->pending_read_mems,
			      &succ_deps->pending_read_insns,
			      &succ_deps->pending_read_mems);
	concat_insn_mem_list (pred_deps->pending_write_insns,
			      pred_deps->pending_write_mems,
			      &succ_deps->pending_write_insns,
			      &succ_deps->pending_write_mems);

	succ_deps->last_pending_memory_flush
	  = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
			      succ_deps->last_pending_memory_flush);

	succ_deps->pending_lists_length += pred_deps->pending_lists_length;
	succ_deps->pending_flush_length += pred_deps->pending_flush_length;

	/* last_function_call is inherited by bb_succ.  */
	succ_deps->last_function_call
	  = concat_INSN_LIST (pred_deps->last_function_call,
			      succ_deps->last_function_call);

	/* sched_before_next_call is inherited by bb_succ.  */
	succ_deps->sched_before_next_call
	  = concat_INSN_LIST (pred_deps->sched_before_next_call,
			      succ_deps->sched_before_next_call);

	e = NEXT_OUT (e);
      }
    while (e != first_edge);

  /* These lists should point to the right place, for correct
     freeing later.  */
  bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
  bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
  bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
  bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;

  /* Can't allow these to be freed twice.  */
  pred_deps->pending_read_insns = 0;
  pred_deps->pending_read_mems = 0;
  pred_deps->pending_write_insns = 0;
  pred_deps->pending_write_mems = 0;
}
/* Compute backward dependences inside bb.  In a multi-block region:
   (1) a bb is analyzed after its predecessors, and (2) the lists in
   effect at the end of bb (after analyzing for bb) are inherited by
   bb's successors.

   Specifically for reg-reg data dependences, the block insns are
   scanned by sched_analyze () top-to-bottom.  Two lists are
   maintained by sched_analyze (): reg_last[].sets for register DEFs,
   and reg_last[].uses for register USEs.

   When analysis is completed for bb, we update for its successors:
   ;  - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
   ;  - USES[succ] = Union (USES [succ], USES [bb])

   The mechanism for computing mem-mem data dependence is very
   similar, and the result is interblock dependences in the region.  */

static void
compute_block_backward_dependences (int bb)
{
  rtx head, tail;
  struct deps tmp_deps;

  tmp_deps = bb_deps[bb];

  /* Do the analysis for this block.  */
  get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
  sched_analyze (&tmp_deps, head, tail);
  add_branch_dependences (head, tail);

  if (current_nr_blocks > 1)
    propagate_deps (bb, &tmp_deps);

  /* Free up the INSN_LISTs.  */
  free_deps (&tmp_deps);
}
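
/* A small example of the inheritance described above, with illustrative
   pseudo register numbers: in a two-block region where bb0 contains
   "r5 = r6 + 1" and its successor bb1 contains "r7 = r5 * 2", analyzing bb0
   leaves the setting insn of r5 on reg_last[5].sets; propagate_deps copies
   that list into bb_deps[1], so when sched_analyze later scans bb1, the use
   of r5 picks up a true dependence on the insn in bb0, i.e. an interblock
   dependence within the region.  */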
/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
      free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
      free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
      free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
    }
}
/* Print dependences for debugging, callable from debugger.  */

void
debug_dependencies (void)
{
  int bb;

  fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;
      rtx next_tail;
      rtx insn;

      get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
      next_tail = NEXT_INSN (tail);
      fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
	       BB_TO_BLOCK (bb), bb);

      if (targetm.sched.use_dfa_pipeline_interface
	  && (*targetm.sched.use_dfa_pipeline_interface) ())
	{
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
		   "insn", "code", "bb", "dep", "prio", "cost",
		   "reservation");
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
		   "----", "----", "--", "---", "----", "----",
		   "-----------");
	}
      else
	{
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
		   "insn", "code", "bb", "dep", "prio", "cost", "blockage", "units");
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
		   "----", "----", "--", "---", "----", "----", "--------", "-----");
	}

      for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
	{
	  rtx link;

	  if (! INSN_P (insn))
	    {
	      int n;

	      fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
	      if (GET_CODE (insn) == NOTE)
		{
		  n = NOTE_LINE_NUMBER (insn);
		  if (n < 0)
		    fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
		  else
		    fprintf (sched_dump, "line %d, file %s\n", n,
			     NOTE_SOURCE_FILE (insn));
		}
	      else
		fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
	      continue;
	    }

	  if (targetm.sched.use_dfa_pipeline_interface
	      && (*targetm.sched.use_dfa_pipeline_interface) ())
	    {
	      fprintf (sched_dump,
		       ";; %s%5d%6d%6d%6d%6d%6d ",
		       (SCHED_GROUP_P (insn) ? "+" : " "),
		       INSN_UID (insn),
		       INSN_CODE (insn),
		       INSN_BB (insn),
		       INSN_DEP_COUNT (insn),
		       INSN_PRIORITY (insn),
		       insn_cost (insn, 0, 0));

	      if (recog_memoized (insn) < 0)
		fprintf (sched_dump, "nothing");
	      else
		print_reservation (sched_dump, insn);
	    }
	  else
	    {
	      int unit = insn_unit (insn);
	      int range
		= (unit < 0
		   || function_units[unit].blockage_range_function == 0
		   ? 0
		   : function_units[unit].blockage_range_function (insn));

	      fprintf (sched_dump,
		       ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
		       (SCHED_GROUP_P (insn) ? "+" : " "),
		       INSN_UID (insn),
		       INSN_CODE (insn),
		       INSN_BB (insn),
		       INSN_DEP_COUNT (insn),
		       INSN_PRIORITY (insn),
		       insn_cost (insn, 0, 0),
		       (int) MIN_BLOCKAGE_COST (range),
		       (int) MAX_BLOCKAGE_COST (range));
	      insn_print_units (insn);
	    }

	  fprintf (sched_dump, "\t: ");
	  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
	    fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
	  fprintf (sched_dump, "\n");
	}
    }
  fprintf (sched_dump, "\n");
}
/* Schedule a region.  A region is either an inner loop, a loop-free
   subroutine, or a single basic block.  Each bb in the region is
   scheduled after its flow predecessors.  */

static void
schedule_region (int rgn)
{
  int bb;
  int rgn_n_insns = 0;
  int sched_rgn_n_insns = 0;

  /* Set variables for the current region.  */
  current_nr_blocks = RGN_NR_BLOCKS (rgn);
  current_blocks = RGN_BLOCKS (rgn);

  init_deps_global ();

  /* Initializations for region data dependence analysis.  */
  bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
  for (bb = 0; bb < current_nr_blocks; bb++)
    init_deps (bb_deps + bb);

  /* Compute LOG_LINKS.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    compute_block_backward_dependences (bb);

  /* Compute INSN_DEPEND.  */
  for (bb = current_nr_blocks - 1; bb >= 0; bb--)
    {
      rtx head, tail;

      get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);

      compute_forward_dependences (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
	targetm.sched.dependencies_evaluation_hook (head, tail);
    }

  /* Set priorities.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;

      get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);

      rgn_n_insns += set_priorities (head, tail);
    }

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      int i;

      prob = xmalloc ((current_nr_blocks) * sizeof (float));

      dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
      sbitmap_vector_zero (dom, current_nr_blocks);

      /* Map each edge of this region onto a bit position.  */
      rgn_nr_edges = 0;
      edge_to_bit = xmalloc (nr_edges * sizeof (int));
      for (i = 1; i < nr_edges; i++)
	if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
	  EDGE_TO_BIT (i) = rgn_nr_edges++;
      rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));

      rgn_nr_edges = 0;
      for (i = 1; i < nr_edges; i++)
	if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
	  rgn_edges[rgn_nr_edges++] = i;

      /* Split edges.  */
      pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      sbitmap_vector_zero (pot_split, current_nr_blocks);
      ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      sbitmap_vector_zero (ancestor_edges, current_nr_blocks);

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
	compute_dom_prob_ps (bb);
    }

  /* Now we can schedule all blocks.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;
      int b = BB_TO_BLOCK (bb);

      get_block_head_tail (b, &head, &tail);

      if (no_real_insns_p (head, tail))
	continue;

      current_sched_info->prev_head = PREV_INSN (head);
      current_sched_info->next_tail = NEXT_INSN (tail);

      if (write_symbols != NO_DEBUG)
	{
	  save_line_notes (b, head, tail);
	  rm_line_notes (head, tail);
	}

      /* rm_other_notes only removes notes which are _inside_ the
	 block---that is, it won't remove notes before the first real insn
	 or after the last real insn of the block.  So if the first insn
	 has a REG_SAVE_NOTE which would otherwise be emitted before the
	 insn, it is redundant with the note before the start of the
	 block, and so we have to take it out.  */
      if (INSN_P (head))
	{
	  rtx note;

	  for (note = REG_NOTES (head); note; note = XEXP (note, 1))
	    if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	      {
		remove_note (head, note);
		note = XEXP (note, 1);
		remove_note (head, note);
	      }
	}

      /* Remove remaining note insns from the block, save them in
	 note_list.  These notes are restored at the end of
	 schedule_block ().  */
      rm_other_notes (head, tail);

      target_bb = bb;

      current_sched_info->queue_must_finish_empty
	= current_nr_blocks > 1 && !flag_schedule_interblock;

      schedule_block (b, rgn_n_insns);
      sched_rgn_n_insns += sched_n_insns;

      /* Update target block boundaries.  */
      if (head == BLOCK_HEAD (b))
	BLOCK_HEAD (b) = current_sched_info->head;
      if (tail == BLOCK_END (b))
	BLOCK_END (b) = current_sched_info->tail;

      /* Clean up.  */
      if (current_nr_blocks > 1)
	{
	  free (candidate_table);
	  free (bblst_table);
	  free (bitlst_table);
	}
    }

  /* Sanity check: verify that all region insns were scheduled.  */
  if (sched_rgn_n_insns != rgn_n_insns)
    abort ();

  /* Restore line notes.  */
  if (write_symbols != NO_DEBUG)
    {
      for (bb = 0; bb < current_nr_blocks; bb++)
	{
	  rtx head, tail;

	  get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
	  restore_line_notes (head, tail);
	}
    }

  /* Done with this region.  */
  free_pending_lists ();

  finish_deps_global ();

  free (bb_deps);

  if (current_nr_blocks > 1)
    {
      free (prob);
      sbitmap_vector_free (dom);
      sbitmap_vector_free (pot_split);
      sbitmap_vector_free (ancestor_edges);
      free (edge_to_bit);
      free (rgn_edges);
    }
}
/* Indexed by region, holds the number of death notes found in that region.
   Used for consistency checks.  */
static int *deaths_in_region;
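
/* Roughly, the consistency check works as follows (see init_regions and
   schedule_insns below): before scheduling, count_or_remove_death_notes
   strips the REG_DEAD notes of each region and records the count here;
   after scheduling and the life-info update, the notes of each single-block
   region are recounted and the two numbers are required to match.  */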
/* Initialize data structures for region scheduling.  */

static void
init_regions (void)
{
  sbitmap blocks;
  int rgn;

  nr_regions = 0;
  rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
  rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
  block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
  containing_rgn = xmalloc ((last_basic_block) * sizeof (int));

  /* Compute regions for scheduling.  */
  if (reload_completed
      || n_basic_blocks == 1
      || !flag_schedule_interblock)
    {
      find_single_block_region ();
    }
  else
    {
      /* Verify that a 'good' control flow graph can be built.  */
      if (is_cfg_nonregular ())
	{
	  find_single_block_region ();
	}
      else
	{
	  struct edge_list *edge_list;
	  dominance_info dom;

	  /* The scheduler runs after estimate_probabilities; therefore, we
	     can't blindly call back into find_basic_blocks since doing so
	     could invalidate the branch probability info.  We could,
	     however, call cleanup_cfg.  */
	  edge_list = create_edge_list ();

	  /* Compute the dominators and post dominators.  */
	  dom = calculate_dominance_info (CDI_DOMINATORS);

	  /* build_control_flow will return nonzero if it detects unreachable
	     blocks or any other irregularity with the cfg which prevents
	     cross block scheduling.  */
	  if (build_control_flow (edge_list) != 0)
	    find_single_block_region ();
	  else
	    find_rgns (edge_list, dom);

	  if (sched_verbose >= 3)
	    debug_regions ();

	  /* We are done with flow's edge list.  */
	  free_edge_list (edge_list);

	  /* For now.  This will move as more and more of haifa is converted
	     to using the cfg code in flow.c.  */
	  free_dominance_info (dom);
	}
    }

  if (CHECK_DEAD_NOTES)
    {
      blocks = sbitmap_alloc (last_basic_block);
      deaths_in_region = xmalloc (sizeof (int) * nr_regions);
      /* Remove all death notes from the subroutine.  */
      for (rgn = 0; rgn < nr_regions; rgn++)
	{
	  int b;

	  sbitmap_zero (blocks);
	  for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
	    SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);

	  deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
	}
      sbitmap_free (blocks);
    }
  else
    count_or_remove_death_notes (NULL, 1);
}
/* The one entry point in this file.  DUMP_FILE is the dump file for
   this pass.  */

void
schedule_insns (FILE *dump_file)
{
  sbitmap large_region_blocks, blocks;
  int rgn;
  int any_large_regions;
  basic_block bb;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return;

  nr_inter = 0;
  nr_spec = 0;

  sched_init (dump_file);

  init_regions ();

  current_sched_info = &region_sched_info;

  /* Schedule every region in the subroutine.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    schedule_region (rgn);

  /* Update life analysis for the subroutine.  Do single block regions
     first so that we can verify that live_at_start didn't change.  Then
     do all other blocks.  */
  /* ??? There is an outside possibility that update_life_info, or more
     to the point propagate_block, could get called with nonzero flags
     more than once for one basic block.  This would be kinda bad if it
     were to happen, since REG_INFO would be accumulated twice for the
     block, and we'd have twice the REG_DEAD notes.

     I'm fairly certain that this _shouldn't_ happen, since I don't think
     that live_at_start should change at region heads.  Not sure what the
     best way to test for this kind of thing...  */

  allocate_reg_life_data ();
  compute_bb_for_insn ();

  any_large_regions = 0;
  large_region_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (large_region_blocks);
  FOR_EACH_BB (bb)
    SET_BIT (large_region_blocks, bb->index);

  blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (blocks);

  /* Update life information.  For regions consisting of multiple blocks
     we've possibly done interblock scheduling that affects global liveness.
     For regions consisting of single blocks we need to do only local
     liveness.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    if (RGN_NR_BLOCKS (rgn) > 1)
      any_large_regions = 1;
    else
      {
	SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
	RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
      }

  /* Don't update reg info after reload, since that affects
     regs_ever_live, which should not change after reload.  */
  update_life_info (blocks, UPDATE_LIFE_LOCAL,
		    (reload_completed ? PROP_DEATH_NOTES
		     : PROP_DEATH_NOTES | PROP_REG_INFO));
  if (any_large_regions)
    update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
		      PROP_DEATH_NOTES | PROP_REG_INFO);

  if (CHECK_DEAD_NOTES)
    {
      /* Verify the counts of REG_DEAD notes in single basic block
	 regions.  */
      for (rgn = 0; rgn < nr_regions; rgn++)
	if (RGN_NR_BLOCKS (rgn) == 1)
	  {
	    sbitmap_zero (blocks);
	    SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);

	    if (deaths_in_region[rgn]
		!= count_or_remove_death_notes (blocks, 0))
	      abort ();
	  }
      free (deaths_in_region);
    }

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  /* Delete redundant line notes.  */
  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  if (sched_verbose)
    {
      if (reload_completed == 0 && flag_schedule_interblock)
	fprintf (sched_dump,
		 "\n;; Procedure interblock/speculative motions == %d/%d \n",
		 nr_inter, nr_spec);
      fprintf (sched_dump, "\n\n");
    }

  /* Clean up.  */
  free (rgn_table);
  free (rgn_bb_table);
  free (block_to_bb);
  free (containing_rgn);

  sched_finish ();

  sbitmap_free (blocks);
  sbitmap_free (large_region_blocks);
}