/* Instruction scheduling pass.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This pass implements list scheduling within basic blocks.  It is
   run twice: (1) after flow analysis, but before register allocation,
   and (2) after register allocation.

   The first run performs interblock scheduling, moving insns between
   different blocks in the same "region", and the second runs only
   basic block scheduling.

   Interblock motions performed are useful motions and speculative
   motions, including speculative loads.  Motions requiring code
   duplication are not supported.  The identification of motion type
   and the check for validity of speculative motions requires
   construction and analysis of the function's control flow graph.

   The main entry point for this pass is schedule_insns(), called for
   each function.  The work of the scheduler is organized in three
   levels: (1) function level: insns are subject to splitting,
   control-flow-graph is constructed, regions are computed (after
   reload, each region is of one block), (2) region level: control
   flow graph attributes required for interblock scheduling are
   computed (dominators, reachability, etc.), data dependences and
   priorities are computed, and (3) block level: insns in the block
   are actually scheduled.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "cfglayout.h"
#include "sched-int.h"
/* Define when we want to count REG_DEAD notes before and after scheduling
   for sanity checking.  We can't do that when conditional execution is used,
   as REG_DEAD notes exist only for unconditional deaths.  */
#if !defined (HAVE_conditional_execution) && defined (ENABLE_CHECKING)
#define CHECK_DEAD_NOTES 1
#else
#define CHECK_DEAD_NOTES 0
#endif
#ifdef INSN_SCHEDULING
/* Some accessor macros for h_i_d members only used within this file.  */
#define INSN_REF_COUNT(INSN) (h_i_d[INSN_UID (INSN)].ref_count)
#define FED_BY_SPEC_LOAD(insn) (h_i_d[INSN_UID (insn)].fed_by_spec_load)
#define IS_LOAD_INSN(insn) (h_i_d[INSN_UID (insn)].is_load_insn)
/* nr_inter/spec counts interblock/speculative motion for the function.  */
static int nr_inter, nr_spec;

/* Control flow graph edges are kept in circular lists.  */
typedef struct
{
  int from_block;
  int to_block;
  int next_in;
  int next_out;
} haifa_edge;
static haifa_edge *edge_table;

#define NEXT_IN(edge) (edge_table[edge].next_in)
#define NEXT_OUT(edge) (edge_table[edge].next_out)
#define FROM_BLOCK(edge) (edge_table[edge].from_block)
#define TO_BLOCK(edge) (edge_table[edge].to_block)
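
/* NEXT_IN/NEXT_OUT link each block's incoming and outgoing edges into
   circular lists; IN_EDGES/OUT_EDGES below give each block's list entry
   point.  An index of 0 means "no edge", which is why edge 0 of
   edge_table is left unused.  */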
/* Number of edges in the control flow graph.  (In fact, larger than
   that by 1, since edge 0 is unused.)  */
static int nr_edges;

/* Circular list of incoming/outgoing edges of a block.  */
static int *in_edges;
static int *out_edges;

#define IN_EDGES(block) (in_edges[block])
#define OUT_EDGES(block) (out_edges[block])

static int is_cfg_nonregular (void);
static int build_control_flow (struct edge_list *);
static void new_edge (int, int);
/* A region is the main entity for interblock scheduling: insns
   are allowed to move between blocks in the same region, along
   control flow graph edges, in the 'up' direction.  */
typedef struct
{
  int rgn_nr_blocks;		/* Number of blocks in region.  */
  int rgn_blocks;		/* First block of the region (an index into rgn_bb_table).  */
} region;

/* Number of regions in the procedure.  */
static int nr_regions;

/* Table of region descriptions.  */
static region *rgn_table;

/* Array of lists of regions' blocks.  */
static int *rgn_bb_table;

/* Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  */
static int *block_to_bb;

/* The number of the region containing a block.  */
static int *containing_rgn;

#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
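
/* Layout assumed by the macros above: a region RGN with
   RGN_BLOCKS (rgn) == k and RGN_NR_BLOCKS (rgn) == n owns the blocks
   rgn_bb_table[k] .. rgn_bb_table[k + n - 1], listed in topological
   order, and BLOCK_TO_BB maps each of those blocks back to its
   position 0 .. n - 1 within the region.  */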
void debug_regions (void);
static void find_single_block_region (void);
static void find_rgns (struct edge_list *);
static bool too_large (int, int *, int *);

extern void debug_live (int, int);

/* Blocks of the current region being scheduled.  */
static int current_nr_blocks;
static int current_blocks;

/* The mapping from bb to block.  */
#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
typedef struct
{
  int *first_member;		/* Pointer to the list start in bitlst_table.  */
  int nr_members;		/* The number of members of the bit list.  */
} bitlst;

static int bitlst_table_last;
static int *bitlst_table;

static void extract_bitlst (sbitmap, bitlst *);

/* Target info declarations.

   The block currently being scheduled is referred to as the "target" block,
   while other blocks in the region from which insns can be moved to the
   target are called "source" blocks.  The candidate structure holds info
   about such sources: are they valid?  Speculative?  Etc.  */
typedef bitlst bblst;
static candidate *candidate_table;

/* A speculative motion requires checking live information on the path
   from 'source' to 'target'.  The split blocks are those to be checked.
   After a speculative motion, live information should be modified in
   the 'update' blocks.

   Lists of split and update blocks for each candidate of the current
   target are in array bblst_table.  */
static int *bblst_table, bblst_size, bblst_last;

#define IS_VALID(src) ( candidate_table[src].is_valid )
#define IS_SPECULATIVE(src) ( candidate_table[src].is_speculative )
#define SRC_PROB(src) ( candidate_table[src].src_prob )

/* The bb being currently scheduled.  */
static int target_bb;
typedef bitlst edgelst;

/* Target info functions.  */
static void split_edges (int, int, edgelst *);
static void compute_trg_info (int);
void debug_candidate (int);
void debug_candidates (int);

/* Dominators array: dom[i] contains the sbitmap of dominators of
   bb i in the region.  */
static sbitmap *dom;

/* bb 0 is the only region entry.  */
#define IS_RGN_ENTRY(bb) (!bb)

/* Is bb_src dominated by bb_trg.  */
#define IS_DOMINATED(bb_src, bb_trg)		\
( TEST_BIT (dom[bb_src], bb_trg) )

/* Probability: Prob[i] is a float in [0, 1] which is the probability
   of bb i relative to the region entry.  */
static float *prob;

/* The probability of bb_src, relative to bb_trg.  Note, that while the
   'prob[bb]' is a float in [0, 1], this macro returns an integer
   in [0, 100].  */
#define GET_SRC_PROB(bb_src, bb_trg) ((int) (100.0 * (prob[bb_src] / \
						       prob[bb_trg])))
/* Bit-set of edges, where bit i stands for edge i.  */
typedef sbitmap edgeset;

/* Number of edges in the region.  */
static int rgn_nr_edges;

/* Array of size rgn_nr_edges.  */
static int *rgn_edges;

/* Mapping from each edge in the graph to its number in the rgn.  */
static int *edge_to_bit;
#define EDGE_TO_BIT(edge) (edge_to_bit[edge])

/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  This is actually
   the split edges of each bb relative to the region entry.

   pot_split[bb] is the set of potential split edges of bb.  */
static edgeset *pot_split;

/* For every bb, a set of its ancestor edges.  */
static edgeset *ancestor_edges;

static void compute_dom_prob_ps (int);

#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define IS_SPECULATIVE_INSN(INSN) (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN))))
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))

/* Parameters affecting the decision of rank_for_schedule().
   ??? Nope.  But MIN_PROBABILITY is used in compute_trg_info.  */
#define MIN_PROBABILITY 40
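
/* As used in compute_trg_info below, a candidate source block whose
   probability relative to the target (GET_SRC_PROB) falls below
   MIN_PROBABILITY percent is not considered valid for interblock
   motion.  */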
/* Speculative scheduling functions.  */
static int check_live_1 (int, rtx);
static void update_live_1 (int, rtx);
static int check_live (rtx, int);
static void update_live (rtx, int);
static void set_spec_fed (rtx);
static int is_pfree (rtx, int, int);
static int find_conditional_protection (rtx, int);
static int is_conditionally_protected (rtx, int, int);
static int is_prisky (rtx, int, int);
static int is_exception_free (rtx, int, int);

static bool sets_likely_spilled (rtx);
static void sets_likely_spilled_1 (rtx, rtx, void *);
static void add_branch_dependences (rtx, rtx);
static void compute_block_backward_dependences (int);
void debug_dependencies (void);

static void init_regions (void);
static void schedule_region (int);
static rtx concat_INSN_LIST (rtx, rtx);
static void concat_insn_mem_list (rtx, rtx, rtx *, rtx *);
static void propagate_deps (int, struct deps *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph.  */

/* Return 1 if control flow graph should not be constructed, 0 otherwise.

   We decide not to build the control flow graph if there is possibly more
   than one entry to the function, if computed branches exist, or if we
   have nonlocal gotos.  */

static int
is_cfg_nonregular (void)
{
  basic_block b;
  rtx insn;
  RTX_CODE code;

  /* If we have a label that could be the target of a nonlocal goto, then
     the cfg is not well structured.  */
  if (nonlocal_goto_handler_labels)
    return 1;

  /* If we have any forced labels, then the cfg is not well structured.  */
  if (forced_labels)
    return 1;

  /* If this function has a computed jump, then we consider the cfg
     not well structured.  */
  if (current_function_has_computed_jump)
    return 1;

  /* If we have exception handlers, then we consider the cfg not well
     structured.  ?!?  We should be able to handle this now that flow.c
     computes an accurate cfg for EH.  */
  if (current_function_has_exception_handlers ())
    return 1;

  /* If we have non-jumping insns which refer to labels, then we consider
     the cfg not well structured.  */
  /* Check for labels referred to other than by jumps.  */
  FOR_EACH_BB (b)
    for (insn = BB_HEAD (b); ; insn = NEXT_INSN (insn))
      {
        code = GET_CODE (insn);
        if (INSN_P (insn) && code != JUMP_INSN)
          {
            rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);

            if (note
                && ! (GET_CODE (NEXT_INSN (insn)) == JUMP_INSN
                      && find_reg_note (NEXT_INSN (insn), REG_LABEL,
                                        XEXP (note, 0))))
              return 1;
          }

        if (insn == BB_END (b))
          break;
      }

  /* All the tests passed.  Consider the cfg well structured.  */
  return 0;
}
/* Build the control flow graph and set nr_edges.

   Instead of trying to build a cfg ourselves, we rely on flow to
   do it for us.  Stamp out useless code (and bug) duplication.

   Return nonzero if an irregularity in the cfg is found which would
   prevent cross block scheduling.  */

static int
build_control_flow (struct edge_list *edge_list)
{
  int i, unreachable, num_edges;
  basic_block b;

  /* This already accounts for entry/exit edges.  */
  num_edges = NUM_EDGES (edge_list);

  /* Unreachable loops with more than one basic block are detected
     during the DFS traversal in find_rgns.

     Unreachable loops with a single block are detected here.  This
     test is redundant with the one in find_rgns, but it's much
     cheaper to go ahead and catch the trivial case here.  */
  unreachable = 0;
  FOR_EACH_BB (b)
    {
      if (b->pred == NULL
          || (b->pred->src == b
              && b->pred->pred_next == NULL))
        unreachable = 1;
    }
  /* ??? We can kill these soon.  */
  in_edges = xcalloc (last_basic_block, sizeof (int));
  out_edges = xcalloc (last_basic_block, sizeof (int));
  edge_table = xcalloc (num_edges, sizeof (haifa_edge));

  nr_edges = 0;
  for (i = 0; i < num_edges; i++)
    {
      edge e = INDEX_EDGE (edge_list, i);

      if (e->dest != EXIT_BLOCK_PTR
          && e->src != ENTRY_BLOCK_PTR)
        new_edge (e->src->index, e->dest->index);
    }

  /* Increment by 1, since edge 0 is unused.  */
  nr_edges++;

  return unreachable;
}
/* Record an edge in the control flow graph from SOURCE to TARGET.

   In theory, this is redundant with the s_succs computed above, but
   we have not converted all of haifa to use information from the
   integer lists.  */

static void
new_edge (int source, int target)
{
  int e, next_edge;
  int curr_edge, fst_edge;

  /* Check for duplicates.  */
  fst_edge = curr_edge = OUT_EDGES (source);
  while (curr_edge)
    {
      if (FROM_BLOCK (curr_edge) == source
          && TO_BLOCK (curr_edge) == target)
        return;

      curr_edge = NEXT_OUT (curr_edge);

      if (fst_edge == curr_edge)
        break;
    }

  e = ++nr_edges;

  FROM_BLOCK (e) = source;
  TO_BLOCK (e) = target;

  if (OUT_EDGES (source))
    {
      next_edge = NEXT_OUT (OUT_EDGES (source));
      NEXT_OUT (OUT_EDGES (source)) = e;
      NEXT_OUT (e) = next_edge;
    }
  else
    {
      OUT_EDGES (source) = e;
      NEXT_OUT (e) = e;
    }

  if (IN_EDGES (target))
    {
      next_edge = NEXT_IN (IN_EDGES (target));
      NEXT_IN (IN_EDGES (target)) = e;
      NEXT_IN (e) = next_edge;
    }
  else
    {
      IN_EDGES (target) = e;
      NEXT_IN (e) = e;
    }
}
/* Translate a bit-set SET to a list BL of the bit-set members.  */

static void
extract_bitlst (sbitmap set, bitlst *bl)
{
  int i;

  /* bblst table space is reused in each call to extract_bitlst.  */
  bitlst_table_last = 0;

  bl->first_member = &bitlst_table[bitlst_table_last];
  bl->nr_members = 0;

  /* Iterate over each word in the bitset.  */
  EXECUTE_IF_SET_IN_SBITMAP (set, 0, i,
  {
    bitlst_table[bitlst_table_last++] = i;
    (bl->nr_members)++;
  });
}
/* Functions for the construction of regions.  */

/* Print the regions, for debugging purposes.  Callable from debugger.  */

void
debug_regions (void)
{
  int rgn, bb;

  fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
               rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
        {
          current_blocks = RGN_BLOCKS (rgn);

          if (bb != BLOCK_TO_BB (BB_TO_BLOCK (bb)))
            abort ();

          fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
        }

      fprintf (sched_dump, "\n\n");
    }
}
/* Build a single block region for each basic block in the function.
   This allows for using the same code for interblock and basic block
   scheduling.  */

static void
find_single_block_region (void)
{
  basic_block bb;

  nr_regions = 0;

  FOR_EACH_BB (bb)
    {
      rgn_bb_table[nr_regions] = bb->index;
      RGN_NR_BLOCKS (nr_regions) = 1;
      RGN_BLOCKS (nr_regions) = nr_regions;
      CONTAINING_RGN (bb->index) = nr_regions;
      BLOCK_TO_BB (bb->index) = 0;
      nr_regions++;
    }
}
/* Update number of blocks and the estimate for number of insns
   in the region.  Return true if the region is "too large" for interblock
   scheduling (compile time considerations).  */

static bool
too_large (int block, int *num_bbs, int *num_insns)
{
  (*num_bbs)++;
  (*num_insns) += (INSN_LUID (BB_END (BASIC_BLOCK (block)))
                   - INSN_LUID (BB_HEAD (BASIC_BLOCK (block))));

  return ((*num_bbs > PARAM_VALUE (PARAM_MAX_SCHED_REGION_BLOCKS))
          || (*num_insns > PARAM_VALUE (PARAM_MAX_SCHED_REGION_INSNS)));
}
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
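/* Of two loop headers, the one with the smaller DFS number was reached
   earlier in the traversal and therefore encloses the other: the enclosing
   header loses its "inner" mark, and max_hdr[blk] is left pointing at the
   innermost header seen so far.  */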
#define UPDATE_LOOP_RELATIONS(blk, hdr)			\
{							\
  if (max_hdr[blk] == -1)				\
    max_hdr[blk] = hdr;					\
  else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr])		\
    RESET_BIT (inner, hdr);				\
  else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr])		\
    {							\
      RESET_BIT (inner, max_hdr[blk]);			\
      max_hdr[blk] = hdr;				\
    }							\
}
/* Find regions for interblock scheduling.

   A region for scheduling can be:

     * A loop-free procedure, or

     * A reducible inner loop, or

     * A basic block not contained in any other region.

   ?!? In theory we could build other regions based on extended basic
   blocks or reverse extended basic blocks.  Is it worth the trouble?

   Loop blocks that form a region are put into the region's block list
   in topological order.

   This procedure stores its results into the global region tables
   declared above (rgn_table, rgn_bb_table, block_to_bb and
   containing_rgn).

   We use dominator relationships to avoid making regions out of
   non-reducible loops.

   This procedure needs to be converted to work on pred/succ lists instead
   of edge tables.  That would simplify it somewhat.  */
613 find_rgns (struct edge_list
*edge_list
)
615 int *max_hdr
, *dfs_nr
, *stack
, *degree
;
617 int node
, child
, loop_head
, i
, head
, tail
;
618 int count
= 0, sp
, idx
= 0;
619 int current_edge
= out_edges
[ENTRY_BLOCK_PTR
->succ
->dest
->index
];
620 int num_bbs
, num_insns
, unreachable
;
621 int too_large_failure
;
624 /* Note if an edge has been passed. */
627 /* Note if a block is a natural loop header. */
630 /* Note if a block is a natural inner loop header. */
633 /* Note if a block is in the block queue. */
636 /* Note if a block is in the block queue. */
639 int num_edges
= NUM_EDGES (edge_list
);
641 /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops
642 and a mapping from block to its loop header (if the block is contained
645 Store results in HEADER, INNER, and MAX_HDR respectively, these will
646 be used as inputs to the second traversal.
648 STACK, SP and DFS_NR are only used during the first traversal. */
650 /* Allocate and initialize variables for the first traversal. */
651 max_hdr
= xmalloc (last_basic_block
* sizeof (int));
652 dfs_nr
= xcalloc (last_basic_block
, sizeof (int));
653 stack
= xmalloc (nr_edges
* sizeof (int));
655 inner
= sbitmap_alloc (last_basic_block
);
656 sbitmap_ones (inner
);
658 header
= sbitmap_alloc (last_basic_block
);
659 sbitmap_zero (header
);
661 passed
= sbitmap_alloc (nr_edges
);
662 sbitmap_zero (passed
);
664 in_queue
= sbitmap_alloc (last_basic_block
);
665 sbitmap_zero (in_queue
);
667 in_stack
= sbitmap_alloc (last_basic_block
);
668 sbitmap_zero (in_stack
);
670 for (i
= 0; i
< last_basic_block
; i
++)
673 /* DFS traversal to find inner loops in the cfg. */
678 if (current_edge
== 0 || TEST_BIT (passed
, current_edge
))
680 /* We have reached a leaf node or a node that was already
681 processed. Pop edges off the stack until we find
682 an edge that has not yet been processed. */
684 && (current_edge
== 0 || TEST_BIT (passed
, current_edge
)))
686 /* Pop entry off the stack. */
687 current_edge
= stack
[sp
--];
688 node
= FROM_BLOCK (current_edge
);
689 child
= TO_BLOCK (current_edge
);
690 RESET_BIT (in_stack
, child
);
691 if (max_hdr
[child
] >= 0 && TEST_BIT (in_stack
, max_hdr
[child
]))
692 UPDATE_LOOP_RELATIONS (node
, max_hdr
[child
]);
693 current_edge
= NEXT_OUT (current_edge
);
          /* See if we have finished the DFS tree traversal.  */
          if (sp < 0 && TEST_BIT (passed, current_edge))
            break;

          /* Nope, continue the traversal with the popped node.  */
704 /* Process a node. */
705 node
= FROM_BLOCK (current_edge
);
706 child
= TO_BLOCK (current_edge
);
707 SET_BIT (in_stack
, node
);
708 dfs_nr
[node
] = ++count
;
710 /* If the successor is in the stack, then we've found a loop.
711 Mark the loop, if it is not a natural loop, then it will
712 be rejected during the second traversal. */
713 if (TEST_BIT (in_stack
, child
))
716 SET_BIT (header
, child
);
717 UPDATE_LOOP_RELATIONS (node
, child
);
718 SET_BIT (passed
, current_edge
);
719 current_edge
= NEXT_OUT (current_edge
);
723 /* If the child was already visited, then there is no need to visit
724 it again. Just update the loop relationships and restart
728 if (max_hdr
[child
] >= 0 && TEST_BIT (in_stack
, max_hdr
[child
]))
729 UPDATE_LOOP_RELATIONS (node
, max_hdr
[child
]);
730 SET_BIT (passed
, current_edge
);
731 current_edge
= NEXT_OUT (current_edge
);
735 /* Push an entry on the stack and continue DFS traversal. */
736 stack
[++sp
] = current_edge
;
737 SET_BIT (passed
, current_edge
);
738 current_edge
= OUT_EDGES (child
);
740 /* This is temporary until haifa is converted to use rth's new
741 cfg routines which have true entry/exit blocks and the
742 appropriate edges from/to those blocks.
744 Generally we update dfs_nr for a node when we process its
745 out edge. However, if the node has no out edge then we will
746 not set dfs_nr for that node. This can confuse the scheduler
747 into thinking that we have unreachable blocks, which in turn
748 disables cross block scheduling.
750 So, if we have a node with no out edges, go ahead and mark it
752 if (current_edge
== 0)
753 dfs_nr
[child
] = ++count
;
756 /* Another check for unreachable blocks. The earlier test in
757 is_cfg_nonregular only finds unreachable blocks that do not
760 The DFS traversal will mark every block that is reachable from
761 the entry node by placing a nonzero value in dfs_nr. Thus if
762 dfs_nr is zero for any block, then it must be unreachable. */
765 if (dfs_nr
[bb
->index
] == 0)
771 /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array
772 to hold degree counts. */
776 degree
[bb
->index
] = 0;
777 for (i
= 0; i
< num_edges
; i
++)
779 edge e
= INDEX_EDGE (edge_list
, i
);
781 if (e
->dest
!= EXIT_BLOCK_PTR
)
782 degree
[e
->dest
->index
]++;
785 /* Do not perform region scheduling if there are any unreachable
  /* Second traversal: find reducible inner loops and topologically sort
     blocks of each region.  */
797 queue
= xmalloc (n_basic_blocks
* sizeof (int));
799 /* Find blocks which are inner loop headers. We still have non-reducible
800 loops to consider at this point. */
803 if (TEST_BIT (header
, bb
->index
) && TEST_BIT (inner
, bb
->index
))
808 /* Now check that the loop is reducible. We do this separate
809 from finding inner loops so that we do not find a reducible
810 loop which contains an inner non-reducible loop.
812 A simple way to find reducible/natural loops is to verify
813 that each block in the loop is dominated by the loop
816 If there exists a block that is not dominated by the loop
817 header, then the block is reachable from outside the loop
818 and thus the loop is not a natural loop. */
821 /* First identify blocks in the loop, except for the loop
823 if (bb
->index
== max_hdr
[jbb
->index
] && bb
!= jbb
)
825 /* Now verify that the block is dominated by the loop
827 if (!dominated_by_p (CDI_DOMINATORS
, jbb
, bb
))
832 /* If we exited the loop early, then I is the header of
833 a non-reducible loop and we should quit processing it
835 if (jbb
!= EXIT_BLOCK_PTR
)
838 /* I is a header of an inner loop, or block 0 in a subroutine
839 with no loops at all. */
841 too_large_failure
= 0;
842 loop_head
= max_hdr
[bb
->index
];
844 /* Decrease degree of all I's successors for topological
846 for (e
= bb
->succ
; e
; e
= e
->succ_next
)
847 if (e
->dest
!= EXIT_BLOCK_PTR
)
848 --degree
[e
->dest
->index
];
850 /* Estimate # insns, and count # blocks in the region. */
852 num_insns
= (INSN_LUID (BB_END (bb
))
853 - INSN_LUID (BB_HEAD (bb
)));
              /* Find all loop latches (blocks with back edges to the loop
                 header) or all the leaf blocks if the cfg has no loops.

                 Place those blocks into the queue.  */
862 /* Leaf nodes have only a single successor which must
865 && jbb
->succ
->dest
== EXIT_BLOCK_PTR
866 && jbb
->succ
->succ_next
== NULL
)
868 queue
[++tail
] = jbb
->index
;
869 SET_BIT (in_queue
, jbb
->index
);
871 if (too_large (jbb
->index
, &num_bbs
, &num_insns
))
873 too_large_failure
= 1;
882 for (e
= bb
->pred
; e
; e
= e
->pred_next
)
884 if (e
->src
== ENTRY_BLOCK_PTR
)
887 node
= e
->src
->index
;
889 if (max_hdr
[node
] == loop_head
&& node
!= bb
->index
)
891 /* This is a loop latch. */
892 queue
[++tail
] = node
;
893 SET_BIT (in_queue
, node
);
895 if (too_large (node
, &num_bbs
, &num_insns
))
897 too_large_failure
= 1;
              /* Now add all the blocks in the loop to the queue.

                 We know the loop is a natural loop; however the algorithm
                 above will not always mark certain blocks as being in the
                 loop.

                 The algorithm in the DFS traversal may not mark B & D as part
                 of the loop (i.e. they will not have max_hdr set to A).

                 We know they cannot be loop latches (else they would have
                 had max_hdr set since they'd have a backedge to a dominator
                 block).  So we don't need them on the initial queue.

                 We know they are part of the loop because they are dominated
                 by the loop header and can be reached by a backwards walk of
                 the edges starting with nodes on the initial queue.

                 It is safe and desirable to include those nodes in the
                 loop/scheduling region.  To do so we would need to decrease
                 the degree of a node if it is the target of a backedge
                 within the loop itself as the node is placed in the queue.

                 We do not do this because I'm not sure that the actual
                 scheduling code will properly handle this case. ?!?  */
934 while (head
< tail
&& !too_large_failure
)
937 child
= queue
[++head
];
939 for (e
= BASIC_BLOCK (child
)->pred
; e
; e
= e
->pred_next
)
941 node
= e
->src
->index
;
943 /* See discussion above about nodes not marked as in
944 this loop during the initial DFS traversal. */
945 if (e
->src
== ENTRY_BLOCK_PTR
946 || max_hdr
[node
] != loop_head
)
951 else if (!TEST_BIT (in_queue
, node
) && node
!= bb
->index
)
953 queue
[++tail
] = node
;
954 SET_BIT (in_queue
, node
);
956 if (too_large (node
, &num_bbs
, &num_insns
))
958 too_large_failure
= 1;
965 if (tail
>= 0 && !too_large_failure
)
967 /* Place the loop header into list of region blocks. */
968 degree
[bb
->index
] = -1;
969 rgn_bb_table
[idx
] = bb
->index
;
970 RGN_NR_BLOCKS (nr_regions
) = num_bbs
;
971 RGN_BLOCKS (nr_regions
) = idx
++;
972 CONTAINING_RGN (bb
->index
) = nr_regions
;
973 BLOCK_TO_BB (bb
->index
) = count
= 0;
975 /* Remove blocks from queue[] when their in degree
976 becomes zero. Repeat until no blocks are left on the
977 list. This produces a topological list of blocks in
984 if (degree
[child
] == 0)
989 rgn_bb_table
[idx
++] = child
;
990 BLOCK_TO_BB (child
) = ++count
;
991 CONTAINING_RGN (child
) = nr_regions
;
992 queue
[head
] = queue
[tail
--];
994 for (e
= BASIC_BLOCK (child
)->succ
;
997 if (e
->dest
!= EXIT_BLOCK_PTR
)
998 --degree
[e
->dest
->index
];
1010 /* Any block that did not end up in a region is placed into a region
1013 if (degree
[bb
->index
] >= 0)
1015 rgn_bb_table
[idx
] = bb
->index
;
1016 RGN_NR_BLOCKS (nr_regions
) = 1;
1017 RGN_BLOCKS (nr_regions
) = idx
++;
1018 CONTAINING_RGN (bb
->index
) = nr_regions
++;
1019 BLOCK_TO_BB (bb
->index
) = 0;
1025 sbitmap_free (passed
);
1026 sbitmap_free (header
);
1027 sbitmap_free (inner
);
1028 sbitmap_free (in_queue
);
1029 sbitmap_free (in_stack
);
/* Functions for regions scheduling information.  */

/* Compute dominators, probability, and potential-split-edges of bb.
   Assume that these values were already computed for bb's predecessors.  */

static void
compute_dom_prob_ps (int bb)
{
  int nxt_in_edge, fst_in_edge, pred;
  int fst_out_edge, nxt_out_edge, nr_out_edges, nr_rgn_out_edges;

  prob[bb] = 0.0;
  if (IS_RGN_ENTRY (bb))
    {
      SET_BIT (dom[bb], 0);
      prob[bb] = 1.0;
      return;
    }

  fst_in_edge = nxt_in_edge = IN_EDGES (BB_TO_BLOCK (bb));

  /* Initialize dom[bb] to '111..1'.  */
  sbitmap_ones (dom[bb]);

  do
    {
      pred = FROM_BLOCK (nxt_in_edge);
      sbitmap_a_and_b (dom[bb], dom[bb], dom[BLOCK_TO_BB (pred)]);
      sbitmap_a_or_b (ancestor_edges[bb], ancestor_edges[bb],
                      ancestor_edges[BLOCK_TO_BB (pred)]);

      SET_BIT (ancestor_edges[bb], EDGE_TO_BIT (nxt_in_edge));

      nr_out_edges = 1;
      nr_rgn_out_edges = 0;
      fst_out_edge = OUT_EDGES (pred);
      nxt_out_edge = NEXT_OUT (fst_out_edge);

      sbitmap_a_or_b (pot_split[bb], pot_split[bb],
                      pot_split[BLOCK_TO_BB (pred)]);

      SET_BIT (pot_split[bb], EDGE_TO_BIT (fst_out_edge));

      /* The successor doesn't belong in the region?  */
      if (CONTAINING_RGN (TO_BLOCK (fst_out_edge)) !=
          CONTAINING_RGN (BB_TO_BLOCK (bb)))
        ++nr_rgn_out_edges;

      while (fst_out_edge != nxt_out_edge)
        {
          ++nr_out_edges;
          /* The successor doesn't belong in the region?  */
          if (CONTAINING_RGN (TO_BLOCK (nxt_out_edge)) !=
              CONTAINING_RGN (BB_TO_BLOCK (bb)))
            ++nr_rgn_out_edges;
          SET_BIT (pot_split[bb], EDGE_TO_BIT (nxt_out_edge));
          nxt_out_edge = NEXT_OUT (nxt_out_edge);
        }

      /* Now nr_rgn_out_edges is the number of region-exit edges from
         pred, and nr_out_edges will be the number of pred out edges
         not leaving the region.  */
      nr_out_edges -= nr_rgn_out_edges;
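      /* Edges that leave the region still carry part of PRED's execution
         probability, so when PRED has region-exit successors its
         contribution is first discounted (by the 0.9 factor below) before
         being divided among the successors that stay in the region.  */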
      if (nr_rgn_out_edges > 0)
        prob[bb] += 0.9 * prob[BLOCK_TO_BB (pred)] / nr_out_edges;
      else
        prob[bb] += prob[BLOCK_TO_BB (pred)] / nr_out_edges;

      nxt_in_edge = NEXT_IN (nxt_in_edge);
    }
  while (fst_in_edge != nxt_in_edge);

  SET_BIT (dom[bb], bb);
  sbitmap_difference (pot_split[bb], pot_split[bb], ancestor_edges[bb]);

  if (sched_verbose >= 2)
    fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
             (int) (100.0 * prob[bb]));
}
/* Functions for target info.  */

/* Compute in BL the list of split-edges of bb_src relative to bb_trg.
   Note that bb_trg dominates bb_src.  */

static void
split_edges (int bb_src, int bb_trg, edgelst *bl)
{
  sbitmap src = sbitmap_alloc (pot_split[bb_src]->n_bits);
  sbitmap_copy (src, pot_split[bb_src]);

  sbitmap_difference (src, src, pot_split[bb_trg]);
  extract_bitlst (src, bl);
  sbitmap_free (src);
}
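
/* In other words, the split edges of bb_src relative to bb_trg are the
   potential-split edges of bb_src that are not already potential-split
   edges of bb_trg -- roughly, the edges through which control can escape
   once bb_trg has been reached but before bb_src is reached.  */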
1126 /* Find the valid candidate-source-blocks for the target block TRG, compute
1127 their probability, and check if they are speculative or not.
1128 For speculative sources, compute their update-blocks and split-blocks. */
1131 compute_trg_info (int trg
)
1135 int check_block
, update_idx
;
1136 int i
, j
, k
, fst_edge
, nxt_edge
;
1138 /* Define some of the fields for the target bb as well. */
1139 sp
= candidate_table
+ trg
;
1141 sp
->is_speculative
= 0;
1144 for (i
= trg
+ 1; i
< current_nr_blocks
; i
++)
1146 sp
= candidate_table
+ i
;
1148 sp
->is_valid
= IS_DOMINATED (i
, trg
);
1151 sp
->src_prob
= GET_SRC_PROB (i
, trg
);
1152 sp
->is_valid
= (sp
->src_prob
>= MIN_PROBABILITY
);
1157 split_edges (i
, trg
, &el
);
1158 sp
->is_speculative
= (el
.nr_members
) ? 1 : 0;
1159 if (sp
->is_speculative
&& !flag_schedule_speculative
)
1165 char *update_blocks
;
1167 /* Compute split blocks and store them in bblst_table.
1168 The TO block of every split edge is a split block. */
1169 sp
->split_bbs
.first_member
= &bblst_table
[bblst_last
];
1170 sp
->split_bbs
.nr_members
= el
.nr_members
;
1171 for (j
= 0; j
< el
.nr_members
; bblst_last
++, j
++)
1172 bblst_table
[bblst_last
] =
1173 TO_BLOCK (rgn_edges
[el
.first_member
[j
]]);
1174 sp
->update_bbs
.first_member
= &bblst_table
[bblst_last
];
1176 /* Compute update blocks and store them in bblst_table.
1177 For every split edge, look at the FROM block, and check
1178 all out edges. For each out edge that is not a split edge,
1179 add the TO block to the update block list. This list can end
1180 up with a lot of duplicates. We need to weed them out to avoid
1181 overrunning the end of the bblst_table. */
1182 update_blocks
= alloca (last_basic_block
);
1183 memset (update_blocks
, 0, last_basic_block
);
1186 for (j
= 0; j
< el
.nr_members
; j
++)
1188 check_block
= FROM_BLOCK (rgn_edges
[el
.first_member
[j
]]);
1189 fst_edge
= nxt_edge
= OUT_EDGES (check_block
);
1192 if (! update_blocks
[TO_BLOCK (nxt_edge
)])
1194 for (k
= 0; k
< el
.nr_members
; k
++)
1195 if (EDGE_TO_BIT (nxt_edge
) == el
.first_member
[k
])
1198 if (k
>= el
.nr_members
)
1200 bblst_table
[bblst_last
++] = TO_BLOCK (nxt_edge
);
1201 update_blocks
[TO_BLOCK (nxt_edge
)] = 1;
1206 nxt_edge
= NEXT_OUT (nxt_edge
);
1208 while (fst_edge
!= nxt_edge
);
1210 sp
->update_bbs
.nr_members
= update_idx
;
1212 /* Make sure we didn't overrun the end of bblst_table. */
1213 if (bblst_last
> bblst_size
)
1218 sp
->split_bbs
.nr_members
= sp
->update_bbs
.nr_members
= 0;
1220 sp
->is_speculative
= 0;
1226 /* Print candidates info, for debugging purposes. Callable from debugger. */
1229 debug_candidate (int i
)
1231 if (!candidate_table
[i
].is_valid
)
1234 if (candidate_table
[i
].is_speculative
)
1237 fprintf (sched_dump
, "src b %d bb %d speculative \n", BB_TO_BLOCK (i
), i
);
1239 fprintf (sched_dump
, "split path: ");
1240 for (j
= 0; j
< candidate_table
[i
].split_bbs
.nr_members
; j
++)
1242 int b
= candidate_table
[i
].split_bbs
.first_member
[j
];
1244 fprintf (sched_dump
, " %d ", b
);
1246 fprintf (sched_dump
, "\n");
1248 fprintf (sched_dump
, "update path: ");
1249 for (j
= 0; j
< candidate_table
[i
].update_bbs
.nr_members
; j
++)
1251 int b
= candidate_table
[i
].update_bbs
.first_member
[j
];
1253 fprintf (sched_dump
, " %d ", b
);
1255 fprintf (sched_dump
, "\n");
1259 fprintf (sched_dump
, " src %d equivalent\n", BB_TO_BLOCK (i
));
1263 /* Print candidates info, for debugging purposes. Callable from debugger. */
1266 debug_candidates (int trg
)
1270 fprintf (sched_dump
, "----------- candidate table: target: b=%d bb=%d ---\n",
1271 BB_TO_BLOCK (trg
), trg
);
1272 for (i
= trg
+ 1; i
< current_nr_blocks
; i
++)
1273 debug_candidate (i
);
1276 /* Functions for speculative scheduling. */
1278 /* Return 0 if x is a set of a register alive in the beginning of one
1279 of the split-blocks of src, otherwise return 1. */
1282 check_live_1 (int src
, rtx x
)
1286 rtx reg
= SET_DEST (x
);
1291 while (GET_CODE (reg
) == SUBREG
|| GET_CODE (reg
) == ZERO_EXTRACT
1292 || GET_CODE (reg
) == SIGN_EXTRACT
1293 || GET_CODE (reg
) == STRICT_LOW_PART
)
1294 reg
= XEXP (reg
, 0);
1296 if (GET_CODE (reg
) == PARALLEL
)
1300 for (i
= XVECLEN (reg
, 0) - 1; i
>= 0; i
--)
1301 if (XEXP (XVECEXP (reg
, 0, i
), 0) != 0)
1302 if (check_live_1 (src
, XEXP (XVECEXP (reg
, 0, i
), 0)))
1308 if (GET_CODE (reg
) != REG
)
1311 regno
= REGNO (reg
);
1313 if (regno
< FIRST_PSEUDO_REGISTER
&& global_regs
[regno
])
1315 /* Global registers are assumed live. */
1320 if (regno
< FIRST_PSEUDO_REGISTER
)
1322 /* Check for hard registers. */
1323 int j
= hard_regno_nregs
[regno
][GET_MODE (reg
)];
1326 for (i
= 0; i
< candidate_table
[src
].split_bbs
.nr_members
; i
++)
1328 int b
= candidate_table
[src
].split_bbs
.first_member
[i
];
1330 if (REGNO_REG_SET_P (BASIC_BLOCK (b
)->global_live_at_start
,
1340 /* Check for pseudo registers. */
1341 for (i
= 0; i
< candidate_table
[src
].split_bbs
.nr_members
; i
++)
1343 int b
= candidate_table
[src
].split_bbs
.first_member
[i
];
1345 if (REGNO_REG_SET_P (BASIC_BLOCK (b
)->global_live_at_start
, regno
))
1356 /* If x is a set of a register R, mark that R is alive in the beginning
1357 of every update-block of src. */
1360 update_live_1 (int src
, rtx x
)
1364 rtx reg
= SET_DEST (x
);
1369 while (GET_CODE (reg
) == SUBREG
|| GET_CODE (reg
) == ZERO_EXTRACT
1370 || GET_CODE (reg
) == SIGN_EXTRACT
1371 || GET_CODE (reg
) == STRICT_LOW_PART
)
1372 reg
= XEXP (reg
, 0);
1374 if (GET_CODE (reg
) == PARALLEL
)
1378 for (i
= XVECLEN (reg
, 0) - 1; i
>= 0; i
--)
1379 if (XEXP (XVECEXP (reg
, 0, i
), 0) != 0)
1380 update_live_1 (src
, XEXP (XVECEXP (reg
, 0, i
), 0));
1385 if (GET_CODE (reg
) != REG
)
1388 /* Global registers are always live, so the code below does not apply
1391 regno
= REGNO (reg
);
1393 if (regno
>= FIRST_PSEUDO_REGISTER
|| !global_regs
[regno
])
1395 if (regno
< FIRST_PSEUDO_REGISTER
)
1397 int j
= hard_regno_nregs
[regno
][GET_MODE (reg
)];
1400 for (i
= 0; i
< candidate_table
[src
].update_bbs
.nr_members
; i
++)
1402 int b
= candidate_table
[src
].update_bbs
.first_member
[i
];
1404 SET_REGNO_REG_SET (BASIC_BLOCK (b
)->global_live_at_start
,
1411 for (i
= 0; i
< candidate_table
[src
].update_bbs
.nr_members
; i
++)
1413 int b
= candidate_table
[src
].update_bbs
.first_member
[i
];
1415 SET_REGNO_REG_SET (BASIC_BLOCK (b
)->global_live_at_start
, regno
);
/* Return 1 if insn can be speculatively moved from block src to trg,
   otherwise return 0.  Called before first insertion of insn to
   ready-list or before the scheduling.  */

static int
check_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return check_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;

      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
             || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
            && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j)))
          return 0;

      return 1;
    }

  return 1;
}
/* Update the live registers info after insn was moved speculatively from
   block src to trg.  */

static void
update_live (rtx insn, int src)
{
  /* Find the registers set by instruction.  */
  if (GET_CODE (PATTERN (insn)) == SET
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    update_live_1 (src, PATTERN (insn));
  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      int j;

      for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--)
        if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET
            || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER)
          update_live_1 (src, XVECEXP (PATTERN (insn), 0, j));
    }
}
/* Nonzero if block bb_to is equal to, or reachable from block bb_from.  */
#define IS_REACHABLE(bb_from, bb_to)					\
  (bb_from == bb_to							\
   || IS_RGN_ENTRY (bb_from)						\
   || (TEST_BIT (ancestor_edges[bb_to],					\
		 EDGE_TO_BIT (IN_EDGES (BB_TO_BLOCK (bb_from))))))
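
/* The last alternative asks whether one of bb_from's incoming edges is
   recorded as an ancestor edge of bb_to, i.e. whether bb_from lies on a
   path from the region entry to bb_to.  */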
/* Turns on the fed_by_spec_load flag for insns fed by load_insn.  */

static void
set_spec_fed (rtx load_insn)
{
  rtx link;

  for (link = INSN_DEPEND (load_insn); link; link = XEXP (link, 1))
    if (GET_MODE (link) == VOIDmode)
      FED_BY_SPEC_LOAD (XEXP (link, 0)) = 1;
}				/* set_spec_fed */
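
/* A forward dependence link whose mode is VOIDmode represents a true
   (DEF-USE) data dependence; other dependence kinds are encoded with
   different link modes (see also the check in is_conditionally_protected
   below).  */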
/* On the path from the insn to load_insn_bb, find a conditional
branch depending on insn, that guards the speculative load.  */

static int
find_conditional_protection (rtx insn, int load_insn_bb)
{
  rtx link;

  /* Iterate through DEF-USE forward dependences.  */
  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
    {
      rtx next = XEXP (link, 0);
      if ((CONTAINING_RGN (BLOCK_NUM (next)) ==
           CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb)))
          && IS_REACHABLE (INSN_BB (next), load_insn_bb)
          && load_insn_bb != INSN_BB (next)
          && GET_MODE (link) == VOIDmode
          && (GET_CODE (next) == JUMP_INSN
              || find_conditional_protection (next, load_insn_bb)))
        return 1;
    }
  return 0;
}				/* find_conditional_protection */
/* Returns 1 if the same insn1 that participates in the computation
   of load_insn's address is feeding a conditional branch that is
   guarding on load_insn.  This is true if we find the two DEF-USE
   dependences:
   insn1 -> ... -> conditional-branch
   insn1 -> ... -> load_insn,
   and if a flow path exists:
   insn1 -> ... -> conditional-branch -> ... -> load_insn,
   and if insn1 is on the path
   region-entry -> ... -> bb_trg -> ... load_insn.

   Locate insn1 by climbing on LOG_LINKS from load_insn.
   Locate the branch by following INSN_DEPEND from insn1.  */
1525 is_conditionally_protected (rtx load_insn
, int bb_src
, int bb_trg
)
1529 for (link
= LOG_LINKS (load_insn
); link
; link
= XEXP (link
, 1))
1531 rtx insn1
= XEXP (link
, 0);
1533 /* Must be a DEF-USE dependence upon non-branch. */
1534 if (GET_MODE (link
) != VOIDmode
1535 || GET_CODE (insn1
) == JUMP_INSN
)
1538 /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */
1539 if (INSN_BB (insn1
) == bb_src
1540 || (CONTAINING_RGN (BLOCK_NUM (insn1
))
1541 != CONTAINING_RGN (BB_TO_BLOCK (bb_src
)))
1542 || (!IS_REACHABLE (bb_trg
, INSN_BB (insn1
))
1543 && !IS_REACHABLE (INSN_BB (insn1
), bb_trg
)))
1546 /* Now search for the conditional-branch. */
1547 if (find_conditional_protection (insn1
, bb_src
))
1550 /* Recursive step: search another insn1, "above" current insn1. */
1551 return is_conditionally_protected (insn1
, bb_src
, bb_trg
);
1554 /* The chain does not exist. */
1556 } /* is_conditionally_protected */
1558 /* Returns 1 if a clue for "similar load" 'insn2' is found, and hence
1559 load_insn can move speculatively from bb_src to bb_trg. All the
1560 following must hold:
1562 (1) both loads have 1 base register (PFREE_CANDIDATEs).
1563 (2) load_insn and load1 have a def-use dependence upon
1564 the same insn 'insn1'.
1565 (3) either load2 is in bb_trg, or:
1566 - there's only one split-block, and
1567 - load1 is on the escape path, and
1569 From all these we can conclude that the two loads access memory
1570 addresses that differ at most by a constant, and hence if moving
1571 load_insn would cause an exception, it would have been caused by
1575 is_pfree (rtx load_insn
, int bb_src
, int bb_trg
)
1578 candidate
*candp
= candidate_table
+ bb_src
;
1580 if (candp
->split_bbs
.nr_members
!= 1)
1581 /* Must have exactly one escape block. */
1584 for (back_link
= LOG_LINKS (load_insn
);
1585 back_link
; back_link
= XEXP (back_link
, 1))
1587 rtx insn1
= XEXP (back_link
, 0);
1589 if (GET_MODE (back_link
) == VOIDmode
)
1591 /* Found a DEF-USE dependence (insn1, load_insn). */
1594 for (fore_link
= INSN_DEPEND (insn1
);
1595 fore_link
; fore_link
= XEXP (fore_link
, 1))
1597 rtx insn2
= XEXP (fore_link
, 0);
1598 if (GET_MODE (fore_link
) == VOIDmode
)
1600 /* Found a DEF-USE dependence (insn1, insn2). */
1601 if (haifa_classify_insn (insn2
) != PFREE_CANDIDATE
)
1602 /* insn2 not guaranteed to be a 1 base reg load. */
1605 if (INSN_BB (insn2
) == bb_trg
)
1606 /* insn2 is the similar load, in the target block. */
1609 if (*(candp
->split_bbs
.first_member
) == BLOCK_NUM (insn2
))
1610 /* insn2 is a similar load, in a split-block. */
1617 /* Couldn't find a similar load. */
1621 /* Return 1 if load_insn is prisky (i.e. if load_insn is fed by
1622 a load moved speculatively, or if load_insn is protected by
1623 a compare on load_insn's address). */
1626 is_prisky (rtx load_insn
, int bb_src
, int bb_trg
)
1628 if (FED_BY_SPEC_LOAD (load_insn
))
1631 if (LOG_LINKS (load_insn
) == NULL
)
1632 /* Dependence may 'hide' out of the region. */
1635 if (is_conditionally_protected (load_insn
, bb_src
, bb_trg
))
/* Insn is a candidate to be moved speculatively from bb_src to bb_trg.
   Return 1 if insn is exception-free (and the motion is valid)
   and 0 otherwise.  */
1646 is_exception_free (rtx insn
, int bb_src
, int bb_trg
)
1648 int insn_class
= haifa_classify_insn (insn
);
1650 /* Handle non-load insns. */
1661 if (!flag_schedule_speculative_load
)
1663 IS_LOAD_INSN (insn
) = 1;
1670 case PFREE_CANDIDATE
:
1671 if (is_pfree (insn
, bb_src
, bb_trg
))
1673 /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */
1674 case PRISKY_CANDIDATE
:
1675 if (!flag_schedule_speculative_load_dangerous
1676 || is_prisky (insn
, bb_src
, bb_trg
))
1682 return flag_schedule_speculative_load_dangerous
;
/* The number of insns from the current block scheduled so far.  */
static int sched_target_n_insns;
/* The number of insns from the current block to be scheduled in total.  */
static int target_n_insns;
/* The number of insns from the entire region scheduled so far.  */
static int sched_n_insns;
/* Nonzero if the last scheduled insn was a jump.  */
static int last_was_jump;
/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (struct ready_list *);
static int can_schedule_ready_p (rtx);
static int new_ready (rtx);
static int schedule_more_p (void);
static const char *rgn_print_insn (rtx, int);
static int rgn_rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return ! last_was_jump && sched_target_n_insns < target_n_insns;
}
1712 /* Add all insns that are initially ready to the ready list READY. Called
1713 once before scheduling a set of insns. */
1716 init_ready_list (struct ready_list
*ready
)
1718 rtx prev_head
= current_sched_info
->prev_head
;
1719 rtx next_tail
= current_sched_info
->next_tail
;
1724 sched_target_n_insns
= 0;
1728 /* Print debugging information. */
1729 if (sched_verbose
>= 5)
1730 debug_dependencies ();
1732 /* Prepare current target block info. */
1733 if (current_nr_blocks
> 1)
1735 candidate_table
= xmalloc (current_nr_blocks
* sizeof (candidate
));
1738 /* bblst_table holds split blocks and update blocks for each block after
1739 the current one in the region. split blocks and update blocks are
1740 the TO blocks of region edges, so there can be at most rgn_nr_edges
1742 bblst_size
= (current_nr_blocks
- target_bb
) * rgn_nr_edges
;
1743 bblst_table
= xmalloc (bblst_size
* sizeof (int));
1745 bitlst_table_last
= 0;
1746 bitlst_table
= xmalloc (rgn_nr_edges
* sizeof (int));
1748 compute_trg_info (target_bb
);
1751 /* Initialize ready list with all 'ready' insns in target block.
1752 Count number of insns in the target block being scheduled. */
1753 for (insn
= NEXT_INSN (prev_head
); insn
!= next_tail
; insn
= NEXT_INSN (insn
))
1755 if (INSN_DEP_COUNT (insn
) == 0)
1757 ready_add (ready
, insn
);
1759 if (targetm
.sched
.adjust_priority
)
1760 INSN_PRIORITY (insn
) =
1761 targetm
.sched
.adjust_priority (insn
, INSN_PRIORITY (insn
));
1766 /* Add to ready list all 'ready' insns in valid source blocks.
1767 For speculative insns, check-live, exception-free, and
1769 for (bb_src
= target_bb
+ 1; bb_src
< current_nr_blocks
; bb_src
++)
1770 if (IS_VALID (bb_src
))
1776 get_block_head_tail (BB_TO_BLOCK (bb_src
), &head
, &tail
);
1777 src_next_tail
= NEXT_INSN (tail
);
1780 for (insn
= src_head
; insn
!= src_next_tail
; insn
= NEXT_INSN (insn
))
1782 if (! INSN_P (insn
))
1785 if (!CANT_MOVE (insn
)
1786 && (!IS_SPECULATIVE_INSN (insn
)
1787 || ((((!targetm
.sched
.use_dfa_pipeline_interface
1788 || !targetm
.sched
.use_dfa_pipeline_interface ())
1789 && insn_issue_delay (insn
) <= 3)
1790 || (targetm
.sched
.use_dfa_pipeline_interface
1791 && targetm
.sched
.use_dfa_pipeline_interface ()
1792 && (recog_memoized (insn
) < 0
1793 || min_insn_conflict_delay (curr_state
,
1795 && check_live (insn
, bb_src
)
1796 && is_exception_free (insn
, bb_src
, target_bb
))))
1797 if (INSN_DEP_COUNT (insn
) == 0)
1799 ready_add (ready
, insn
);
1801 if (targetm
.sched
.adjust_priority
)
1802 INSN_PRIORITY (insn
) =
1803 targetm
.sched
.adjust_priority (insn
, INSN_PRIORITY (insn
));
/* Called after taking INSN from the ready list.  Returns nonzero if this
   insn can be scheduled, zero if we should silently discard it.  */
1813 can_schedule_ready_p (rtx insn
)
1815 if (GET_CODE (insn
) == JUMP_INSN
)
1818 /* An interblock motion? */
1819 if (INSN_BB (insn
) != target_bb
)
1823 if (IS_SPECULATIVE_INSN (insn
))
1825 if (!check_live (insn
, INSN_BB (insn
)))
1827 update_live (insn
, INSN_BB (insn
));
1829 /* For speculative load, mark insns fed by it. */
1830 if (IS_LOAD_INSN (insn
) || FED_BY_SPEC_LOAD (insn
))
1831 set_spec_fed (insn
);
1837 /* Update source block boundaries. */
1838 b1
= BLOCK_FOR_INSN (insn
);
1839 if (insn
== BB_HEAD (b1
) && insn
== BB_END (b1
))
1841 /* We moved all the insns in the basic block.
1842 Emit a note after the last insn and update the
1843 begin/end boundaries to point to the note. */
1844 rtx note
= emit_note_after (NOTE_INSN_DELETED
, insn
);
1845 BB_HEAD (b1
) = note
;
1848 else if (insn
== BB_END (b1
))
1850 /* We took insns from the end of the basic block,
1851 so update the end of block boundary so that it
1852 points to the first insn we did not move. */
1853 BB_END (b1
) = PREV_INSN (insn
);
1855 else if (insn
== BB_HEAD (b1
))
1857 /* We took insns from the start of the basic block,
1858 so update the start of block boundary so that
1859 it points to the first insn we did not move. */
1860 BB_HEAD (b1
) = NEXT_INSN (insn
);
1865 /* In block motion. */
1866 sched_target_n_insns
++;
1873 /* Called after INSN has all its dependencies resolved. Return nonzero
1874 if it should be moved to the ready list or the queue, or zero if we
1875 should silently discard it. */
1877 new_ready (rtx next
)
1879 /* For speculative insns, before inserting to ready/queue,
1880 check live, exception-free, and issue-delay. */
1881 if (INSN_BB (next
) != target_bb
1882 && (!IS_VALID (INSN_BB (next
))
1884 || (IS_SPECULATIVE_INSN (next
)
1886 || (targetm
.sched
.use_dfa_pipeline_interface
1887 && targetm
.sched
.use_dfa_pipeline_interface ()
1888 && recog_memoized (next
) >= 0
1889 && min_insn_conflict_delay (curr_state
, next
,
1891 || ((!targetm
.sched
.use_dfa_pipeline_interface
1892 || !targetm
.sched
.use_dfa_pipeline_interface ())
1893 && insn_issue_delay (next
) > 3)
1894 || !check_live (next
, INSN_BB (next
))
1895 || !is_exception_free (next
, INSN_BB (next
), target_bb
)))))
1900 /* Return a string that contains the insn uid and optionally anything else
1901 necessary to identify this insn in an output. It's valid to use a
1902 static buffer for this. The ALIGNED parameter should cause the string
1903 to be formatted so that multiple output lines will line up nicely. */
1906 rgn_print_insn (rtx insn
, int aligned
)
1908 static char tmp
[80];
1911 sprintf (tmp
, "b%3d: i%4d", INSN_BB (insn
), INSN_UID (insn
));
1914 if (current_nr_blocks
> 1 && INSN_BB (insn
) != target_bb
)
1915 sprintf (tmp
, "%d/b%d", INSN_UID (insn
), INSN_BB (insn
));
1917 sprintf (tmp
, "%d", INSN_UID (insn
));
1922 /* Compare priority of two insns. Return a positive number if the second
1923 insn is to be preferred for scheduling, and a negative one if the first
1924 is to be preferred. Zero if they are equally good. */
1927 rgn_rank (rtx insn1
, rtx insn2
)
  /* Some comparisons make sense in interblock scheduling only.  */
1930 if (INSN_BB (insn1
) != INSN_BB (insn2
))
1932 int spec_val
, prob_val
;
      /* Prefer an inblock motion over an interblock motion.  */
1935 if ((INSN_BB (insn2
) == target_bb
) && (INSN_BB (insn1
) != target_bb
))
1937 if ((INSN_BB (insn1
) == target_bb
) && (INSN_BB (insn2
) != target_bb
))
      /* Prefer a useful motion over a speculative one.  */
1941 spec_val
= IS_SPECULATIVE_INSN (insn1
) - IS_SPECULATIVE_INSN (insn2
);
1945 /* Prefer a more probable (speculative) insn. */
1946 prob_val
= INSN_PROBABILITY (insn2
) - INSN_PROBABILITY (insn1
);
/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
contributes_to_priority (rtx next, rtx insn)
{
  return BLOCK_NUM (next) == BLOCK_NUM (insn);
}
/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
   conditionally set before INSN.  Store the set of registers that
   must be considered as used by this jump in USED and that of
   registers that must be considered as set in SET.  */

static void
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
                               regset cond_exec ATTRIBUTE_UNUSED,
                               regset used ATTRIBUTE_UNUSED,
                               regset set ATTRIBUTE_UNUSED)
{
  /* Nothing to do here, since we postprocess jumps in
     add_branch_dependences.  */
}
1978 /* Used in schedule_insns to initialize current_sched_info for scheduling
1979 regions (or single basic blocks). */
1981 static struct sched_info region_sched_info
=
1984 can_schedule_ready_p
,
1989 contributes_to_priority
,
1990 compute_jump_reg_dependencies
,
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register.  */

static bool
sets_likely_spilled (rtx pat)
{
  bool ret = false;
  note_stores (pat, sets_likely_spilled_1, &ret);
  return ret;
}

static void
sets_likely_spilled_1 (rtx x, rtx pat, void *data)
{
  bool *ret = (bool *) data;

  if (GET_CODE (pat) == SET
      && REG_P (x)
      && REGNO (x) < FIRST_PSEUDO_REGISTER
      && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (x))))
    *ret = true;
}
2019 /* Add dependences so that branches are scheduled to run last in their
2023 add_branch_dependences (rtx head
, rtx tail
)
2027 /* For all branches, calls, uses, clobbers, cc0 setters, and instructions
2028 that can throw exceptions, force them to remain in order at the end of
2029 the block by adding dependencies and giving the last a high priority.
2030 There may be notes present, and prev_head may also be a note.
2032 Branches must obviously remain at the end. Calls should remain at the
2033 end since moving them results in worse register allocation. Uses remain
2034 at the end to ensure proper register allocation.
2036 cc0 setters remain at the end because they can't be moved away from
2039 Insns setting CLASS_LIKELY_SPILLED_P registers (usually return values)
2040 are not moved before reload because we can wind up with register
2041 allocation failures. */
2045 while (GET_CODE (insn
) == CALL_INSN
2046 || GET_CODE (insn
) == JUMP_INSN
2047 || (GET_CODE (insn
) == INSN
2048 && (GET_CODE (PATTERN (insn
)) == USE
2049 || GET_CODE (PATTERN (insn
)) == CLOBBER
2050 || can_throw_internal (insn
)
2052 || sets_cc0_p (PATTERN (insn
))
2054 || (!reload_completed
2055 && sets_likely_spilled (PATTERN (insn
)))))
2056 || GET_CODE (insn
) == NOTE
)
2058 if (GET_CODE (insn
) != NOTE
)
2060 if (last
!= 0 && !find_insn_list (insn
, LOG_LINKS (last
)))
2062 add_dependence (last
, insn
, REG_DEP_ANTI
);
2063 INSN_REF_COUNT (insn
)++;
2066 CANT_MOVE (insn
) = 1;
2071 /* Don't overrun the bounds of the basic block. */
2075 insn
= PREV_INSN (insn
);
2078 /* Make sure these insns are scheduled last in their block. */
2081 while (insn
!= head
)
2083 insn
= prev_nonnote_insn (insn
);
2085 if (INSN_REF_COUNT (insn
) != 0)
2088 add_dependence (last
, insn
, REG_DEP_ANTI
);
2089 INSN_REF_COUNT (insn
) = 1;
/* Data structures for the computation of data dependences in a region.  We
   keep one `deps' structure for every basic block.  Before analyzing the
   data dependences for a bb, its variables are initialized as a function of
   the variables of its predecessors.  When the analysis for a bb completes,
   we save the contents to the corresponding bb_deps[bb] variable.  */

static struct deps *bb_deps;
/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD.  */

static rtx
concat_INSN_LIST (rtx copy, rtx old)
{
  rtx new = old;
  for (; copy ; copy = XEXP (copy, 1))
    new = alloc_INSN_LIST (XEXP (copy, 0), new);
  return new;
}
2113 concat_insn_mem_list (rtx copy_insns
, rtx copy_mems
, rtx
*old_insns_p
,
2116 rtx new_insns
= *old_insns_p
;
2117 rtx new_mems
= *old_mems_p
;
2121 new_insns
= alloc_INSN_LIST (XEXP (copy_insns
, 0), new_insns
);
2122 new_mems
= alloc_EXPR_LIST (VOIDmode
, XEXP (copy_mems
, 0), new_mems
);
2123 copy_insns
= XEXP (copy_insns
, 1);
2124 copy_mems
= XEXP (copy_mems
, 1);
2127 *old_insns_p
= new_insns
;
2128 *old_mems_p
= new_mems
;

/* After computing the dependencies for block BB, propagate the dependencies
   found in TMP_DEPS to the successors of the block.  */
static void
propagate_deps (int bb, struct deps *pred_deps)
{
  int b = BB_TO_BLOCK (bb);
  int e, first_edge;

  /* bb's structures are inherited by its successors.  */
  first_edge = e = OUT_EDGES (b);
  if (e > 0)
    do
      {
	int b_succ = TO_BLOCK (e);
	int bb_succ = BLOCK_TO_BB (b_succ);
	struct deps *succ_deps = bb_deps + bb_succ;
	int reg;

	/* Only bbs "below" bb, in the same region, are interesting.  */
	if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ)
	    || bb_succ <= bb)
	  {
	    e = NEXT_OUT (e);
	    continue;
	  }

	/* The reg_last lists are inherited by bb_succ.  */
	EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg,
	  {
	    struct deps_reg *pred_rl = &pred_deps->reg_last[reg];
	    struct deps_reg *succ_rl = &succ_deps->reg_last[reg];

	    succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses);
	    succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets);
	    succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers,
						  succ_rl->clobbers);
	    succ_rl->uses_length += pred_rl->uses_length;
	    succ_rl->clobbers_length += pred_rl->clobbers_length;
	  });
	IOR_REG_SET (&succ_deps->reg_last_in_use,
		     &pred_deps->reg_last_in_use);

	/* Mem read/write lists are inherited by bb_succ.  */
	concat_insn_mem_list (pred_deps->pending_read_insns,
			      pred_deps->pending_read_mems,
			      &succ_deps->pending_read_insns,
			      &succ_deps->pending_read_mems);
	concat_insn_mem_list (pred_deps->pending_write_insns,
			      pred_deps->pending_write_mems,
			      &succ_deps->pending_write_insns,
			      &succ_deps->pending_write_mems);

	succ_deps->last_pending_memory_flush
	  = concat_INSN_LIST (pred_deps->last_pending_memory_flush,
			      succ_deps->last_pending_memory_flush);

	succ_deps->pending_lists_length += pred_deps->pending_lists_length;
	succ_deps->pending_flush_length += pred_deps->pending_flush_length;

	/* last_function_call is inherited by bb_succ.  */
	succ_deps->last_function_call
	  = concat_INSN_LIST (pred_deps->last_function_call,
			      succ_deps->last_function_call);

	/* sched_before_next_call is inherited by bb_succ.  */
	succ_deps->sched_before_next_call
	  = concat_INSN_LIST (pred_deps->sched_before_next_call,
			      succ_deps->sched_before_next_call);

	e = NEXT_OUT (e);
      }
    while (e != first_edge);

  /* These lists should point to the right place, for correct
     freeing later.  */
  bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns;
  bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems;
  bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns;
  bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems;

  /* Can't allow these to be freed twice.  */
  pred_deps->pending_read_insns = 0;
  pred_deps->pending_read_mems = 0;
  pred_deps->pending_write_insns = 0;
  pred_deps->pending_write_mems = 0;
}

/* Compute backward dependences inside bb.  In a multiple blocks region:
   (1) a bb is analyzed after its predecessors, and (2) the lists in
   effect at the end of bb (after analyzing for bb) are inherited by
   bb's successors.

   Specifically for reg-reg data dependences, the block insns are
   scanned by sched_analyze () top-to-bottom.  Two lists are
   maintained by sched_analyze (): reg_last[].sets for register DEFs,
   and reg_last[].uses for register USEs.

   When analysis is completed for bb, we update for its successors:
   ;  - DEFS[succ] = Union (DEFS [succ], DEFS [bb])
   ;  - USES[succ] = Union (USES [succ], USES [bb])

   The mechanism for computing mem-mem data dependence is very
   similar, and the result is interblock dependences in the region.  */
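
/* For example (roughly): if insn I1 in bb writes r5, then after the union
   above DEFS[succ] for r5 also lists I1, so when sched_analyze later scans
   a use of r5 in the successor it records a dependence on I1, yielding an
   interblock dependence within the region.  */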

static void
compute_block_backward_dependences (int bb)
{
  rtx head, tail;
  struct deps tmp_deps;

  tmp_deps = bb_deps[bb];

  /* Do the analysis for this block.  */
  get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
  sched_analyze (&tmp_deps, head, tail);
  add_branch_dependences (head, tail);

  if (current_nr_blocks > 1)
    propagate_deps (bb, &tmp_deps);

  /* Free up the INSN_LISTs.  */
  free_deps (&tmp_deps);
}

/* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add
   them to the unused_*_list variables, so that they can be reused.  */

static void
free_pending_lists (void)
{
  int bb;

  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      free_INSN_LIST_list (&bb_deps[bb].pending_read_insns);
      free_INSN_LIST_list (&bb_deps[bb].pending_write_insns);
      free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems);
      free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems);
    }
}
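
/* The asymmetry above reflects how the pending lists are built: the
   pending_*_insns lists are INSN_LISTs of the memory-accessing insns, while
   the pending_*_mems lists are EXPR_LISTs holding the MEM expressions
   themselves (see concat_insn_mem_list, which allocates one of each).  */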

/* Print dependences for debugging, callable from debugger.  */

void
debug_dependencies (void)
{
  int bb;

  fprintf (sched_dump, ";; --------------- forward dependences: ------------ \n");
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;
      rtx next_tail;
      rtx insn;

      get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
      next_tail = NEXT_INSN (tail);
      fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
	       BB_TO_BLOCK (bb), bb);

      if (targetm.sched.use_dfa_pipeline_interface
	  && targetm.sched.use_dfa_pipeline_interface ())
	{
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
		   "insn", "code", "bb", "dep", "prio", "cost",
		   "reservation");
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
		   "----", "----", "--", "---", "----", "----",
		   "-----------");
	}
      else
	{
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
		   "insn", "code", "bb", "dep", "prio", "cost",
		   "blockage", "units");
	  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%11s%6s\n",
		   "----", "----", "--", "---", "----", "----",
		   "--------", "-----");
	}

      for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
	{
	  rtx link;

	  if (! INSN_P (insn))
	    {
	      int n;
	      fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
	      if (GET_CODE (insn) == NOTE)
		{
		  n = NOTE_LINE_NUMBER (insn);
		  if (n < 0)
		    fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
		  else
		    fprintf (sched_dump, "line %d, file %s\n", n,
			     NOTE_SOURCE_FILE (insn));
		}
	      else
		fprintf (sched_dump, " {%s}\n",
			 GET_RTX_NAME (GET_CODE (insn)));
	      continue;
	    }

	  if (targetm.sched.use_dfa_pipeline_interface
	      && targetm.sched.use_dfa_pipeline_interface ())
	    {
	      fprintf (sched_dump,
		       ";; %s%5d%6d%6d%6d%6d%6d ",
		       (SCHED_GROUP_P (insn) ? "+" : " "),
		       INSN_UID (insn),
		       INSN_CODE (insn),
		       INSN_BB (insn),
		       INSN_DEP_COUNT (insn),
		       INSN_PRIORITY (insn),
		       insn_cost (insn, 0, 0));

	      if (recog_memoized (insn) < 0)
		fprintf (sched_dump, "nothing");
	      else
		print_reservation (sched_dump, insn);
	    }
	  else
	    {
	      int unit = insn_unit (insn);
	      int range
		= (unit < 0
		   || function_units[unit].blockage_range_function == 0
		   ? 0
		   : function_units[unit].blockage_range_function (insn));
	      fprintf (sched_dump,
		       ";; %s%5d%6d%6d%6d%6d%6d %3d -%3d ",
		       (SCHED_GROUP_P (insn) ? "+" : " "),
		       INSN_UID (insn),
		       INSN_CODE (insn),
		       INSN_BB (insn),
		       INSN_DEP_COUNT (insn),
		       INSN_PRIORITY (insn),
		       insn_cost (insn, 0, 0),
		       (int) MIN_BLOCKAGE_COST (range),
		       (int) MAX_BLOCKAGE_COST (range));
	      insn_print_units (insn);
	    }

	  fprintf (sched_dump, "\t: ");
	  for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
	    fprintf (sched_dump, "%d ", INSN_UID (XEXP (link, 0)));
	  fprintf (sched_dump, "\n");
	}
    }
  fprintf (sched_dump, "\n");
}

/* Schedule a region.  A region is either an inner loop, a loop-free
   subroutine, or a single basic block.  Each bb in the region is
   scheduled after its flow predecessors.  */
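
/* In outline, scheduling a region proceeds in phases: per-block backward
   dependence analysis (LOG_LINKS), forward dependence computation
   (INSN_DEPEND), priority assignment, computation of interblock data
   (probabilities, dominators, split edges) when the region has more than
   one block, and finally a call to schedule_block for each block.  */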

static void
schedule_region (int rgn)
{
  int bb;
  int rgn_n_insns = 0;
  int sched_rgn_n_insns = 0;

  /* Set variables for the current region.  */
  current_nr_blocks = RGN_NR_BLOCKS (rgn);
  current_blocks = RGN_BLOCKS (rgn);

  init_deps_global ();

  /* Initializations for region data dependence analysis.  */
  bb_deps = xmalloc (sizeof (struct deps) * current_nr_blocks);
  for (bb = 0; bb < current_nr_blocks; bb++)
    init_deps (bb_deps + bb);

  /* Compute LOG_LINKS.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    compute_block_backward_dependences (bb);

  /* Compute INSN_DEPEND.  */
  for (bb = current_nr_blocks - 1; bb >= 0; bb--)
    {
      rtx head, tail;
      get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);

      compute_forward_dependences (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
	targetm.sched.dependencies_evaluation_hook (head, tail);
    }

  /* Set priorities.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;
      get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);

      rgn_n_insns += set_priorities (head, tail);
    }

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      int i;

      prob = xmalloc ((current_nr_blocks) * sizeof (float));

      dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
      sbitmap_vector_zero (dom, current_nr_blocks);

      /* Assign a bit number to each edge belonging to this region.  */
      rgn_nr_edges = 0;
      edge_to_bit = xmalloc (nr_edges * sizeof (int));
      for (i = 1; i < nr_edges; i++)
	if (CONTAINING_RGN (FROM_BLOCK (i)) == rgn)
	  EDGE_TO_BIT (i) = rgn_nr_edges++;
      rgn_edges = xmalloc (rgn_nr_edges * sizeof (int));

      rgn_nr_edges = 0;
      for (i = 1; i < nr_edges; i++)
	if (CONTAINING_RGN (FROM_BLOCK (i)) == (rgn))
	  rgn_edges[rgn_nr_edges++] = i;

      /* Split edges.  */
      pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      sbitmap_vector_zero (pot_split, current_nr_blocks);
      ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      sbitmap_vector_zero (ancestor_edges, current_nr_blocks);

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
	compute_dom_prob_ps (bb);
    }

  /* Now we can schedule all blocks.  */
  for (bb = 0; bb < current_nr_blocks; bb++)
    {
      rtx head, tail;
      int b = BB_TO_BLOCK (bb);

      get_block_head_tail (b, &head, &tail);

      if (no_real_insns_p (head, tail))
	continue;

      current_sched_info->prev_head = PREV_INSN (head);
      current_sched_info->next_tail = NEXT_INSN (tail);

      if (write_symbols != NO_DEBUG)
	{
	  save_line_notes (b, head, tail);
	  rm_line_notes (head, tail);
	}

      /* rm_other_notes only removes notes which are _inside_ the
	 block---that is, it won't remove notes before the first real insn
	 or after the last real insn of the block.  So if the first insn
	 has a REG_SAVE_NOTE which would otherwise be emitted before the
	 insn, it is redundant with the note before the start of the
	 block, and so we have to take it out.  */
      if (INSN_P (head))
	{
	  rtx note;

	  for (note = REG_NOTES (head); note; note = XEXP (note, 1))
	    if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
	      {
		remove_note (head, note);
		note = XEXP (note, 1);
		remove_note (head, note);
	      }
	}

      /* Remove remaining note insns from the block, save them in
	 note_list.  These notes are restored at the end of
	 schedule_block ().  */
      rm_other_notes (head, tail);

      target_bb = bb;

      current_sched_info->queue_must_finish_empty
	= current_nr_blocks > 1 && !flag_schedule_interblock;

      schedule_block (b, rgn_n_insns);
      sched_rgn_n_insns += sched_n_insns;

      /* Update target block boundaries.  */
      if (head == BB_HEAD (BASIC_BLOCK (b)))
	BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
      if (tail == BB_END (BASIC_BLOCK (b)))
	BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;

      /* Clean up.  */
      if (current_nr_blocks > 1)
	{
	  free (candidate_table);
	  free (bblst_table);
	  free (bitlst_table);
	}
    }

  /* Sanity check: verify that all region insns were scheduled.  */
  if (sched_rgn_n_insns != rgn_n_insns)
    abort ();

  /* Restore line notes.  */
  if (write_symbols != NO_DEBUG)
    {
      for (bb = 0; bb < current_nr_blocks; bb++)
	{
	  rtx head, tail;
	  get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
	  restore_line_notes (head, tail);
	}
    }

  /* Done with this region.  */
  free_pending_lists ();

  finish_deps_global ();

  free (bb_deps);

  if (current_nr_blocks > 1)
    {
      free (prob);
      sbitmap_vector_free (dom);
      sbitmap_vector_free (pot_split);
      sbitmap_vector_free (ancestor_edges);
      free (edge_to_bit);
      free (rgn_edges);
    }
}

/* Indexed by region, holds the number of death notes found in that region.
   Used for consistency checks.  */
static int *deaths_in_region;

/* Initialize data structures for region scheduling.  */

static void
init_regions (void)
{
  sbitmap blocks;
  int rgn;

  nr_regions = 0;
  rgn_table = xmalloc ((n_basic_blocks) * sizeof (region));
  rgn_bb_table = xmalloc ((n_basic_blocks) * sizeof (int));
  block_to_bb = xmalloc ((last_basic_block) * sizeof (int));
  containing_rgn = xmalloc ((last_basic_block) * sizeof (int));

  /* Compute regions for scheduling.  */
  if (reload_completed
      || n_basic_blocks == 1
      || !flag_schedule_interblock)
    {
      find_single_block_region ();
    }
  else
    {
      /* Verify that a 'good' control flow graph can be built.  */
      if (is_cfg_nonregular ())
	{
	  find_single_block_region ();
	}
      else
	{
	  struct edge_list *edge_list;

	  /* The scheduler runs after estimate_probabilities; therefore, we
	     can't blindly call back into find_basic_blocks since doing so
	     could invalidate the branch probability info.  We could,
	     however, call cleanup_cfg.  */
	  edge_list = create_edge_list ();

	  /* Compute the dominators and post dominators.  */
	  calculate_dominance_info (CDI_DOMINATORS);

	  /* build_control_flow will return nonzero if it detects unreachable
	     blocks or any other irregularity with the cfg which prevents
	     cross block scheduling.  */
	  if (build_control_flow (edge_list) != 0)
	    find_single_block_region ();
	  else
	    find_rgns (edge_list);

	  if (sched_verbose >= 3)
	    debug_regions ();

	  /* We are done with flow's edge list.  */
	  free_edge_list (edge_list);

	  /* For now.  This will move as more and more of haifa is converted
	     to using the cfg code in flow.c.  */
	  free_dominance_info (CDI_DOMINATORS);
	}
    }

  if (CHECK_DEAD_NOTES)
    {
      blocks = sbitmap_alloc (last_basic_block);
      deaths_in_region = xmalloc (sizeof (int) * nr_regions);
      /* Remove all death notes from the subroutine.  */
      for (rgn = 0; rgn < nr_regions; rgn++)
	{
	  int b;

	  sbitmap_zero (blocks);
	  for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
	    SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);

	  deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
	}
      sbitmap_free (blocks);
    }
  else
    count_or_remove_death_notes (NULL, 1);
}
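
/* When CHECK_DEAD_NOTES is enabled, the per-region death-note counts
   recorded here are compared against a recount after scheduling (see the
   CHECK_DEAD_NOTES block in schedule_insns below) as a consistency check
   for single-block regions.  */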

/* The one entry point in this file.  DUMP_FILE is the dump file for
   this pass.  */

void
schedule_insns (FILE *dump_file)
{
  sbitmap large_region_blocks, blocks;
  int rgn;
  int any_large_regions;
  basic_block bb;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks == 0)
    return;

  nr_inter = 0;
  nr_spec = 0;

  sched_init (dump_file);

  init_regions ();

  current_sched_info = &region_sched_info;

  /* Schedule every region in the subroutine.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    schedule_region (rgn);

  /* Update life analysis for the subroutine.  Do single block regions
     first so that we can verify that live_at_start didn't change.  Then
     do all other blocks.  */
  /* ??? There is an outside possibility that update_life_info, or more
     to the point propagate_block, could get called with nonzero flags
     more than once for one basic block.  This would be kinda bad if it
     were to happen, since REG_INFO would be accumulated twice for the
     block, and we'd have twice the REG_DEAD notes.

     I'm fairly certain that this _shouldn't_ happen, since I don't think
     that live_at_start should change at region heads.  Not sure what the
     best way to test for this kind of thing...  */

  allocate_reg_life_data ();
  compute_bb_for_insn ();

  any_large_regions = 0;
  large_region_blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (large_region_blocks);
  FOR_EACH_BB (bb)
    SET_BIT (large_region_blocks, bb->index);

  blocks = sbitmap_alloc (last_basic_block);
  sbitmap_zero (blocks);

  /* Update life information.  For regions consisting of multiple blocks
     we've possibly done interblock scheduling that affects global liveness.
     For regions consisting of single blocks we need to do only local
     liveness.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    if (RGN_NR_BLOCKS (rgn) > 1)
      any_large_regions = 1;
    else
      {
	SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
	RESET_BIT (large_region_blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);
      }

  /* Don't update reg info after reload, since that affects
     regs_ever_live, which should not change after reload.  */
  update_life_info (blocks, UPDATE_LIFE_LOCAL,
		    (reload_completed ? PROP_DEATH_NOTES
		     : PROP_DEATH_NOTES | PROP_REG_INFO));
  if (any_large_regions)
    {
      update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
			PROP_DEATH_NOTES | PROP_REG_INFO);
    }

  if (CHECK_DEAD_NOTES)
    {
      /* Verify the counts of basic block notes in single basic block
	 regions.  */
      for (rgn = 0; rgn < nr_regions; rgn++)
	if (RGN_NR_BLOCKS (rgn) == 1)
	  {
	    sbitmap_zero (blocks);
	    SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn)]);

	    if (deaths_in_region[rgn]
		!= count_or_remove_death_notes (blocks, 0))
	      abort ();
	  }
      free (deaths_in_region);
    }

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes (get_insns ());

  /* Delete redundant line notes.  */
  if (write_symbols != NO_DEBUG)
    rm_redundant_line_notes ();

  if (sched_verbose)
    {
      if (reload_completed == 0 && flag_schedule_interblock)
	{
	  fprintf (sched_dump,
		   "\n;; Procedure interblock/speculative motions == %d/%d \n",
		   nr_inter, nr_spec);
	}
      fprintf (sched_dump, "\n\n");
    }

  /* Clean up.  */
  free (rgn_table);
  free (rgn_bb_table);
  free (block_to_bb);
  free (containing_rgn);

  sbitmap_free (blocks);
  sbitmap_free (large_region_blocks);
}
#endif