1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
22 /* This file contains optimizer of the control flow. The main entry point is
23 cleanup_cfg. Following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (edge to the forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 underlying infrastructure so branch can be converted to simplejump or
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
36 #include "coretypes.h"
39 #include "hard-reg-set.h"
43 #include "insn-config.h"
51 #include "cfglayout.h"
53 #include "tree-pass.h"
// Nonzero when BB has been determined to be a forwarder block (a block
// that only transfers control to its single successor).  The flag is
// cached in bb->flags by notice_new_block / update_forwarder_flag below.
57 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
59 /* Set to true when we are running first pass of try_optimize_cfg loop. */
60 static bool first_pass
;
61 static bool try_crossjump_to_edge (int, edge
, edge
);
62 static bool try_crossjump_bb (int, basic_block
);
63 static bool outgoing_edges_match (int, basic_block
, basic_block
);
65 static void merge_blocks_move_predecessor_nojumps (basic_block
, basic_block
);
66 static void merge_blocks_move_successor_nojumps (basic_block
, basic_block
);
67 static bool try_optimize_cfg (int);
68 static bool try_simplify_condjump (basic_block
);
69 static bool try_forward_edges (int, basic_block
);
70 static edge
thread_jump (int, edge
, basic_block
);
71 static bool mark_effect (rtx
, bitmap
);
72 static void notice_new_block (basic_block
);
73 static void update_forwarder_flag (basic_block
);
74 static int mentions_nonequal_regs (rtx
*, void *);
76 /* Set flags for newly created block. */
// Cache the forwarder property on a freshly created block BB by setting
// BB_FORWARDER_BLOCK when forwarder_block_p says so.
// NOTE(review): this extraction is lossy — the embedded original line
// numbers skip (79 -> 84), so the function header/braces are not visible
// here; verify against upstream cfgcleanup.c before editing.
79 notice_new_block (basic_block bb
)
84 if (forwarder_block_p (bb
))
85 bb
->flags
|= BB_FORWARDER_BLOCK
;
88 /* Recompute forwarder flag after block has been modified. */
// Recompute the cached BB_FORWARDER_BLOCK flag for BB after the block has
// been modified: set it when forwarder_block_p holds, otherwise clear it.
// NOTE(review): the `else` line between the set and the clear (original
// line 95) is missing from this extraction — confirm against upstream.
91 update_forwarder_flag (basic_block bb
)
93 if (forwarder_block_p (bb
))
94 bb
->flags
|= BB_FORWARDER_BLOCK
;
96 bb
->flags
&= ~BB_FORWARDER_BLOCK
;
99 /* Simplify a conditional jump around an unconditional jump.
100 Return true if something changed. */
// Simplify a conditional jump around an unconditional jump: when
// CBRANCH_BLOCK ends in a condjump whose fallthru leads to a forwarder
// block containing only an unconditional jump, invert the condition and
// delete the forwarder.  Returns true if the CFG was changed.
// NOTE(review): extraction is lossy here (several `return false;` lines
// and brace lines are missing, per the gaps in the embedded original
// line numbers) — verify against upstream cfgcleanup.c.
103 try_simplify_condjump (basic_block cbranch_block
)
105 basic_block jump_block
, jump_dest_block
, cbranch_dest_block
;
106 edge cbranch_jump_edge
, cbranch_fallthru_edge
;
109 /* Verify that there are exactly two successors. */
110 if (EDGE_COUNT (cbranch_block
->succs
) != 2)
113 /* Verify that we've got a normal conditional branch at the end
115 cbranch_insn
= BB_END (cbranch_block
);
116 if (!any_condjump_p (cbranch_insn
))
119 cbranch_fallthru_edge
= FALLTHRU_EDGE (cbranch_block
);
120 cbranch_jump_edge
= BRANCH_EDGE (cbranch_block
);
122 /* The next block must not have multiple predecessors, must not
123 be the last block in the function, and must contain just the
124 unconditional jump. */
125 jump_block
= cbranch_fallthru_edge
->dest
;
126 if (!single_pred_p (jump_block
)
127 || jump_block
->next_bb
== EXIT_BLOCK_PTR
128 || !FORWARDER_BLOCK_P (jump_block
))
130 jump_dest_block
= single_succ (jump_block
);
132 /* If we are partitioning hot/cold basic blocks, we don't want to
133 mess up unconditional or indirect jumps that cross between hot
136 Basic block partitioning may result in some jumps that appear to
137 be optimizable (or blocks that appear to be mergeable), but which really
138 must be left untouched (they are required to make it safely across
139 partition boundaries). See the comments at the top of
140 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
142 if (BB_PARTITION (jump_block
) != BB_PARTITION (jump_dest_block
)
143 || (cbranch_jump_edge
->flags
& EDGE_CROSSING
))
146 /* The conditional branch must target the block after the
147 unconditional branch. */
148 cbranch_dest_block
= cbranch_jump_edge
->dest
;
150 if (cbranch_dest_block
== EXIT_BLOCK_PTR
151 || !can_fallthru (jump_block
, cbranch_dest_block
))
154 /* Invert the conditional branch. */
155 if (!invert_jump (cbranch_insn
, block_label (jump_dest_block
), 0))
159 fprintf (dump_file
, "Simplifying condjump %i around jump %i\n",
160 INSN_UID (cbranch_insn
), INSN_UID (BB_END (jump_block
)));
162 /* Success. Update the CFG to match. Note that after this point
163 the edge variable names appear backwards; the redirection is done
164 this way to preserve edge profile data. */
165 cbranch_jump_edge
= redirect_edge_succ_nodup (cbranch_jump_edge
,
167 cbranch_fallthru_edge
= redirect_edge_succ_nodup (cbranch_fallthru_edge
,
169 cbranch_jump_edge
->flags
|= EDGE_FALLTHRU
;
170 cbranch_fallthru_edge
->flags
&= ~EDGE_FALLTHRU
;
171 update_br_prob_note (cbranch_block
);
173 /* Delete the block with the unconditional jump, and clean up the mess. */
174 delete_basic_block (jump_block
);
175 tidy_fallthru_edge (cbranch_jump_edge
);
176 update_forwarder_flag (cbranch_block
);
181 /* Attempt to prove that operation is NOOP using CSElib or mark the effect
182 on register. Used by jump threading. */
// Record the effect of one rtl side-effect expression EXP on the regset
// NONEQUAL (registers not proven equal across the threaded path):
// a CLOBBER of a register clears its bits (value is dead, need not
// match); a SET that cselib cannot prove a no-op marks the destination
// register(s) nonequal.  Hard registers mark/clear all of their
// hard_regno_nregs words.
// NOTE(review): lossy extraction — the case labels, local declarations
// of `dest`/`regno`, loop over N, and return statements are missing
// (embedded line numbers skip); verify against upstream cfgcleanup.c.
185 mark_effect (rtx exp
, regset nonequal
)
189 switch (GET_CODE (exp
))
191 /* In case we do clobber the register, mark it as equal, as we know the
192 value is dead so it don't have to match. */
194 if (REG_P (XEXP (exp
, 0)))
196 dest
= XEXP (exp
, 0);
197 regno
= REGNO (dest
);
198 CLEAR_REGNO_REG_SET (nonequal
, regno
);
199 if (regno
< FIRST_PSEUDO_REGISTER
)
201 int n
= hard_regno_nregs
[regno
][GET_MODE (dest
)];
203 CLEAR_REGNO_REG_SET (nonequal
, regno
+ n
);
// A SET whose source cselib proves equal to its destination is a no-op.
209 if (rtx_equal_for_cselib_p (SET_DEST (exp
), SET_SRC (exp
)))
211 dest
= SET_DEST (exp
);
216 regno
= REGNO (dest
);
217 SET_REGNO_REG_SET (nonequal
, regno
);
218 if (regno
< FIRST_PSEUDO_REGISTER
)
220 int n
= hard_regno_nregs
[regno
][GET_MODE (dest
)];
222 SET_REGNO_REG_SET (nonequal
, regno
+ n
);
231 /* Return nonzero if X is a register set in regset DATA.
232 Called via for_each_rtx. */
// for_each_rtx callback: return nonzero iff *X is a register present in
// the regset passed via DATA (checking each hard-register word for hard
// regs).  Used by thread_jump to reject conditions that read registers
// not proven equal.
// NOTE(review): lossy extraction — the REG_P test on *X, the `regno`
// declaration/assignment and the return statements are missing here;
// verify against upstream cfgcleanup.c.
234 mentions_nonequal_regs (rtx
*x
, void *data
)
236 regset nonequal
= (regset
) data
;
242 if (REGNO_REG_SET_P (nonequal
, regno
))
244 if (regno
< FIRST_PSEUDO_REGISTER
)
246 int n
= hard_regno_nregs
[regno
][GET_MODE (*x
)];
248 if (REGNO_REG_SET_P (nonequal
, regno
+ n
))
254 /* Attempt to prove that the basic block B will have no side effects and
255 always continues in the same edge if reached via E. Return the edge
256 if exist, NULL otherwise. */
// Jump threading: try to prove that basic block B, when entered via edge
// E, has no side effects and its conditional exit is decided by the same
// (or dominated) comparison as E->src's.  On success return the edge B
// must take; otherwise return NULL.  Blocks proven unthreadable are
// flagged BB_NONTHREADABLE_BLOCK so they are not re-analyzed.
// NOTE(review): lossy extraction — cselib_init/cselib_finish calls,
// several `return NULL;` lines, the `failed`/`i`/`nonequal`
// declarations and some braces are missing (embedded line numbers
// skip); verify against upstream cfgcleanup.c before editing.
259 thread_jump (int mode
, edge e
, basic_block b
)
261 rtx set1
, set2
, cond1
, cond2
, insn
;
262 enum rtx_code code1
, code2
, reversed_code2
;
263 bool reverse1
= false;
267 reg_set_iterator rsi
;
269 if (b
->flags
& BB_NONTHREADABLE_BLOCK
)
272 /* At the moment, we do handle only conditional jumps, but later we may
273 want to extend this code to tablejumps and others. */
274 if (EDGE_COUNT (e
->src
->succs
) != 2)
276 if (EDGE_COUNT (b
->succs
) != 2)
278 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
282 /* Second branch must end with onlyjump, as we will eliminate the jump. */
283 if (!any_condjump_p (BB_END (e
->src
)))
286 if (!any_condjump_p (BB_END (b
)) || !onlyjump_p (BB_END (b
)))
288 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
292 set1
= pc_set (BB_END (e
->src
));
293 set2
= pc_set (BB_END (b
));
// Determine whether entering via E corresponds to the taken or the
// fallthru arm of E->src's condjump (reversed condition if so).
294 if (((e
->flags
& EDGE_FALLTHRU
) != 0)
295 != (XEXP (SET_SRC (set1
), 1) == pc_rtx
))
298 cond1
= XEXP (SET_SRC (set1
), 0);
299 cond2
= XEXP (SET_SRC (set2
), 0);
301 code1
= reversed_comparison_code (cond1
, BB_END (e
->src
));
303 code1
= GET_CODE (cond1
);
305 code2
= GET_CODE (cond2
);
306 reversed_code2
= reversed_comparison_code (cond2
, BB_END (b
));
// The first condition must decide the second (directly or reversed).
308 if (!comparison_dominates_p (code1
, code2
)
309 && !comparison_dominates_p (code1
, reversed_code2
))
312 /* Ensure that the comparison operators are equivalent.
313 ??? This is far too pessimistic. We should allow swapped operands,
314 different CCmodes, or for example comparisons for interval, that
315 dominate even when operands are not equivalent. */
316 if (!rtx_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
317 || !rtx_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
320 /* Short circuit cases where block B contains some side effects, as we can't
322 for (insn
= NEXT_INSN (BB_HEAD (b
)); insn
!= NEXT_INSN (BB_END (b
));
323 insn
= NEXT_INSN (insn
))
324 if (INSN_P (insn
) && side_effects_p (PATTERN (insn
)))
326 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
332 /* First process all values computed in the source basic block. */
333 for (insn
= NEXT_INSN (BB_HEAD (e
->src
));
334 insn
!= NEXT_INSN (BB_END (e
->src
));
335 insn
= NEXT_INSN (insn
))
337 cselib_process_insn (insn
);
339 nonequal
= BITMAP_ALLOC (NULL
);
340 CLEAR_REG_SET (nonequal
);
342 /* Now assume that we've continued by the edge E to B and continue
343 processing as if it were same basic block.
344 Our goal is to prove that whole block is an NOOP. */
346 for (insn
= NEXT_INSN (BB_HEAD (b
));
347 insn
!= NEXT_INSN (BB_END (b
)) && !failed
;
348 insn
= NEXT_INSN (insn
))
352 rtx pat
= PATTERN (insn
);
354 if (GET_CODE (pat
) == PARALLEL
)
356 for (i
= 0; i
< (unsigned)XVECLEN (pat
, 0); i
++)
357 failed
|= mark_effect (XVECEXP (pat
, 0, i
), nonequal
);
360 failed
|= mark_effect (pat
, nonequal
);
363 cselib_process_insn (insn
);
366 /* Later we should clear nonequal of dead registers. So far we don't
367 have life information in cfg_cleanup. */
370 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
374 /* cond2 must not mention any register that is not equal to the
376 if (for_each_rtx (&cond2
, mentions_nonequal_regs
, nonequal
))
379 /* In case liveness information is available, we need to prove equivalence
380 only of the live values. */
381 if (mode
& CLEANUP_UPDATE_LIFE
)
382 AND_REG_SET (nonequal
, b
->il
.rtl
->global_live_at_end
)
;
// Any remaining nonequal register defeats the proof (failure path).
384 EXECUTE_IF_SET_IN_REG_SET (nonequal
, 0, i
, rsi
)
387 BITMAP_FREE (nonequal
);
// Success: pick the arm of B's condjump that the dominated comparison
// forces.
389 if ((comparison_dominates_p (code1
, code2
) != 0)
390 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
391 return BRANCH_EDGE (b
);
393 return FALLTHRU_EDGE (b
);
396 BITMAP_FREE (nonequal
);
401 /* Attempt to forward edges leaving basic block B.
402 Return true if successful. */
// Forward each outgoing edge of B across chains of forwarder blocks
// (and, when CLEANUP_THREADING, across blocks thread_jump can prove),
// redirecting the edge to the final target and updating profile data
// (counts/frequencies) of every block skipped.  Returns true if any
// edge changed.
// NOTE(review): lossy extraction — the `counter` initialization, many
// braces, `ei` declaration, `goto`/continue lines and the final
// `return changed;` are missing (embedded original line numbers skip);
// verify against upstream cfgcleanup.c before editing.
405 try_forward_edges (int mode
, basic_block b
)
407 bool changed
= false;
409 edge e
, *threaded_edges
= NULL
;
411 /* If we are partitioning hot/cold basic blocks, we don't want to
412 mess up unconditional or indirect jumps that cross between hot
415 Basic block partitioning may result in some jumps that appear to
416 be optimizable (or blocks that appear to be mergeable), but which really
417 must be left untouched (they are required to make it safely across
418 partition boundaries). See the comments at the top of
419 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
421 if (find_reg_note (BB_END (b
), REG_CROSSING_JUMP
, NULL_RTX
))
424 for (ei
= ei_start (b
->succs
); (e
= ei_safe_edge (ei
)); )
426 basic_block target
, first
;
428 bool threaded
= false;
429 int nthreaded_edges
= 0;
430 bool may_thread
= first_pass
| (b
->flags
& BB_DIRTY
);
432 /* Skip complex edges because we don't know how to update them.
434 Still handle fallthru edges, as we can succeed to forward fallthru
435 edge to the same place as the branch edge of conditional branch
436 and turn conditional branch to an unconditional branch. */
437 if (e
->flags
& EDGE_COMPLEX
)
443 target
= first
= e
->dest
;
446 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
447 up jumps that cross between hot/cold sections.
449 Basic block partitioning may result in some jumps that appear
450 to be optimizable (or blocks that appear to be mergeable), but which
451 really must be left untouched (they are required to make it safely
452 across partition boundaries). See the comments at the top of
453 bb-reorder.c:partition_hot_cold_basic_blocks for complete
456 if (first
!= EXIT_BLOCK_PTR
457 && find_reg_note (BB_END (first
), REG_CROSSING_JUMP
, NULL_RTX
))
460 while (counter
< n_basic_blocks
)
462 basic_block new_target
= NULL
;
463 bool new_target_threaded
= false;
464 may_thread
|= target
->flags
& BB_DIRTY
;
466 if (FORWARDER_BLOCK_P (target
)
467 && !(single_succ_edge (target
)->flags
& EDGE_CROSSING
)
468 && single_succ (target
) != EXIT_BLOCK_PTR
)
470 /* Bypass trivial infinite loops. */
471 new_target
= single_succ (target
);
472 if (target
== new_target
)
473 counter
= n_basic_blocks
;
476 /* Allow to thread only over one edge at time to simplify updating
478 else if ((mode
& CLEANUP_THREADING
) && may_thread
)
480 edge t
= thread_jump (mode
, e
, target
);
484 threaded_edges
= xmalloc (sizeof (*threaded_edges
)
490 /* Detect an infinite loop across blocks not
491 including the start block. */
492 for (i
= 0; i
< nthreaded_edges
; ++i
)
493 if (threaded_edges
[i
] == t
)
495 if (i
< nthreaded_edges
)
497 counter
= n_basic_blocks
;
502 /* Detect an infinite loop across the start block. */
506 gcc_assert (nthreaded_edges
< n_basic_blocks
);
507 threaded_edges
[nthreaded_edges
++] = t
;
509 new_target
= t
->dest
;
510 new_target_threaded
= true;
517 /* Avoid killing of loop pre-headers, as it is the place loop
518 optimizer wants to hoist code to.
520 For fallthru forwarders, the LOOP_BEG note must appear between
521 the header of block and CODE_LABEL of the loop, for non forwarders
522 it must appear before the JUMP_INSN. */
523 if ((mode
& CLEANUP_PRE_LOOP
) && optimize
&& flag_loop_optimize
)
525 rtx insn
= (EDGE_SUCC (target
, 0)->flags
& EDGE_FALLTHRU
526 ? BB_HEAD (target
) : prev_nonnote_insn (BB_END (target
)));
529 insn
= NEXT_INSN (insn
);
531 for (; insn
&& !LABEL_P (insn
) && !INSN_P (insn
);
532 insn
= NEXT_INSN (insn
))
534 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_BEG
)
537 if (insn
&& NOTE_P (insn
))
540 /* Do not clean up branches to just past the end of a loop
541 at this time; it can mess up the loop optimizer's
542 recognition of some patterns. */
544 insn
= PREV_INSN (BB_HEAD (target
));
545 if (insn
&& NOTE_P (insn
)
546 && NOTE_LINE_NUMBER (insn
) == NOTE_INSN_LOOP_END
)
552 threaded
|= new_target_threaded
;
555 if (counter
>= n_basic_blocks
)
558 fprintf (dump_file
, "Infinite loop in BB %i.\n",
561 else if (target
== first
)
562 ; /* We didn't do anything. */
565 /* Save the values now, as the edge may get removed. */
566 gcov_type edge_count
= e
->count
;
567 int edge_probability
= e
->probability
;
571 /* Don't force if target is exit block. */
572 if (threaded
&& target
!= EXIT_BLOCK_PTR
)
574 notice_new_block (redirect_edge_and_branch_force (e
, target
));
576 fprintf (dump_file
, "Conditionals threaded.\n");
578 else if (!redirect_edge_and_branch (e
, target
))
582 "Forwarding edge %i->%i to %i failed.\n",
583 b
->index
, e
->dest
->index
, target
->index
);
588 /* We successfully forwarded the edge. Now update profile
589 data: for each edge we traversed in the chain, remove
590 the original edge's execution count. */
591 edge_frequency
= ((edge_probability
* b
->frequency
592 + REG_BR_PROB_BASE
/ 2)
595 if (!FORWARDER_BLOCK_P (b
) && forwarder_block_p (b
))
596 b
->flags
|= BB_FORWARDER_BLOCK
;
602 if (!single_succ_p (first
))
604 gcc_assert (n
< nthreaded_edges
);
605 t
= threaded_edges
[n
++];
606 gcc_assert (t
->src
== first
);
607 update_bb_profile_for_threading (first
, edge_frequency
,
609 update_br_prob_note (first
);
613 first
->count
-= edge_count
;
614 if (first
->count
< 0)
616 first
->frequency
-= edge_frequency
;
617 if (first
->frequency
< 0)
618 first
->frequency
= 0;
619 /* It is possible that as the result of
620 threading we've removed edge as it is
621 threaded to the fallthru edge. Avoid
622 getting out of sync. */
623 if (n
< nthreaded_edges
624 && first
== threaded_edges
[n
]->src
)
626 t
= single_succ_edge (first
);
629 t
->count
-= edge_count
;
634 while (first
!= target
);
643 free (threaded_edges
);
648 /* Blocks A and B are to be merged into a single block. A has no incoming
649 fallthru edge, so it can be moved before B without adding or modifying
650 any jumps (aside from the jump from A to B). */
// Physically move block A immediately before block B in the insn chain
// and merge them.  Caller guarantees A has no incoming fallthru edge, so
// the move adds/modifies no jumps (aside from A's jump to B).
// NOTE(review): lossy extraction — local declarations (`barrier`,
// `only_notes`), the dump_file guard, unlink_block and the final
// merge_blocks call are missing here; verify against upstream.
653 merge_blocks_move_predecessor_nojumps (basic_block a
, basic_block b
)
658 /* If we are partitioning hot/cold basic blocks, we don't want to
659 mess up unconditional or indirect jumps that cross between hot
662 Basic block partitioning may result in some jumps that appear to
663 be optimizable (or blocks that appear to be mergeable), but which really
664 must be left untouched (they are required to make it safely across
665 partition boundaries). See the comments at the top of
666 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
668 if (BB_PARTITION (a
) != BB_PARTITION (b
))
671 barrier
= next_nonnote_insn (BB_END (a
));
672 gcc_assert (BARRIER_P (barrier
));
673 delete_insn (barrier
);
675 /* Move block and loop notes out of the chain so that we do not
678 ??? A better solution would be to squeeze out all the non-nested notes
679 and adjust the block trees appropriately. Even better would be to have
680 a tighter connection between block trees and rtl so that this is not
682 only_notes
= squeeze_notes (&BB_HEAD (a
), &BB_END (a
));
683 gcc_assert (!only_notes
);
685 /* Scramble the insn chain. */
686 if (BB_END (a
) != PREV_INSN (BB_HEAD (b
)))
687 reorder_insns_nobb (BB_HEAD (a
), BB_END (a
), PREV_INSN (BB_HEAD (b
)));
688 a
->flags
|= BB_DIRTY
;
691 fprintf (dump_file
, "Moved block %d before %d and merged.\n",
694 /* Swap the records for the two blocks around. */
697 link_block (a
, b
->prev_bb
);
699 /* Now blocks A and B are contiguous. Merge them. */
703 /* Blocks A and B are to be merged into a single block. B has no outgoing
704 fallthru edge, so it can be moved after A without adding or modifying
705 any jumps (aside from the jump from A to B). */
// Physically move block B immediately after block A in the insn chain
// and merge them.  Caller guarantees B has no outgoing fallthru edge, so
// the move adds/modifies no jumps (aside from A's jump to B).  A jump
// table hanging off B is temporarily attached so it moves along.
// NOTE(review): lossy extraction — `label`/`table`/`only_notes`
// declarations, the BB_END(b) = table assignment, dump_file guard and
// the final merge_blocks call are missing; verify against upstream.
708 merge_blocks_move_successor_nojumps (basic_block a
, basic_block b
)
710 rtx barrier
, real_b_end
;
714 /* If we are partitioning hot/cold basic blocks, we don't want to
715 mess up unconditional or indirect jumps that cross between hot
718 Basic block partitioning may result in some jumps that appear to
719 be optimizable (or blocks that appear to be mergeable), but which really
720 must be left untouched (they are required to make it safely across
721 partition boundaries). See the comments at the top of
722 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
724 if (BB_PARTITION (a
) != BB_PARTITION (b
))
727 real_b_end
= BB_END (b
);
729 /* If there is a jump table following block B temporarily add the jump table
730 to block B so that it will also be moved to the correct location. */
731 if (tablejump_p (BB_END (b
), &label
, &table
)
732 && prev_active_insn (label
) == BB_END (b
))
737 /* There had better have been a barrier there. Delete it. */
738 barrier
= NEXT_INSN (BB_END (b
));
739 if (barrier
&& BARRIER_P (barrier
))
740 delete_insn (barrier
);
742 /* Move block and loop notes out of the chain so that we do not
745 ??? A better solution would be to squeeze out all the non-nested notes
746 and adjust the block trees appropriately. Even better would be to have
747 a tighter connection between block trees and rtl so that this is not
749 only_notes
= squeeze_notes (&BB_HEAD (b
), &BB_END (b
));
750 gcc_assert (!only_notes
);
753 /* Scramble the insn chain. */
754 reorder_insns_nobb (BB_HEAD (b
), BB_END (b
), BB_END (a
));
756 /* Restore the real end of b. */
757 BB_END (b
) = real_b_end
;
760 fprintf (dump_file
, "Moved block %d after %d and merged.\n",
763 /* Now blocks A and B are contiguous. Merge them. */
767 /* Attempt to merge basic blocks that are potentially non-adjacent.
768 Return NULL iff the attempt failed, otherwise return basic block
769 where cleanup_cfg should continue. Because the merging commonly
770 moves basic block away or introduces another optimization
771 possibility, return basic block just before B so cleanup_cfg don't
774 It may be good idea to return basic block before C in the case
775 C has been moved after B and originally appeared earlier in the
776 insn sequence, but we have no information available about the
777 relative ordering of these two. Hopefully it is not too common. */
// Attempt to merge potentially non-adjacent blocks B and C joined by
// edge E.  If B falls through to C, merge in place; otherwise (when
// CLEANUP_EXPENSIVE) move C after B or B before C using the two
// *_nojumps helpers above.  Returns the block where cleanup_cfg should
// resume, or NULL on failure (per the function comment above).
// NOTE(review): lossy extraction — `next` declaration, several
// `return NULL;` lines, merge_blocks calls, `ei` declaration and
// brace/break lines are missing; verify against upstream cfgcleanup.c.
780 merge_blocks_move (edge e
, basic_block b
, basic_block c
, int mode
)
784 /* If we are partitioning hot/cold basic blocks, we don't want to
785 mess up unconditional or indirect jumps that cross between hot
788 Basic block partitioning may result in some jumps that appear to
789 be optimizable (or blocks that appear to be mergeable), but which really
790 must be left untouched (they are required to make it safely across
791 partition boundaries). See the comments at the top of
792 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
794 if (BB_PARTITION (b
) != BB_PARTITION (c
))
799 /* If B has a fallthru edge to C, no need to move anything. */
800 if (e
->flags
& EDGE_FALLTHRU
)
802 int b_index
= b
->index
, c_index
= c
->index
;
804 update_forwarder_flag (b
);
807 fprintf (dump_file
, "Merged %d and %d without moving.\n",
810 return b
->prev_bb
== ENTRY_BLOCK_PTR
? b
: b
->prev_bb
;
813 /* Otherwise we will need to move code around. Do that only if expensive
814 transformations are allowed. */
815 else if (mode
& CLEANUP_EXPENSIVE
)
817 edge tmp_edge
, b_fallthru_edge
;
818 bool c_has_outgoing_fallthru
;
819 bool b_has_incoming_fallthru
;
822 /* Avoid overactive code motion, as the forwarder blocks should be
823 eliminated by edge redirection instead. One exception might have
824 been if B is a forwarder block and C has no fallthru edge, but
825 that should be cleaned up by bb-reorder instead. */
826 if (FORWARDER_BLOCK_P (b
) || FORWARDER_BLOCK_P (c
))
829 /* We must make sure to not munge nesting of lexical blocks,
830 and loop notes. This is done by squeezing out all the notes
831 and leaving them there to lie. Not ideal, but functional. */
833 FOR_EACH_EDGE (tmp_edge
, ei
, c
->succs
)
834 if (tmp_edge
->flags
& EDGE_FALLTHRU
)
837 c_has_outgoing_fallthru
= (tmp_edge
!= NULL
);
839 FOR_EACH_EDGE (tmp_edge
, ei
, b
->preds
)
840 if (tmp_edge
->flags
& EDGE_FALLTHRU
)
843 b_has_incoming_fallthru
= (tmp_edge
!= NULL
);
844 b_fallthru_edge
= tmp_edge
;
847 next
= next
->prev_bb
;
849 /* Otherwise, we're going to try to move C after B. If C does
850 not have an outgoing fallthru, then it can be moved
851 immediately after B without introducing or modifying jumps. */
852 if (! c_has_outgoing_fallthru
)
854 merge_blocks_move_successor_nojumps (b
, c
);
855 return next
== ENTRY_BLOCK_PTR
? next
->next_bb
: next
;
858 /* If B does not have an incoming fallthru, then it can be moved
859 immediately before C without introducing or modifying jumps.
860 C cannot be the first block, so we do not have to worry about
861 accessing a non-existent block. */
863 if (b_has_incoming_fallthru
)
867 if (b_fallthru_edge
->src
== ENTRY_BLOCK_PTR
)
869 bb
= force_nonfallthru (b_fallthru_edge
);
871 notice_new_block (bb
);
874 merge_blocks_move_predecessor_nojumps (b
, c
);
875 return next
== ENTRY_BLOCK_PTR
? next
->next_bb
: next
;
881 /* Return true iff the condbranches at the end of BB1 and BB2 match. */
// Return true iff the conditional branches ending BB1 and BB2 are
// equivalent for crossjumping: same (or swapped/reversed) comparison,
// matching destinations (looking through fallthru forwarders), and —
// for hot blocks — branch probabilities within 50% of each other so a
// single merged prediction remains sensible.
// NOTE(review): lossy extraction — `b1`/`b2`/`f1`/`f2`/`match`/`prob2`
// declarations, the `reverse`/`match` bookkeeping, several `return
// false;` lines and the final `return match;` are missing; verify
// against upstream cfgcleanup.c.
883 condjump_equiv_p (basic_block bb1
, basic_block bb2
)
887 rtx set1
, set2
, cond1
, cond2
;
888 enum rtx_code code1
, code2
;
891 b1
= BRANCH_EDGE (bb1
);
892 b2
= BRANCH_EDGE (bb2
);
893 f1
= FALLTHRU_EDGE (bb1
);
894 f2
= FALLTHRU_EDGE (bb2
);
896 /* Get around possible forwarders on fallthru edges. Other cases
897 should be optimized out already. */
898 if (FORWARDER_BLOCK_P (f1
->dest
))
899 f1
= single_succ_edge (f1
->dest
);
901 if (FORWARDER_BLOCK_P (f2
->dest
))
902 f2
= single_succ_edge (f2
->dest
);
904 /* To simplify use of this function, return false if there are
905 unneeded forwarder blocks. These will get eliminated later
906 during cleanup_cfg. */
907 if (FORWARDER_BLOCK_P (f1
->dest
)
908 || FORWARDER_BLOCK_P (f2
->dest
)
909 || FORWARDER_BLOCK_P (b1
->dest
)
910 || FORWARDER_BLOCK_P (b2
->dest
))
// Destinations must match either directly or crossed (branch of one is
// fallthru of the other); the crossed case requires reversing cond2.
913 if (f1
->dest
== f2
->dest
&& b1
->dest
== b2
->dest
)
915 else if (f1
->dest
== b2
->dest
&& b1
->dest
== f2
->dest
)
920 set1
= pc_set (BB_END (bb1
));
921 set2
= pc_set (BB_END (bb2
));
922 if ((XEXP (SET_SRC (set1
), 1) == pc_rtx
)
923 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
926 cond1
= XEXP (SET_SRC (set1
), 0);
927 cond2
= XEXP (SET_SRC (set2
), 0);
928 code1
= GET_CODE (cond1
);
930 code2
= reversed_comparison_code (cond2
, BB_END (bb2
));
932 code2
= GET_CODE (cond2
);
934 if (code2
== UNKNOWN
)
937 /* Verify codes and operands match. */
938 match
= ((code1
== code2
939 && rtx_renumbered_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
940 && rtx_renumbered_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
941 || (code1
== swap_condition (code2
)
942 && rtx_renumbered_equal_p (XEXP (cond1
, 1),
944 && rtx_renumbered_equal_p (XEXP (cond1
, 0),
947 /* If we return true, we will join the blocks. Which means that
948 we will only have one branch prediction bit to work with. Thus
949 we require the existing branches to have probabilities that are
953 && maybe_hot_bb_p (bb1
)
954 && maybe_hot_bb_p (bb2
))
958 if (b1
->dest
== b2
->dest
)
959 prob2
= b2
->probability
;
961 /* Do not use f2 probability as f2 may be forwarded. */
962 prob2
= REG_BR_PROB_BASE
- b2
->probability
;
964 /* Fail if the difference in probabilities is greater than 50%.
965 This rules out two well-predicted branches with opposite
967 if (abs (b1
->probability
- prob2
) > REG_BR_PROB_BASE
/ 2)
971 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
972 bb1
->index
, bb2
->index
, b1
->probability
, prob2
);
978 if (dump_file
&& match
)
979 fprintf (dump_file
, "Conditionals in bb %i and %i match.\n",
980 bb1
->index
, bb2
->index
);
984 /* Return true iff outgoing edges of BB1 and BB2 match, together with
985 the branch instruction. This means that if we commonize the control
986 flow before end of the basic block, the semantic remains unchanged.
988 We may assume that there exists one edge with a common destination. */
// Return true iff the outgoing edges and the terminating jump of BB1
// and BB2 match closely enough that the control flow at the end of the
// blocks can be commonized (crossjumped) without changing semantics.
// Handles three shapes: single-successor blocks, conditional jumps
// (delegated to condjump_equiv_p), and the generic case including
// identical jump tables; also requires matching EH edges/regions and
// matching fallthru destinations (through forwarders).
// NOTE(review): lossy extraction — `e1`/`e2`/`ei`, `label*`/`table*`,
// `match`, `identical` control flow, several `return false;`/`return
// true;` lines and the rr.r1/rr.r2 assignments are missing; verify
// against upstream cfgcleanup.c.
991 outgoing_edges_match (int mode
, basic_block bb1
, basic_block bb2
)
993 int nehedges1
= 0, nehedges2
= 0;
994 edge fallthru1
= 0, fallthru2
= 0;
998 /* If BB1 has only one successor, we may be looking at either an
999 unconditional jump, or a fake edge to exit. */
1000 if (single_succ_p (bb1
)
1001 && (single_succ_edge (bb1
)->flags
& (EDGE_COMPLEX
| EDGE_FAKE
)) == 0
1002 && (!JUMP_P (BB_END (bb1
)) || simplejump_p (BB_END (bb1
))))
1003 return (single_succ_p (bb2
)
1004 && (single_succ_edge (bb2
)->flags
1005 & (EDGE_COMPLEX
| EDGE_FAKE
)) == 0
1006 && (!JUMP_P (BB_END (bb2
)) || simplejump_p (BB_END (bb2
))));
1008 /* Match conditional jumps - this may get tricky when fallthru and branch
1009 edges are crossed. */
1010 if (EDGE_COUNT (bb1
->succs
) == 2
1011 && any_condjump_p (BB_END (bb1
))
1012 && onlyjump_p (BB_END (bb1
)))
1014 if (EDGE_COUNT (bb2
->succs
) != 2
1015 || !any_condjump_p (BB_END (bb2
))
1016 || !onlyjump_p (BB_END (bb2
)))
1018 return condjump_equiv_p (bb1
, bb2
);
1021 /* Generic case - we are seeing a computed jump, table jump or trapping
1024 /* Check whether there are tablejumps in the end of BB1 and BB2.
1025 Return true if they are identical. */
1030 if (tablejump_p (BB_END (bb1
), &label1
, &table1
)
1031 && tablejump_p (BB_END (bb2
), &label2
, &table2
)
1032 && GET_CODE (PATTERN (table1
)) == GET_CODE (PATTERN (table2
)))
1034 /* The labels should never be the same rtx. If they really are same
1035 the jump tables are same too. So disable crossjumping of blocks BB1
1036 and BB2 because when deleting the common insns in the end of BB1
1037 by delete_basic_block () the jump table would be deleted too. */
1038 /* If LABEL2 is referenced in BB1->END do not do anything
1039 because we would lose information when replacing
1040 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1041 if (label1
!= label2
&& !rtx_referenced_p (label2
, BB_END (bb1
)))
1043 /* Set IDENTICAL to true when the tables are identical. */
1044 bool identical
= false;
1047 p1
= PATTERN (table1
);
1048 p2
= PATTERN (table2
);
1049 if (GET_CODE (p1
) == ADDR_VEC
&& rtx_equal_p (p1
, p2
))
1053 else if (GET_CODE (p1
) == ADDR_DIFF_VEC
1054 && (XVECLEN (p1
, 1) == XVECLEN (p2
, 1))
1055 && rtx_equal_p (XEXP (p1
, 2), XEXP (p2
, 2))
1056 && rtx_equal_p (XEXP (p1
, 3), XEXP (p2
, 3)))
1061 for (i
= XVECLEN (p1
, 1) - 1; i
>= 0 && identical
; i
--)
1062 if (!rtx_equal_p (XVECEXP (p1
, 1, i
), XVECEXP (p2
, 1, i
)))
1068 replace_label_data rr
;
1071 /* Temporarily replace references to LABEL1 with LABEL2
1072 in BB1->END so that we could compare the instructions. */
1075 rr
.update_label_nuses
= false;
1076 for_each_rtx (&BB_END (bb1
), replace_label
, &rr
);
1078 match
= insns_match_p (mode
, BB_END (bb1
), BB_END (bb2
));
1079 if (dump_file
&& match
)
1081 "Tablejumps in bb %i and %i match.\n",
1082 bb1
->index
, bb2
->index
);
1084 /* Set the original label in BB1->END because when deleting
1085 a block whose end is a tablejump, the tablejump referenced
1086 from the instruction is deleted too. */
1089 for_each_rtx (&BB_END (bb1
), replace_label
, &rr
);
1098 /* First ensure that the instructions match. There may be many outgoing
1099 edges so this test is generally cheaper. */
1100 if (!insns_match_p (mode
, BB_END (bb1
), BB_END (bb2
)))
1103 /* Search the outgoing edges, ensure that the counts do match, find possible
1104 fallthru and exception handling edges since these needs more
1106 if (EDGE_COUNT (bb1
->succs
) != EDGE_COUNT (bb2
->succs
))
1109 FOR_EACH_EDGE (e1
, ei
, bb1
->succs
)
1111 e2
= EDGE_SUCC (bb2
, ei
.index
);
1113 if (e1
->flags
& EDGE_EH
)
1116 if (e2
->flags
& EDGE_EH
)
1119 if (e1
->flags
& EDGE_FALLTHRU
)
1121 if (e2
->flags
& EDGE_FALLTHRU
)
1125 /* If number of edges of various types does not match, fail. */
1126 if (nehedges1
!= nehedges2
1127 || (fallthru1
!= 0) != (fallthru2
!= 0))
1130 /* fallthru edges must be forwarded to the same destination. */
1133 basic_block d1
= (forwarder_block_p (fallthru1
->dest
)
1134 ? single_succ (fallthru1
->dest
): fallthru1
->dest
);
1135 basic_block d2
= (forwarder_block_p (fallthru2
->dest
)
1136 ? single_succ (fallthru2
->dest
): fallthru2
->dest
);
1142 /* Ensure the same EH region. */
1144 rtx n1
= find_reg_note (BB_END (bb1
), REG_EH_REGION
, 0);
1145 rtx n2
= find_reg_note (BB_END (bb2
), REG_EH_REGION
, 0);
1150 if (n1
&& (!n2
|| XEXP (n1
, 0) != XEXP (n2
, 0)))
1154 /* We don't need to match the rest of edges as above checks should be enough
1155 to ensure that they are equivalent. */
1159 /* E1 and E2 are edges with the same destination block. Search their
1160 predecessors for common code. If found, redirect control flow from
1161 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1164 try_crossjump_to_edge (int mode
, edge e1
, edge e2
)
1167 basic_block src1
= e1
->src
, src2
= e2
->src
;
1168 basic_block redirect_to
, redirect_from
, to_remove
;
1169 rtx newpos1
, newpos2
;
1173 newpos1
= newpos2
= NULL_RTX
;
1175 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1176 to try this optimization.
1178 Basic block partitioning may result in some jumps that appear to
1179 be optimizable (or blocks that appear to be mergeable), but which really
1180 must be left untouched (they are required to make it safely across
1181 partition boundaries). See the comments at the top of
1182 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1184 if (flag_reorder_blocks_and_partition
&& no_new_pseudos
)
1187 /* Search backward through forwarder blocks. We don't need to worry
1188 about multiple entry or chained forwarders, as they will be optimized
1189 away. We do this to look past the unconditional jump following a
1190 conditional jump that is required due to the current CFG shape. */
1191 if (single_pred_p (src1
)
1192 && FORWARDER_BLOCK_P (src1
))
1193 e1
= single_pred_edge (src1
), src1
= e1
->src
;
1195 if (single_pred_p (src2
)
1196 && FORWARDER_BLOCK_P (src2
))
1197 e2
= single_pred_edge (src2
), src2
= e2
->src
;
1199 /* Nothing to do if we reach ENTRY, or a common source block. */
1200 if (src1
== ENTRY_BLOCK_PTR
|| src2
== ENTRY_BLOCK_PTR
)
1205 /* Seeing more than 1 forwarder blocks would confuse us later... */
1206 if (FORWARDER_BLOCK_P (e1
->dest
)
1207 && FORWARDER_BLOCK_P (single_succ (e1
->dest
)))
1210 if (FORWARDER_BLOCK_P (e2
->dest
)
1211 && FORWARDER_BLOCK_P (single_succ (e2
->dest
)))
1214 /* Likewise with dead code (possibly newly created by the other optimizations
1216 if (EDGE_COUNT (src1
->preds
) == 0 || EDGE_COUNT (src2
->preds
) == 0)
1219 /* Look for the common insn sequence, part the first ... */
1220 if (!outgoing_edges_match (mode
, src1
, src2
))
1223 /* ... and part the second. */
1224 nmatch
= flow_find_cross_jump (mode
, src1
, src2
, &newpos1
, &newpos2
);
1226 /* Don't proceed with the crossjump unless we found a sufficient number
1227 of matching instructions or the 'from' block was totally matched
1228 (such that its predecessors will hopefully be redirected and the
1230 if ((nmatch
< PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS
))
1231 && (newpos1
!= BB_HEAD (src1
)))
1234 /* Here we know that the insns in the end of SRC1 which are common with SRC2
1236 If we have tablejumps in the end of SRC1 and SRC2
1237 they have been already compared for equivalence in outgoing_edges_match ()
1238 so replace the references to TABLE1 by references to TABLE2. */
1243 if (tablejump_p (BB_END (src1
), &label1
, &table1
)
1244 && tablejump_p (BB_END (src2
), &label2
, &table2
)
1245 && label1
!= label2
)
1247 replace_label_data rr
;
1250 /* Replace references to LABEL1 with LABEL2. */
1253 rr
.update_label_nuses
= true;
1254 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
1256 /* Do not replace the label in SRC1->END because when deleting
1257 a block whose end is a tablejump, the tablejump referenced
1258 from the instruction is deleted too. */
1259 if (insn
!= BB_END (src1
))
1260 for_each_rtx (&insn
, replace_label
, &rr
);
1265 /* Avoid splitting if possible. We must always split when SRC2 has
1266 EH predecessor edges, or we may end up with basic blocks with both
1267 normal and EH predecessor edges. */
1268 if (newpos2
== BB_HEAD (src2
)
1269 && !(EDGE_PRED (src2
, 0)->flags
& EDGE_EH
))
1273 if (newpos2
== BB_HEAD (src2
))
1275 /* Skip possible basic block header. */
1276 if (LABEL_P (newpos2
))
1277 newpos2
= NEXT_INSN (newpos2
);
1278 if (NOTE_P (newpos2
))
1279 newpos2
= NEXT_INSN (newpos2
);
1283 fprintf (dump_file
, "Splitting bb %i before %i insns\n",
1284 src2
->index
, nmatch
);
1285 redirect_to
= split_block (src2
, PREV_INSN (newpos2
))->dest
;
1290 "Cross jumping from bb %i to bb %i; %i common insns\n",
1291 src1
->index
, src2
->index
, nmatch
);
1293 redirect_to
->count
+= src1
->count
;
1294 redirect_to
->frequency
+= src1
->frequency
;
1295 /* We may have some registers visible trought the block. */
1296 redirect_to
->flags
|= BB_DIRTY
;
1298 /* Recompute the frequencies and counts of outgoing edges. */
1299 FOR_EACH_EDGE (s
, ei
, redirect_to
->succs
)
1303 basic_block d
= s
->dest
;
1305 if (FORWARDER_BLOCK_P (d
))
1306 d
= single_succ (d
);
1308 FOR_EACH_EDGE (s2
, ei
, src1
->succs
)
1310 basic_block d2
= s2
->dest
;
1311 if (FORWARDER_BLOCK_P (d2
))
1312 d2
= single_succ (d2
);
1317 s
->count
+= s2
->count
;
1319 /* Take care to update possible forwarder blocks. We verified
1320 that there is no more than one in the chain, so we can't run
1321 into infinite loop. */
1322 if (FORWARDER_BLOCK_P (s
->dest
))
1324 single_succ_edge (s
->dest
)->count
+= s2
->count
;
1325 s
->dest
->count
+= s2
->count
;
1326 s
->dest
->frequency
+= EDGE_FREQUENCY (s
);
1329 if (FORWARDER_BLOCK_P (s2
->dest
))
1331 single_succ_edge (s2
->dest
)->count
-= s2
->count
;
1332 if (single_succ_edge (s2
->dest
)->count
< 0)
1333 single_succ_edge (s2
->dest
)->count
= 0;
1334 s2
->dest
->count
-= s2
->count
;
1335 s2
->dest
->frequency
-= EDGE_FREQUENCY (s
);
1336 if (s2
->dest
->frequency
< 0)
1337 s2
->dest
->frequency
= 0;
1338 if (s2
->dest
->count
< 0)
1339 s2
->dest
->count
= 0;
1342 if (!redirect_to
->frequency
&& !src1
->frequency
)
1343 s
->probability
= (s
->probability
+ s2
->probability
) / 2;
1346 = ((s
->probability
* redirect_to
->frequency
+
1347 s2
->probability
* src1
->frequency
)
1348 / (redirect_to
->frequency
+ src1
->frequency
));
1351 update_br_prob_note (redirect_to
);
1353 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1355 /* Skip possible basic block header. */
1356 if (LABEL_P (newpos1
))
1357 newpos1
= NEXT_INSN (newpos1
);
1359 if (NOTE_P (newpos1
))
1360 newpos1
= NEXT_INSN (newpos1
);
1362 redirect_from
= split_block (src1
, PREV_INSN (newpos1
))->src
;
1363 to_remove
= single_succ (redirect_from
);
1365 redirect_edge_and_branch_force (single_succ_edge (redirect_from
), redirect_to
);
1366 delete_basic_block (to_remove
);
1368 update_forwarder_flag (redirect_from
);
1369 if (redirect_to
!= src2
)
1370 update_forwarder_flag (src2
);
1375 /* Search the predecessors of BB for common insn sequences. When found,
1376 share code between them by redirecting control flow. Return true if
1377 any changes made. */
1380 try_crossjump_bb (int mode
, basic_block bb
)
1382 edge e
, e2
, fallthru
;
1384 unsigned max
, ix
, ix2
;
1385 basic_block ev
, ev2
;
1388 /* Nothing to do if there is not at least two incoming edges. */
1389 if (EDGE_COUNT (bb
->preds
) < 2)
1392 /* Don't crossjump if this block ends in a computed jump,
1393 unless we are optimizing for size. */
1395 && bb
!= EXIT_BLOCK_PTR
1396 && computed_jump_p (BB_END (bb
)))
1399 /* If we are partitioning hot/cold basic blocks, we don't want to
1400 mess up unconditional or indirect jumps that cross between hot
1403 Basic block partitioning may result in some jumps that appear to
1404 be optimizable (or blocks that appear to be mergeable), but which really
1405 must be left untouched (they are required to make it safely across
1406 partition boundaries). See the comments at the top of
1407 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1409 if (BB_PARTITION (EDGE_PRED (bb
, 0)->src
) !=
1410 BB_PARTITION (EDGE_PRED (bb
, 1)->src
)
1411 || (EDGE_PRED (bb
, 0)->flags
& EDGE_CROSSING
))
1414 /* It is always cheapest to redirect a block that ends in a branch to
1415 a block that falls through into BB, as that adds no branches to the
1416 program. We'll try that combination first. */
1418 max
= PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES
);
1420 if (EDGE_COUNT (bb
->preds
) > max
)
1423 FOR_EACH_EDGE (e
, ei
, bb
->preds
)
1425 if (e
->flags
& EDGE_FALLTHRU
)
1430 for (ix
= 0, ev
= bb
; ix
< EDGE_COUNT (ev
->preds
); )
1432 e
= EDGE_PRED (ev
, ix
);
1435 /* As noted above, first try with the fallthru predecessor. */
1438 /* Don't combine the fallthru edge into anything else.
1439 If there is a match, we'll do it the other way around. */
1442 /* If nothing changed since the last attempt, there is nothing
1445 && (!(e
->src
->flags
& BB_DIRTY
)
1446 && !(fallthru
->src
->flags
& BB_DIRTY
)))
1449 if (try_crossjump_to_edge (mode
, e
, fallthru
))
1458 /* Non-obvious work limiting check: Recognize that we're going
1459 to call try_crossjump_bb on every basic block. So if we have
1460 two blocks with lots of outgoing edges (a switch) and they
1461 share lots of common destinations, then we would do the
1462 cross-jump check once for each common destination.
1464 Now, if the blocks actually are cross-jump candidates, then
1465 all of their destinations will be shared. Which means that
1466 we only need check them for cross-jump candidacy once. We
1467 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1468 choosing to do the check from the block for which the edge
1469 in question is the first successor of A. */
1470 if (EDGE_SUCC (e
->src
, 0) != e
)
1473 for (ix2
= 0, ev2
= bb
; ix2
< EDGE_COUNT (ev2
->preds
); )
1475 e2
= EDGE_PRED (ev2
, ix2
);
1481 /* We've already checked the fallthru edge above. */
1485 /* The "first successor" check above only prevents multiple
1486 checks of crossjump(A,B). In order to prevent redundant
1487 checks of crossjump(B,A), require that A be the block
1488 with the lowest index. */
1489 if (e
->src
->index
> e2
->src
->index
)
1492 /* If nothing changed since the last attempt, there is nothing
1495 && (!(e
->src
->flags
& BB_DIRTY
)
1496 && !(e2
->src
->flags
& BB_DIRTY
)))
1499 if (try_crossjump_to_edge (mode
, e
, e2
))
1512 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1513 instructions etc. Return nonzero if changes were made. */
1516 try_optimize_cfg (int mode
)
1518 bool changed_overall
= false;
1521 basic_block bb
, b
, next
;
1523 if (mode
& CLEANUP_CROSSJUMP
)
1524 add_noreturn_fake_exit_edges ();
1526 if (mode
& (CLEANUP_UPDATE_LIFE
| CLEANUP_CROSSJUMP
| CLEANUP_THREADING
))
1530 update_forwarder_flag (bb
);
1532 if (! targetm
.cannot_modify_jumps_p ())
1535 /* Attempt to merge blocks as made possible by edge removal. If
1536 a block has only one successor, and the successor has only
1537 one predecessor, they may be combined. */
1545 "\n\ntry_optimize_cfg iteration %i\n\n",
1548 for (b
= ENTRY_BLOCK_PTR
->next_bb
; b
!= EXIT_BLOCK_PTR
;)
1552 bool changed_here
= false;
1554 /* Delete trivially dead basic blocks. */
1555 while (EDGE_COUNT (b
->preds
) == 0)
1559 fprintf (dump_file
, "Deleting block %i.\n",
1562 delete_basic_block (b
);
1563 if (!(mode
& CLEANUP_CFGLAYOUT
))
1568 /* Remove code labels no longer used. */
1569 if (single_pred_p (b
)
1570 && (single_pred_edge (b
)->flags
& EDGE_FALLTHRU
)
1571 && !(single_pred_edge (b
)->flags
& EDGE_COMPLEX
)
1572 && LABEL_P (BB_HEAD (b
))
1573 /* If the previous block ends with a branch to this
1574 block, we can't delete the label. Normally this
1575 is a condjump that is yet to be simplified, but
1576 if CASE_DROPS_THRU, this can be a tablejump with
1577 some element going to the same place as the
1578 default (fallthru). */
1579 && (single_pred (b
) == ENTRY_BLOCK_PTR
1580 || !JUMP_P (BB_END (single_pred (b
)))
1581 || ! label_is_jump_target_p (BB_HEAD (b
),
1582 BB_END (single_pred (b
)))))
1584 rtx label
= BB_HEAD (b
);
1586 delete_insn_chain (label
, label
);
1587 /* In the case label is undeletable, move it after the
1588 BASIC_BLOCK note. */
1589 if (NOTE_LINE_NUMBER (BB_HEAD (b
)) == NOTE_INSN_DELETED_LABEL
)
1591 rtx bb_note
= NEXT_INSN (BB_HEAD (b
));
1593 reorder_insns_nobb (label
, label
, bb_note
);
1594 BB_HEAD (b
) = bb_note
;
1597 fprintf (dump_file
, "Deleted label in block %i.\n",
1601 /* If we fall through an empty block, we can remove it. */
1602 if (!(mode
& CLEANUP_CFGLAYOUT
)
1603 && single_pred_p (b
)
1604 && (single_pred_edge (b
)->flags
& EDGE_FALLTHRU
)
1605 && !LABEL_P (BB_HEAD (b
))
1606 && FORWARDER_BLOCK_P (b
)
1607 /* Note that forwarder_block_p true ensures that
1608 there is a successor for this block. */
1609 && (single_succ_edge (b
)->flags
& EDGE_FALLTHRU
)
1610 && n_basic_blocks
> 1)
1614 "Deleting fallthru block %i.\n",
1617 c
= b
->prev_bb
== ENTRY_BLOCK_PTR
? b
->next_bb
: b
->prev_bb
;
1618 redirect_edge_succ_nodup (single_pred_edge (b
),
1620 delete_basic_block (b
);
1625 if (single_succ_p (b
)
1626 && (s
= single_succ_edge (b
))
1627 && !(s
->flags
& EDGE_COMPLEX
)
1628 && (c
= s
->dest
) != EXIT_BLOCK_PTR
1629 && single_pred_p (c
)
1632 /* When not in cfg_layout mode use code aware of reordering
1633 INSN. This code possibly creates new basic blocks so it
1634 does not fit merge_blocks interface and is kept here in
1635 hope that it will become useless once more of compiler
1636 is transformed to use cfg_layout mode. */
1638 if ((mode
& CLEANUP_CFGLAYOUT
)
1639 && can_merge_blocks_p (b
, c
))
1641 merge_blocks (b
, c
);
1642 update_forwarder_flag (b
);
1643 changed_here
= true;
1645 else if (!(mode
& CLEANUP_CFGLAYOUT
)
1646 /* If the jump insn has side effects,
1647 we can't kill the edge. */
1648 && (!JUMP_P (BB_END (b
))
1649 || (reload_completed
1650 ? simplejump_p (BB_END (b
))
1651 : (onlyjump_p (BB_END (b
))
1652 && !tablejump_p (BB_END (b
),
1654 && (next
= merge_blocks_move (s
, b
, c
, mode
)))
1657 changed_here
= true;
1661 /* Simplify branch over branch. */
1662 if ((mode
& CLEANUP_EXPENSIVE
)
1663 && !(mode
& CLEANUP_CFGLAYOUT
)
1664 && try_simplify_condjump (b
))
1665 changed_here
= true;
1667 /* If B has a single outgoing edge, but uses a
1668 non-trivial jump instruction without side-effects, we
1669 can either delete the jump entirely, or replace it
1670 with a simple unconditional jump. */
1671 if (single_succ_p (b
)
1672 && single_succ (b
) != EXIT_BLOCK_PTR
1673 && onlyjump_p (BB_END (b
))
1674 && !find_reg_note (BB_END (b
), REG_CROSSING_JUMP
, NULL_RTX
)
1675 && try_redirect_by_replacing_jump (single_succ_edge (b
),
1677 (mode
& CLEANUP_CFGLAYOUT
) != 0))
1679 update_forwarder_flag (b
);
1680 changed_here
= true;
1683 /* Simplify branch to branch. */
1684 if (try_forward_edges (mode
, b
))
1685 changed_here
= true;
1687 /* Look for shared code between blocks. */
1688 if ((mode
& CLEANUP_CROSSJUMP
)
1689 && try_crossjump_bb (mode
, b
))
1690 changed_here
= true;
1692 /* Don't get confused by the index shift caused by
1700 if ((mode
& CLEANUP_CROSSJUMP
)
1701 && try_crossjump_bb (mode
, EXIT_BLOCK_PTR
))
1704 #ifdef ENABLE_CHECKING
1706 verify_flow_info ();
1709 changed_overall
|= changed
;
1715 if (mode
& CLEANUP_CROSSJUMP
)
1716 remove_fake_exit_edges ();
1719 b
->flags
&= ~(BB_FORWARDER_BLOCK
| BB_NONTHREADABLE_BLOCK
);
1721 return changed_overall
;
1724 /* Delete all unreachable basic blocks. */
1727 delete_unreachable_blocks (void)
1729 bool changed
= false;
1730 basic_block b
, next_bb
;
1732 find_unreachable_blocks ();
1734 /* Delete all unreachable basic blocks. */
1736 for (b
= ENTRY_BLOCK_PTR
->next_bb
; b
!= EXIT_BLOCK_PTR
; b
= next_bb
)
1738 next_bb
= b
->next_bb
;
1740 if (!(b
->flags
& BB_REACHABLE
))
1742 delete_basic_block (b
);
1748 tidy_fallthru_edges ();
1752 /* Merges sequential blocks if possible. */
1755 merge_seq_blocks (void)
1758 bool changed
= false;
1760 for (bb
= ENTRY_BLOCK_PTR
->next_bb
; bb
!= EXIT_BLOCK_PTR
; )
1762 if (single_succ_p (bb
)
1763 && can_merge_blocks_p (bb
, single_succ (bb
)))
1765 /* Merge the blocks and retry. */
1766 merge_blocks (bb
, single_succ (bb
));
1777 /* Tidy the CFG by deleting unreachable code and whatnot. */
1780 cleanup_cfg (int mode
)
1782 bool changed
= false;
1784 timevar_push (TV_CLEANUP_CFG
);
1785 if (delete_unreachable_blocks ())
1788 /* We've possibly created trivially dead code. Cleanup it right
1789 now to introduce more opportunities for try_optimize_cfg. */
1790 if (!(mode
& (CLEANUP_NO_INSN_DEL
| CLEANUP_UPDATE_LIFE
))
1791 && !reload_completed
)
1792 delete_trivially_dead_insns (get_insns(), max_reg_num ());
1797 while (try_optimize_cfg (mode
))
1799 delete_unreachable_blocks (), changed
= true;
1800 if (mode
& CLEANUP_UPDATE_LIFE
)
1802 /* Cleaning up CFG introduces more opportunities for dead code
1803 removal that in turn may introduce more opportunities for
1804 cleaning up the CFG. */
1805 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES
,
1807 | PROP_SCAN_DEAD_CODE
1808 | PROP_KILL_DEAD_CODE
1809 | ((mode
& CLEANUP_LOG_LINKS
)
1810 ? PROP_LOG_LINKS
: 0)))
1813 else if (!(mode
& CLEANUP_NO_INSN_DEL
)
1814 && (mode
& CLEANUP_EXPENSIVE
)
1815 && !reload_completed
)
1817 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
1822 delete_dead_jumptables ();
1825 timevar_pop (TV_CLEANUP_CFG
);
1831 rest_of_handle_jump (void)
1833 delete_unreachable_blocks ();
1835 if (cfun
->tail_call_emit
)
1836 fixup_tail_calls ();
1839 struct tree_opt_pass pass_jump
=
1841 "sibling", /* name */
1843 rest_of_handle_jump
, /* execute */
1846 0, /* static_pass_number */
1847 TV_JUMP
, /* tv_id */
1848 0, /* properties_required */
1849 0, /* properties_provided */
1850 0, /* properties_destroyed */
1851 TODO_ggc_collect
, /* todo_flags_start */
1853 TODO_verify_flow
, /* todo_flags_finish */
1859 rest_of_handle_jump2 (void)
1861 /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB. Do this
1862 before jump optimization switches branch directions. */
1863 if (flag_guess_branch_prob
)
1864 expected_value_to_br_prob ();
1866 delete_trivially_dead_insns (get_insns (), max_reg_num ());
1867 reg_scan (get_insns (), max_reg_num ());
1869 dump_flow_info (dump_file
);
1870 cleanup_cfg ((optimize
? CLEANUP_EXPENSIVE
: 0) | CLEANUP_PRE_LOOP
1871 | (flag_thread_jumps
? CLEANUP_THREADING
: 0));
1873 create_loop_notes ();
1875 purge_line_number_notes ();
1878 cleanup_cfg (CLEANUP_EXPENSIVE
| CLEANUP_PRE_LOOP
);
1880 /* Jump optimization, and the removal of NULL pointer checks, may
1881 have reduced the number of instructions substantially. CSE, and
1882 future passes, allocate arrays whose dimensions involve the
1883 maximum instruction UID, so if we can reduce the maximum UID
1884 we'll save big on memory. */
1885 renumber_insns (dump_file
);
1889 struct tree_opt_pass pass_jump2
=
1893 rest_of_handle_jump2
, /* execute */
1896 0, /* static_pass_number */
1897 TV_JUMP
, /* tv_id */
1898 0, /* properties_required */
1899 0, /* properties_provided */
1900 0, /* properties_destroyed */
1901 TODO_ggc_collect
, /* todo_flags_start */
1902 TODO_dump_func
, /* todo_flags_finish */