/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

     - Unreachable block removal
     - Edge forwarding (an edge to a forwarder block is forwarded to its
       successor.  Simplification of the branch instruction is performed by
       the underlying infrastructure so that the branch can be converted to
       a simplejump or eliminated)
     - Cross jumping (tail merging)
     - Conditional jump-around-simplejump simplification
     - Basic block merging.  */
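/* As an illustrative sketch (not taken from any particular test case),
   edge forwarding rewrites

	  if (cond) goto L1;          if (cond) goto L2;
	  ...                   -->   ...
	L1: goto L2;                (L1's block is now unreachable)
	L2: ...                     L2: ...

   after which unreachable block removal deletes L1's block and basic
   block merging may glue the remaining blocks together.  */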
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "regs.h"
#include "cfglayout.h"
#include "emit-rtl.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
    /* Set if BB is a forwarder block, to avoid too many
       forwarder_block_p calls.  */
    BB_FORWARDER_BLOCK = 1,
    BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}

/* Recompute forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
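/* For example (an illustrative sketch only; the labels are hypothetical),
   this transformation rewrites

	  if (cond) goto L2;            if (!cond) goto L1;
	  goto L1;               -->
	L2: ...                       L2: ...

   so that the block holding the unconditional jump can be deleted and the
   inverted conditional branch falls through to L2.  */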
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (EDGE_COUNT (jump_block->preds) >= 2
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
	  || (cbranch_jump_edge->flags & EDGE_CROSSING)))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */
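/* For illustration (made-up RTL): a SET whose source already equals its
   destination according to cselib, e.g. (set (reg 65) (reg 65)), proves
   nothing new and is treated as a NOOP, while a SET whose equivalence we
   cannot prove marks the destination register in NONEQUAL.  */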
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */
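/* A sketch of the situation handled here (the conditions are illustrative
   only): E->src ends in "if (x == 0) ...", block B performs no side
   effects and ends in "if (x <= 0) ...".  The first condition dominates
   the second, so B's branch outcome is known when B is entered via E and
   the edge can be threaded directly to the matching successor of B.  */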
static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC ();
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
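/* Illustrative sketch: if edge B->L1 enters a chain of forwarder blocks

	L1: goto L2;
	L2: goto L3;

   the edge is redirected to L3 directly.  With CLEANUP_THREADING,
   thread_jump may additionally skip conditional blocks whose outcome is
   provably known on this path.  */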
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = 0;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (flag_reorder_blocks_and_partition
	  && first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
	      && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      if (target == EDGE_SUCC (target, 0)->dest)
		counter = n_basic_blocks;
	      new_target = EDGE_SUCC (target, 0)->dest;
	    }

	  /* Allow threading over at most one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing of loop pre-headers, as it is the place the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of the block and the CODE_LABEL of the loop; for
	     non-forwarders it must appear before the JUMP_INSN.  */
	  if ((mode & CLEANUP_PRE_LOOP) && optimize)
	    {
	      rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
			  ? BB_HEAD (target)
			  : prev_nonnote_insn (BB_END (target)));

	      if (!NOTE_P (insn))
		insn = NEXT_INSN (insn);

	      for (; insn && !LABEL_P (insn) && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (NOTE_P (insn)
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (insn && NOTE_P (insn))
		break;

	      /* Do not clean up branches to just past the end of a loop
		 at this time; it can mess up the loop optimizer's
		 recognition of some patterns.  */

	      insn = PREV_INSN (BB_HEAD (target));
	      if (insn && NOTE_P (insn)
		  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

	  do
	    {
	      edge t;

	      if (EDGE_COUNT (first->succs) > 1)
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges[n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed the edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges[n]->src)
		    n++;
		  t = EDGE_SUCC (first, 0);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
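/* Schematically (insn chain, left to right; a sketch only):

	... A <barrier> ... B ...   -->   ... A B ...

   A's insns are spliced in immediately before B's head, then the two
   now-contiguous blocks are merged.  */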
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (a) != BB_PARTITION (b)
	  || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */
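/* Schematically the mirror image of the previous case (a sketch only):

	... A ... B <barrier> ...   -->   ... A B ...

   B's insns (together with any trailing jump table) are spliced in
   immediately after A's end, then the two blocks are merged.  */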
static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
	  || BB_PARTITION (a) != BB_PARTITION (b)))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    BB_END (b) = table;

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
	  || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
	  || BB_PARTITION (b) != BB_PARTITION (c)))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Remove from MEM expressions X and Y (recursively) the memory attributes
   that are not equal.  */
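/* For example, if X and Y carry different alias sets, both are reset to
   alias set 0 (the conservative "can alias anything" set); differing
   MEM_EXPR/MEM_OFFSET information is dropped, while the larger of the two
   sizes and the smaller alignment are kept.  This keeps the attributes
   valid for whichever copy of the insn survives cross jumping.  */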
static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (1);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that is equivalent.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
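/* For instance (a sketch with made-up insns), given

	bb1: ...x...; a = b + c; d = a * 2; jump L
	bb2: ...y...; a = b + c; d = a * 2; jump L

   the two trailing computations match: *F1 and *F2 are set to the first
   "a = b + c" of each block and 2 is returned; the simple jumps
   themselves are skipped as described below.  */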
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
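/* E.g. two blocks both ending in "if (x < y) goto A; else fall through
   towards B" match; so does a pair whose conditions are reversals of each
   other with the branch and fallthru destinations swapped.  (This is a
   sketch only; the precise rules follow.)  */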
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (EDGE_COUNT (bb1->succs) == 1
      && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (EDGE_COUNT (bb2->succs) == 1
	    && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = EDGE_SUCC (f1->dest, 0);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = EDGE_SUCC (f2->dest, 0);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 reasonably close to each other.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

#ifndef CASE_DROPS_THROUGH
  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are same
	     the jump tables are same too.  So disable crossjumping of blocks
	     BB1 and BB2 because when deleting the common insns in the end of
	     BB1 by delete_basic_block () the jump table would be deleted
	     too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		identical = true;
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }
#endif

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     undivided attention.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? EDGE_SUCC (fallthru1->dest, 0)->dest
			: fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? EDGE_SUCC (fallthru2->dest, 0)->dest
			: fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of edges as above checks should be enough
     to ensure that they are equivalent.  */
  return true;
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
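/* Sketch of the transformation: when the tails of E1->src and E2->src
   contain the same insn sequence,

	src1: ...x...; common; goto L         src1: ...x...; goto M
	src2: ...y...; common; goto L   -->   src2: ...y...
					    M: common; goto L

   i.e. SRC2 is split before the common sequence (unless it was matched
   in full) and SRC1 is redirected to the shared copy.  */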
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (EDGE_COUNT (src1->preds) == 1
      && FORWARDER_BLOCK_P (src1))
    e1 = EDGE_PRED (src1, 0), src1 = e1->src;

  if (EDGE_COUNT (src2->preds) == 1
      && FORWARDER_BLOCK_P (src2))
    e2 = EDGE_PRED (src2, 0), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

#ifndef CASE_DROPS_THROUGH
  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
	  && tablejump_p (BB_END (src2), &label2, &table2)
	  && label1 != label2)
	{
	  replace_label_data rr;
	  rtx insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  rr.r1 = label1;
	  rr.r2 = label2;
	  rr.update_label_nuses = true;
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (src1))
		for_each_rtx (&insn, replace_label, &rr);
	    }
	}
    }
#endif

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = EDGE_SUCC (d, 0)->dest;

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  EDGE_SUCC (s->dest, 0)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  EDGE_SUCC (s2->dest, 0)->count -= s2->count;
	  if (EDGE_SUCC (s2->dest, 0)->count < 0)
	    EDGE_SUCC (s2->dest, 0)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = EDGE_SUCC (redirect_from, 0)->dest;

  redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (EDGE_PRED (bb, 0)->src)
	  != BB_PARTITION (EDGE_PRED (bb, 1)->src)
	  || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(fallthru->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(e2->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (EDGE_COUNT (b->preds) == 0)
		{
		  c = b->prev_bb;
		  if (dump_file)
		    fprintf (dump_file, "Deleting block %i.\n",
			     b->index);

		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  */
	      if (EDGE_COUNT (b->preds) == 1
		  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
		  && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (EDGE_PRED (b, 0)->src))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label);
		  /* In the case the label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		    }

		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && EDGE_COUNT (b->preds) == 1
		  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (EDGE_PRED (b, 0),
					    EDGE_SUCC (b, 0)->dest);
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		}

	      if (EDGE_COUNT (b->succs) == 1
		  && (s = EDGE_SUCC (b, 0))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && EDGE_COUNT (c->preds) == 1
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of the
		     compiler is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (EDGE_COUNT (b->succs) == 1
		  && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0),
						     EDGE_SUCC (b, 0)->dest,
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  delete_basic_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Merge sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (EDGE_COUNT (bb->succs) == 1
	  && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
	{
	  /* Merge the blocks and retry.  */
	  merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
	  changed = true;
	  continue;
	}

      bb = bb->next_bb;
    }

  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */
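/* A typical call from a pass might look like

	cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   (illustrative only; the exact combination of CLEANUP_* mode flags
   depends on the caller and the compilation phase).  */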
bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up the CFG introduces more opportunities for dead code
	     removal that in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | ((mode & CLEANUP_LOG_LINKS)
						    ? PROP_LOG_LINKS : 0)))
	    break;
	}
      else if (!(mode & CLEANUP_NO_INSN_DEL)
	       && (mode & CLEANUP_EXPENSIVE)
	       && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	}
      else
	break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}