/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
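/* An illustrative sketch (this example is not part of the original
   sources): a forwarder block is an empty block whose only effect is to
   jump to its single successor, so an edge entering it can be retargeted
   straight at that successor:

       B1: ... jump L1              B1: ... jump L2
       L1: jump L2           =>     L1: jump L2   (now unreachable)
       L2: ...                      L2: ...
*/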
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "cfglayout.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
  /* Set if BB is a forwarder block, to avoid too many
     forwarder_block_p calls.  */
  BB_FORWARDER_BLOCK = 1,
  BB_NONTHREADABLE_BLOCK = 2
};
#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static bool tail_recursion_label_p (rtx);
static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Recompute the forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
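/* A sketch of the transformation (illustrative pseudo-code, not from the
   original sources): the conditional branch is inverted to target the
   unconditional jump's destination, and the jump's block is deleted:

         if (cond) goto L2;              if (!cond) goto L3;
         goto L3;                 =>   L2: ...
       L2: ...
*/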
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn, insn, next, end;
  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;
  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (jump_block->partition != jump_dest_block->partition
          || cbranch_jump_edge->crossing_edge))
    return false;
  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;
  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);
  end = BB_END (jump_block);
  /* Deleting a block may produce an unreachable code warning even when we
     are not deleting anything live.  Suppress it by moving all the line
     number notes out of the block.  */
  for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
       insn = next)
    {
      next = NEXT_INSN (insn);
      if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
        {
          if (insn == BB_END (jump_block))
            {
              BB_END (jump_block) = PREV_INSN (insn);
              if (insn == end)
                break;
            }
          reorder_insns_nobb (insn, insn, end);
          end = insn;
        }
    }
  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);

  return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */
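/* For example (illustration only): a SET whose source and destination are
   equal under cselib, such as "r5 = r5", is a NOOP and leaves NONEQUAL
   untouched, while "r5 = r6 + 1" marks r5 in NONEQUAL as no longer
   provably equal along the threaded path.  */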
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;

  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know
         the value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          CLEAR_REGNO_REG_SET (nonequal, regno);
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
              while (--n > 0)
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
            }
        }
      return false;
    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
          while (--n > 0)
            SET_REGNO_REG_SET (nonequal, regno + n);
        }
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}
/* Attempt to prove that basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */
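/* An illustrative example (not from the original sources): if E->SRC ends
   with "if (x > 0)" and B, entered via its taken edge, contains no side
   effects and ends with "if (x >= 0)", the first comparison dominates the
   second, so the outcome of B's branch is known on entry and the jump
   through B can be threaded straight to that outcome.  */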
static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  int i;
  regset nonequal;
  bool failed = false;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;
  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
    return NULL;
  if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }
  /* The second branch must end with an onlyjump, as we will eliminate
     the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }
  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;
  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;
  /* Ensure that the comparison operands are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;
  /* Short circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
        return NULL;
      }
  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_XMALLOC ();
  CLEAR_REG_SET (nonequal);
  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }
  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;
  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);

  BITMAP_XFREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_XFREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
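/* For example (illustration only), a chain of forwarder blocks

       B -> F1 -> F2 -> D

   where F1 and F2 contain only jumps is collapsed by retargeting B's
   outgoing edge at D; with CLEANUP_THREADING, thread_jump may extend the
   walk across conditional blocks whose outcome is provable.  */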
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge e, next, *threaded_edges = NULL;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;
= b
->succ
; e
; e
= next
)
451 basic_block target
, first
;
453 bool threaded
= false;
454 int nthreaded_edges
= 0;
455 bool may_thread
= first_pass
| (b
->flags
& BB_DIRTY
);
      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed in forwarding a
         fallthru edge to the same place as the branch edge of a
         conditional branch and turn the conditional branch into an
         unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
        continue;

      target = first = e->dest;
      counter = 0;
      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= target->flags & BB_DIRTY;
          if (FORWARDER_BLOCK_P (target)
              && target->succ->dest != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              if (target == target->succ->dest)
                counter = n_basic_blocks;
              new_target = target->succ->dest;
            }
          /* Allow threading over at most one edge at a time to simplify
             updating of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (mode, e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = xmalloc (sizeof (*threaded_edges)
                                              * n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;

                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  if (nthreaded_edges >= n_basic_blocks)
                    abort ();
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }
          if (!new_target)
            break;

          /* Avoid killing of loop pre-headers, as it is the place the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of the block and the CODE_LABEL of the loop; for
             non-forwarders it must appear before the JUMP_INSN.  */
          if ((mode & CLEANUP_PRE_LOOP) && optimize)
            {
              rtx insn = (target->succ->flags & EDGE_FALLTHRU
                          ? BB_HEAD (target)
                          : prev_nonnote_insn (BB_END (target)));

              if (GET_CODE (insn) != NOTE)
                insn = NEXT_INSN (insn);

              for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (GET_CODE (insn) == NOTE
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (GET_CODE (insn) == NOTE)
                break;

              /* Do not clean up branches to just past the end of a loop
                 at this time; it can mess up the loop optimizer's
                 recognition of some patterns.  */

              insn = PREV_INSN (BB_HEAD (target));
              if (insn && GET_CODE (insn) == NOTE
                  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
                break;
            }
          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }
      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;
          /* Don't force if target is the exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              continue;
            }
          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
            BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
          do
            {
              edge t;

              first->count -= edge_count;
              if (first->count < 0)
                first->count = 0;
              first->frequency -= edge_frequency;
              if (first->frequency < 0)
                first->frequency = 0;
              if (first->succ->succ_next)
                {
                  edge e;
                  int prob;

                  if (n >= nthreaded_edges)
                    abort ();
                  t = threaded_edges[n++];
                  if (t->src != first)
                    abort ();
                  if (first->frequency)
                    prob = edge_frequency * REG_BR_PROB_BASE
                           / first->frequency;
                  else
                    prob = 0;
                  if (prob > t->probability)
                    prob = t->probability;
                  t->probability -= prob;
                  prob = REG_BR_PROB_BASE - prob;
                  if (prob <= 0)
                    {
                      first->succ->probability = REG_BR_PROB_BASE;
                      first->succ->succ_next->probability = 0;
                    }
                  else
                    for (e = first->succ; e; e = e->succ_next)
                      e->probability = ((e->probability * REG_BR_PROB_BASE)
                                        / (double) prob);
                  update_br_prob_note (first);
                }
              else
                {
                  /* It is possible that as the result of
                     threading we've removed the edge as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges[n]->src)
                    n++;
                  t = first->succ;
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
        }
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
/* Return true if LABEL is used for tail recursion.  */

static bool
tail_recursion_label_p (rtx label)
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return true;

  return false;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (a->partition != b->partition
          || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  delete_insn (barrier);
  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
    abort ();
  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);
  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
          || a->partition != b->partition))
    return;
  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    BB_END (b) = table;
  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && GET_CODE (barrier) == BARRIER)
    delete_insn (barrier);
  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
    abort ();
  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if ((mode & CLEANUP_PRE_SIBCALL)
      && GET_CODE (BB_HEAD (c)) == CODE_LABEL
      && tail_recursion_label_p (BB_HEAD (c)))
    return NULL;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
          || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
          || b->partition != c->partition))
    return NULL;
  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }
  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;
      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;
      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }
      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Remove the memory attributes of a MEM expression
   if they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    abort ();

  if (GET_MODE (x) != GET_MODE (y))
    abort ();

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
            {
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }

          set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
          set_mem_size (y, MEM_SIZE (x));

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;
  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (GET_CODE (i1) == CALL_INSN
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;
#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif
  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;
  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);
          if (s1 != 0 && s2 != 0
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              if (! rtx_renumbered_equal_p (p1, p2))
                cancel_changes (0);
              else if (apply_change_group ())
                return true;
            }
        }
    }

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that is equivalent.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
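/* An illustrative run (not from the original sources): given

       BB1: ...; a = b + c; d = a * 2; jump L
       BB2: ...; a = b + c; d = a * 2; jump L

   the two-insn tail "a = b + c; d = a * 2" matches, *F1 and *F2 are set
   to the first matched insn of each block, and 2 is returned (the
   unconditional jumps themselves are not counted).  */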
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
                      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;
  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }
  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for the unconditional jump as an insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }
  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
        i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
        i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;
      if (!insns_match_p (mode, i1, i2))
        break;

      merge_memattrs (i1, i2);
      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          /* If the merged insns have different REG_EQUAL notes, then
             remove them.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && !equiv2)
            remove_note (i1, equiv1);
          else if (!equiv1 && equiv2)
            remove_note (i2, equiv2);
          else if (equiv1 && equiv2
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              remove_note (i1, equiv1);
              remove_note (i2, equiv2);
            }

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }
      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }
#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif
  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
        last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
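/* E.g. (illustration only): "if (a == b) goto X; else fall through to Y"
   matches "if (a != b) goto Y; else fall through to X" -- the same two
   destinations with the branch and fallthru edges crossed, which is the
   REVERSE case handled below.  */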
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (bb1->succ && !bb1->succ->succ_next
      && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (GET_CODE (BB_END (bb1)) != JUMP_INSN
          || simplejump_p (BB_END (bb1))))
    return (bb2->succ && !bb2->succ->succ_next
            && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (GET_CODE (BB_END (bb2)) != JUMP_INSN
                || simplejump_p (BB_END (bb2))));
  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;
      if (!bb2->succ
          || !bb2->succ->succ_next
          || bb2->succ->succ_next->succ_next
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;
= BRANCH_EDGE (bb1
);
1261 b2
= BRANCH_EDGE (bb2
);
1262 f1
= FALLTHRU_EDGE (bb1
);
1263 f2
= FALLTHRU_EDGE (bb2
);
      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = f1->dest->succ;

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = f2->dest->succ;
      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;
->dest
== f2
->dest
&& b1
->dest
== b2
->dest
)
1284 else if (f1
->dest
== b2
->dest
&& b1
->dest
== f2
->dest
)
1289 set1
= pc_set (BB_END (bb1
));
1290 set2
= pc_set (BB_END (bb2
));
1291 if ((XEXP (SET_SRC (set1
), 1) == pc_rtx
)
1292 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
1295 cond1
= XEXP (SET_SRC (set1
), 0);
1296 cond2
= XEXP (SET_SRC (set2
), 0);
1297 code1
= GET_CODE (cond1
);
1299 code2
= reversed_comparison_code (cond2
, BB_END (bb2
));
1301 code2
= GET_CODE (cond2
);
1303 if (code2
== UNKNOWN
)
      /* Verify that the codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));
      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      if (match
          && !optimize_size
          && maybe_hot_bb_p (bb1)
          && maybe_hot_bb_p (bb2))
        {
          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;
          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;
            }
        }
      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }
  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

#ifndef CASE_DROPS_THROUGH
  /* Check whether there are tablejumps at the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;
      if (tablejump_p (BB_END (bb1), &label1, &table1)
          && tablejump_p (BB_END (bb2), &label2, &table2)
          && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
        {
          /* The labels should never be the same rtx.  If they really are
             the same, the jump tables are the same too.  So disable
             crossjumping of blocks BB1 and BB2 because when deleting the
             common insns at the end of BB1 by delete_basic_block () the
             jump table would be deleted too.  */
          /* If LABEL2 is referenced in BB1->END do not do anything
             because we would lose information when replacing
             LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
          if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
            {
              /* Set IDENTICAL to true when the tables are identical.  */
              bool identical = false;
              rtx p1, p2;
= PATTERN (table1
);
1382 p2
= PATTERN (table2
);
1383 if (GET_CODE (p1
) == ADDR_VEC
&& rtx_equal_p (p1
, p2
))
1387 else if (GET_CODE (p1
) == ADDR_DIFF_VEC
1388 && (XVECLEN (p1
, 1) == XVECLEN (p2
, 1))
1389 && rtx_equal_p (XEXP (p1
, 2), XEXP (p2
, 2))
1390 && rtx_equal_p (XEXP (p1
, 3), XEXP (p2
, 3)))
1395 for (i
= XVECLEN (p1
, 1) - 1; i
>= 0 && identical
; i
--)
1396 if (!rtx_equal_p (XVECEXP (p1
, 1, i
), XVECEXP (p2
, 1, i
)))
              if (identical)
                {
                  replace_label_data rr;
                  bool match;

                  /* Temporarily replace references to LABEL1 with LABEL2
                     in BB1->END so that we could compare the
                     instructions.  */
                  rr.r1 = label1;
                  rr.r2 = label2;
                  rr.update_label_nuses = false;
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);

                  match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
                  if (dump_file && match)
                    fprintf (dump_file,
                             "Tablejumps in bb %i and %i match.\n",
                             bb1->index, bb2->index);

                  /* Set the original label in BB1->END because when deleting
                     a block whose end is a tablejump, the tablejump
                     referenced from the instruction is deleted too.  */
                  rr.r2 = label1;
                  rr.r1 = label2;
                  for_each_rtx (&BB_END (bb1), replace_label, &rr);

                  return match;
                }
            }
          return false;
        }
    }
#endif
  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;
  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     undivided attention.  */
  for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
       e1 = e1->succ_next, e2 = e2->succ_next)
    {
      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }
  /* If the number of edges of various types does not match, fail.  */
  if (e1 || e2
      || nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;
1465 basic_block d1
= (forwarder_block_p (fallthru1
->dest
)
1466 ? fallthru1
->dest
->succ
->dest
: fallthru1
->dest
);
1467 basic_block d2
= (forwarder_block_p (fallthru2
->dest
)
1468 ? fallthru2
->dest
->succ
->dest
: fallthru2
->dest
);
  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of the edges as the above checks
     should be enough to ensure that they are equivalent.  */
  return true;
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
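/* Sketch (illustration only): when both predecessors end with the same
   tail "x = y; jump L", SRC2 is split just before its copy of the tail,
   and the matching insns in SRC1 are replaced by a jump to the split
   point, so that the tail is kept in one place only.  */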
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;

  newpos1 = newpos2 = NULL_RTX;
  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;
  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    e1 = src1->pred, src1 = e1->src;

  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    e2 = src2->pred, src2 = e2->src;
  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;
  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;
  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;
#ifndef CASE_DROPS_THROUGH
  /* Here we know that the insns at the end of SRC1 which are common with
     SRC2 will be deleted.
     If we have tablejumps at the end of SRC1 and SRC2,
     they have already been compared for equivalence in
     outgoing_edges_match (), so replace the references to TABLE1 by
     references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
          && tablejump_p (BB_END (src2), &label2, &table2)
          && label1 != label2)
        {
          replace_label_data rr;
          rtx insn;

          /* Replace references to LABEL1 with LABEL2.  */
          rr.r1 = label1;
          rr.r2 = label2;
          rr.update_label_nuses = true;
          for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
            {
              /* Do not replace the label in SRC1->END because when deleting
                 a block whose end is a tablejump, the tablejump referenced
                 from the instruction is deleted too.  */
              if (insn != BB_END (src1))
                for_each_rtx (&insn, replace_label, &rr);
            }
        }
    }
#endif
  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }
1601 "Cross jumping from bb %i to bb %i; %i common insns\n",
1602 src1
->index
, src2
->index
, nmatch
);
1604 redirect_to
->count
+= src1
->count
;
1605 redirect_to
->frequency
+= src1
->frequency
;
1606 /* We may have some registers visible trought the block. */
1607 redirect_to
->flags
|= BB_DIRTY
;
  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = d->succ->dest;
      for (s2 = src1->succ; ; s2 = s2->succ_next)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = d2->succ->dest;
          if (d == d2)
            break;
        }

      s->count += s2->count;
      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          s->dest->succ->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          s2->dest->succ->count -= s2->count;
          if (s2->dest->succ->count < 0)
            s2->dest->succ->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }
      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);
  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip a possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);

  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);
  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = redirect_from->succ->dest;

  redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;
  int n = 0, max;
  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.  */

  if (flag_reorder_blocks_and_partition
      && (bb->pred->src->partition != bb->pred->pred_next->src->partition
          || bb->pred->crossing_edge))
    return false;
  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
  for (e = bb->pred; e; e = e->pred_next, n++)
    {
      if (e->flags & EDGE_FALLTHRU)
        fallthru = e;
      if (n > max)
        return false;
    }

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;
      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(fallthru->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              nexte = bb->pred;
              continue;
            }
        }
      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
      if (e->src->succ != e)
        continue;
      for (e2 = bb->pred; e2; e2 = nexte2)
        {
          nexte2 = e2->pred_next;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          /* If nothing changed since the last attempt, there is nothing
             we can do.  */
          if (!first_pass
              && (!(e->src->flags & BB_DIRTY)
                  && !(e2->src->flags & BB_DIRTY)))
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              nexte = bb->pred;
              break;
            }
        }
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
         a block has only one successor, and the successor has only
         one predecessor, they may be combined.  */
      do
        {
          changed = false;
          iterations++;

          if (dump_file)
            fprintf (dump_file,
                     "\n\ntry_optimize_cfg iteration %i\n\n",
                     iterations);
          for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
            {
              basic_block c;
              edge s;
              bool changed_here = false;
              /* Delete trivially dead basic blocks.  */
              while (b->pred == NULL)
                {
                  c = b->prev_bb;
                  if (dump_file)
                    fprintf (dump_file, "Deleting block %i.\n",
                             b->index);

                  delete_basic_block (b);
                  if (!(mode & CLEANUP_CFGLAYOUT))
                    changed = true;
                  b = c;
                }
              /* Remove code labels no longer used.  Don't do this
                 before CALL_PLACEHOLDER is removed, as some branches
                 may be hidden within.  */
              if (b->pred->pred_next == NULL
                  && (b->pred->flags & EDGE_FALLTHRU)
                  && !(b->pred->flags & EDGE_COMPLEX)
                  && GET_CODE (BB_HEAD (b)) == CODE_LABEL
                  && (!(mode & CLEANUP_PRE_SIBCALL)
                      || !tail_recursion_label_p (BB_HEAD (b)))
                  /* If the previous block ends with a branch to this
                     block, we can't delete the label.  Normally this
                     is a condjump that is yet to be simplified, but
                     if CASE_DROPS_THRU, this can be a tablejump with
                     some element going to the same place as the
                     default (fallthru).  */
                  && (b->pred->src == ENTRY_BLOCK_PTR
                      || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
                      || ! label_is_jump_target_p (BB_HEAD (b),
                                                   BB_END (b->pred->src))))
                {
                  rtx label = BB_HEAD (b);

                  delete_insn_chain (label, label);
                  /* In the case the label is undeletable, move it after the
                     BASIC_BLOCK note.  */
                  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
                    {
                      rtx bb_note = NEXT_INSN (BB_HEAD (b));

                      reorder_insns_nobb (label, label, bb_note);
                      BB_HEAD (b) = bb_note;
                    }
                  if (dump_file)
                    fprintf (dump_file, "Deleted label in block %i.\n",
                             b->index);
                }
              /* If we fall through an empty block, we can remove it.  */
              if (!(mode & CLEANUP_CFGLAYOUT)
                  && b->pred->pred_next == NULL
                  && (b->pred->flags & EDGE_FALLTHRU)
                  && GET_CODE (BB_HEAD (b)) != CODE_LABEL
                  && FORWARDER_BLOCK_P (b)
                  /* Note that forwarder_block_p true ensures that
                     there is a successor for this block.  */
                  && (b->succ->flags & EDGE_FALLTHRU)
                  && n_basic_blocks > 1)
                {
                  if (dump_file)
                    fprintf (dump_file,
                             "Deleting fallthru block %i.\n",
                             b->index);
                  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
                  redirect_edge_succ_nodup (b->pred, b->succ->dest);
                  delete_basic_block (b);
                  changed = true;
                  b = c;
                }
              if ((s = b->succ) != NULL
                  && s->succ_next == NULL
                  && !(s->flags & EDGE_COMPLEX)
                  && (c = s->dest) != EXIT_BLOCK_PTR
                  && c->pred->pred_next == NULL
                  && b != c)
                {
                  /* When not in cfg_layout mode use code aware of reordering
                     INSN.  This code possibly creates new basic blocks so it
                     does not fit the merge_blocks interface and is kept here
                     in the hope that it will become useless once more of the
                     compiler is transformed to use cfg_layout mode.  */
                  if ((mode & CLEANUP_CFGLAYOUT)
                      && can_merge_blocks_p (b, c))
                    {
                      merge_blocks (b, c);
                      update_forwarder_flag (b);
                      changed_here = true;
                    }
                  else if (!(mode & CLEANUP_CFGLAYOUT)
                           /* If the jump insn has side effects,
                              we can't kill the edge.  */
                           && (GET_CODE (BB_END (b)) != JUMP_INSN
                               || (reload_completed
                                   ? simplejump_p (BB_END (b))
                                   : onlyjump_p (BB_END (b))))
                           && (next = merge_blocks_move (s, b, c, mode)))
                    {
                      b = next;
                      changed_here = true;
                    }
                }
              /* Simplify branch over branch.  */
              if ((mode & CLEANUP_EXPENSIVE)
                  && !(mode & CLEANUP_CFGLAYOUT)
                  && try_simplify_condjump (b))
                changed_here = true;
              /* If B has a single outgoing edge, but uses a
                 non-trivial jump instruction without side-effects, we
                 can either delete the jump entirely, or replace it
                 with a simple unconditional jump.  */
              if (b->succ
                  && ! b->succ->succ_next
                  && b->succ->dest != EXIT_BLOCK_PTR
                  && onlyjump_p (BB_END (b))
                  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
                  && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
                                                     (mode & CLEANUP_CFGLAYOUT) != 0))
                {
                  update_forwarder_flag (b);
                  changed_here = true;
                }
              /* Simplify branch to branch.  */
              if (try_forward_edges (mode, b))
                changed_here = true;

              /* Look for shared code between blocks.  */
              if ((mode & CLEANUP_CROSSJUMP)
                  && try_crossjump_bb (mode, b))
                changed_here = true;
              /* Don't get confused by the index shift caused by
                 deleting blocks.  */
              if (!changed_here)
                b = b->next_bb;
              else
                changed = true;
            }

          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
            changed = true;

#ifdef ENABLE_CHECKING
          if (changed)
            verify_flow_info ();
#endif

          changed_overall |= changed;
          first_pass = false;
        }
      while (changed);
    }
  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
        {
          delete_basic_block (b);
          changed = true;
        }
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Merge sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (bb->succ
          && !bb->succ->succ_next
          && can_merge_blocks_p (bb, bb->succ->dest))
        {
          /* Merge the blocks and retry.  */
          merge_blocks (bb, bb->succ->dest);
          changed = true;
          continue;
        }

      bb = bb->next_bb;
    }

  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */
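/* A typical invocation (illustrative only; the CLEANUP_* mode flags are
   defined in basic-block.h):

     cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   which enables both the expensive block-motion transformations and tail
   merging in a single pass.  */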
bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
         now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL
                    | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
          && !reload_completed)
        delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
        {
          /* Cleaning up the CFG introduces more opportunities for dead code
             removal that in turn may introduce more opportunities for
             cleaning up the CFG.  */
          if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
                                                 PROP_DEATH_NOTES
                                                 | PROP_SCAN_DEAD_CODE
                                                 | PROP_KILL_DEAD_CODE
                                                 | ((mode & CLEANUP_LOG_LINKS)
                                                    ? PROP_LOG_LINKS : 0)))
            break;
        }
      else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
               && (mode & CLEANUP_EXPENSIVE)
               && !reload_completed)
        {
          if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
            break;
        }
      else
        break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}