/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
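/* For illustration, a "forwarder" block is an otherwise empty block that
   merely transfers control to its single successor:

	L1: goto L2;   =>   predecessors of L1 jump straight to L2;
			    L1 becomes unreachable and is removed.

   Edge forwarding repeatedly redirects edges past such blocks.  */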
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "cfglayout.h"
#include "tree-pass.h"
#include "df.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (bb == NULL)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}
/* Recompute forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
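/* Sketch of the transformation performed by try_simplify_condjump:

	if (cond) goto L2;		if (!cond) goto L3;
	goto L3;		=>    L2:
     L2: ...				...

   The conditional jump is inverted to target the simplejump's destination
   L3, the block holding the simplejump is deleted, and the former jump
   edge becomes the fallthru edge into L2.  */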
/* Attempt to prove that the operation is a NOOP using CSElib; otherwise
   mark the effect on the register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
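/* For example, if E->SRC contains

	(set (reg 70) (plus (reg 65) (const_int 4)))

   and B repeats the same computation

	(set (reg 70) (plus (reg 65) (const_int 4)))

   then by the time B is processed cselib already knows reg 70 holds that
   value, so rtx_equal_for_cselib_p on the SET's destination and source
   succeeds and the SET is proved a no-op; otherwise the destination
   register is marked in NONEQUAL.  */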
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */

static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
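/* Jump threading example (sketch):

	bb1:  if (a > 0) goto bb2;  else goto bb4;
	bb2:  if (a > 0) goto bb3;  else goto bb4;

   On the edge bb1->bb2 the condition a > 0 is known to be true; it
   dominates bb2's identical condition, and bb2 computes nothing else, so
   thread_jump returns bb2's taken edge and the caller can redirect bb1's
   taken edge straight to bb3.  */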
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | df_get_bb_dirty (b);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional
	 branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= df_get_bb_dirty (target);

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	    }

	  /* Allow to thread only over one edge at a time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge, n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    b->flags |= BB_FORWARDER_BLOCK;

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
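/* Sketch of the profile update above: when edge E with count C is forwarded
   across the chain B -> X -> Y -> TARGET, C is subtracted from the count and
   frequency of each intermediate block and of each edge traversed, since
   executions arriving via E no longer pass through them.  */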
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Removes the memory attributes of MEM expressions
   if they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
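/* For example, two otherwise identical loads whose MEMs carry different
   attributes, say

	(mem/c:SI (reg:SI 100) [2 a+0 S4 A32])
	(mem:SI (reg:SI 100) [3 b+0 S4 A32])

   can only be unified by dropping the mismatching alias set, MEM_EXPR and
   offset information, keeping the shared instruction conservatively
   correct for both original contexts.  */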
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return false;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
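/* For example, the insns

	(set (reg 70) (reg 65))  with note (REG_EQUAL (symbol_ref "f"))
	(set (reg 70) (reg 66))  with note (REG_EQUAL (symbol_ref "f"))

   have different patterns, but substituting the REG_EQUAL values for the
   sources makes them match, so the pair can still be crossjumped.  */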
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent instructions.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!old_insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
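/* Cross jumping example (sketch): when two predecessors of a join point
   end in the same insn sequence,

	bb1:  x = a + b;		bb1:  goto bb2;
	      goto bb3;		=>	bb2:  x = a + b;
	bb2:  x = a + b;		      (falls through to bb3)
	      (falls through to bb3)

   the matched tail found by flow_find_cross_jump is kept in one block and
   the other predecessor is redirected to it.  */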
/* Return true iff the condbranches at the end of BB1 and BB2 match.  */

bool
condjump_equiv_p (struct equiv_info *info, bool call_init)
{
  basic_block bb1 = info->x_block;
  basic_block bb2 = info->y_block;
  edge b1 = BRANCH_EDGE (bb1);
  edge b2 = BRANCH_EDGE (bb2);
  edge f1 = FALLTHRU_EDGE (bb1);
  edge f2 = FALLTHRU_EDGE (bb2);
  bool reverse, match;
  rtx set1, set2, cond1, cond2;
  rtx src1, src2;
  enum rtx_code code1, code2;

  /* Get around possible forwarders on fallthru edges.  Other cases
     should be optimized out already.  */
  if (FORWARDER_BLOCK_P (f1->dest))
    f1 = single_succ_edge (f1->dest);

  if (FORWARDER_BLOCK_P (f2->dest))
    f2 = single_succ_edge (f2->dest);

  /* To simplify use of this function, return false if there are
     unneeded forwarder blocks.  These will get eliminated later
     during cleanup_cfg.  */
  if (FORWARDER_BLOCK_P (f1->dest)
      || FORWARDER_BLOCK_P (f2->dest)
      || FORWARDER_BLOCK_P (b1->dest)
      || FORWARDER_BLOCK_P (b2->dest))
    return false;

  if (f1->dest == f2->dest && b1->dest == b2->dest)
    reverse = false;
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
    reverse = true;
  else
    return false;

  set1 = pc_set (BB_END (bb1));
  set2 = pc_set (BB_END (bb2));
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    reverse = !reverse;

  src1 = SET_SRC (set1);
  src2 = SET_SRC (set2);
  cond1 = XEXP (src1, 0);
  cond2 = XEXP (src2, 0);
  code1 = GET_CODE (cond1);
  if (reverse)
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
  else
    code2 = GET_CODE (cond2);

  if (code2 == UNKNOWN)
    return false;

  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
    gcc_unreachable ();
  /* Make the sources of the pc sets unreadable so that when we call
     insns_match_p it won't process them.
     The death_notes_match_p from insns_match_p won't see the local registers
     used for the pc set, but that could only cause missed optimizations when
     there are actually condjumps that use stack registers.  */
  SET_SRC (set1) = pc_rtx;
  SET_SRC (set2) = pc_rtx;
  /* Verify codes and operands match.  */
  if (code1 == code2)
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));

    }
  else if (code1 == swap_condition (code2))
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
	       && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
	       && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));

    }
  else
    match = false;
  SET_SRC (set1) = src1;
  SET_SRC (set2) = src2;
  match &= verify_changes (0);

  /* If we return true, we will join the blocks.  Which means that
     we will only have one branch prediction bit to work with.  Thus
     we require the existing branches to have probabilities that are
     roughly similar.  */
  if (match
      && !optimize_size
      && maybe_hot_bb_p (bb1)
      && maybe_hot_bb_p (bb2))
    {
      int prob2;

      if (b1->dest == b2->dest)
	prob2 = b2->probability;
      else
	/* Do not use f2 probability as f2 may be forwarded.  */
	prob2 = REG_BR_PROB_BASE - b2->probability;

      /* Fail if the difference in probabilities is greater than 50%.
	 This rules out two well-predicted branches with opposite
	 outcomes.  */
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
		     bb1->index, bb2->index, b1->probability, prob2);

	  match = false;
	}
    }

  if (dump_file && match)
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
	     bb1->index, bb2->index);

  return match;
}
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }
  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are same
	     the jump tables are same too.  So disable crossjumping of blocks BB1
	     and BB2 because when deleting the common insns in the end of BB1
	     by delete_basic_block () the jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		identical = true;
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     careful checking.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  It is required for the RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
	d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
	{
	  basic_block d2 = e2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d1 == d2)
	    break;
	}

      if (!e2)
	return false;
    }

  return true;
}
/* Returns true if the basic block BB has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
	  && block_label (bb)
	  && LABEL_PRESERVE_P (block_label (bb)));
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
	  && tablejump_p (BB_END (src2), &label2, &table2)
	  && label1 != label2)
	{
	  replace_label_data rr;
	  rtx insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  rr.r1 = label1;
	  rr.r2 = label2;
	  rr.update_label_nuses = true;
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (src1))
		for_each_rtx (&insn, replace_label, &rr);
	    }
	}
    }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
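/* For illustration: when only the last NMATCH insns of SRC2 match, SRC2 is
   split before the matched tail,

	src2: a; b; c; d;   =>   src2: a; b;   new bb: c; d;

   and the edge out of SRC1 (split likewise) is redirected to the new block,
   so both paths share a single copy of "c; d".  */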
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (!optimize_size
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(df_get_bb_dirty (e->src))
		  && !(df_get_bb_dirty (fallthru->src))))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(df_get_bb_dirty (e->src))
		  && !(df_get_bb_dirty (e2->src))))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      if (EDGE_COUNT (b->preds) == 0)
		{
		  c = b->prev_bb;
		  if (dump_file)
		    fprintf (dump_file, "Deleting block %i.\n",
			     b->index);

		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  /* Avoid trying to remove ENTRY_BLOCK_PTR.  */
		  b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
		  continue;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label, false);
		  /* If the case label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		      if (BB_END (b) == bb_note)
			BB_END (b) = label;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		}

	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of compiler
		     is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  delete_basic_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn as they are referenced by the preceding
   insns computing the destination, so we delay deleting and garbage collect
   them once life information is computed.  */

void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB (bb)
    {
      rtx insn, next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_P (next)
	      && (GET_CODE (PATTERN (next)) == ADDR_VEC
		  || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
	    {
	      rtx label = insn, jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL)
	  && (mode & CLEANUP_EXPENSIVE)
	  && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	}
      else
	break;
    }

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
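/* Typical usage: callers pass a mask of CLEANUP_* flags selecting how
   aggressive the cleanup may be, e.g. (as rest_of_handle_jump2 below does)

	cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_THREADING);

   which enables code-moving block merges and jump threading on top of the
   always-performed unreachable block removal and edge forwarding.  */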
static unsigned int
rest_of_handle_jump (void)
{
  delete_unreachable_blocks ();

  if (cfun->tail_call_emit)
    fixup_tail_calls ();
  return 0;
}

struct tree_opt_pass pass_jump =
{
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func |
  TODO_verify_flow,                     /* todo_flags_finish */
  0                                     /* letter */
};
static unsigned int
rest_of_handle_jump2 (void)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

struct tree_opt_pass pass_jump2 =
{
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  0                                     /* letter */
};