/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */

#include "coretypes.h"
#include "insn-config.h"
#include "tree-pass.h"
#include "cfgcleanup.h"
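
/* Throughout this file a "forwarder block" is a basic block that contains
   nothing but (at most) an unconditional jump and simply transfers control
   to its single successor.  notice_new_block and update_forwarder_flag
   below cache forwarder_block_p () in the block flags, so chains of such
   blocks can be looked through cheaply via FORWARDER_BLOCK_P.  */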

#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;

static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);

/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (bb == NULL)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
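
/* For illustration (a sketch, not taken from any particular dump), the
   shape handled here is:

       if (cond) goto A;             if (! cond) goto B;
       goto B;                 -->
    A: ...                        A: ...

   The conditional branch is inverted so that it targets the destination
   of the unconditional jump; the jump's block becomes dead and control
   falls through to A.  */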

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
                    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}

/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */
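
/* For example, a (clobber (reg A)) below clears A from NONEQUAL, since a
   dead value need not match, while a (set (reg A) (reg B)) marks A as
   (potentially) nonequal unless cselib can prove that B already holds
   A's current value.  */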

static bool
mark_effect (rtx exp, regset nonequal)
{
  rtx dest;

  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      dest = XEXP (exp, 0);
      if (REG_P (dest))
        bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (!REG_P (dest))
        return true;
      bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    default:
      return false;
    }
}

/* Return true if X contains a register in NONEQUAL.  */

static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
        {
          unsigned int end_regno = END_REGNO (x);
          for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
            if (REGNO_REG_SET_P (nonequal, regno))
              return true;
        }
    }
  return false;
}

/* Attempt to prove that the basic block B will have no side effects and
   always continues along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */
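
/* As a sketch: if E->src ends in "if (x == 0) ..." and B, reached via E,
   ends in "if (x == 0) goto L" with nothing in between that has side
   effects or changes x, then B always leaves through the same edge when
   entered via E; that edge is returned so the caller can forward E past
   B.  */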

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (mentions_nonequal_regs (cond2, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
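
/* Concretely, a chain B -> F1 -> F2 -> T in which F1 and F2 are forwarder
   blocks is shortened by redirecting the edge leaving B straight to T;
   with CLEANUP_THREADING the walk below may additionally follow edges
   that thread_jump proves are always taken.  */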

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed to forward fallthru
         edge to the same place as the branch edge of conditional branch
         and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && JUMP_P (BB_END (first))
          && CROSSING_JUMP_P (BB_END (first)))
        return changed;

      while (counter < n_basic_blocks_for_fn (cfun))
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= (target->flags & BB_MODIFIED) != 0;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
            {
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks_for_fn (cfun);
              else if (!optimize)
                {
                  /* When not optimizing, ensure that edges or forwarder
                     blocks with different locus are not optimized out.  */
                  location_t new_locus = single_succ_edge (target)->goto_locus;
                  location_t locus = goto_locus;

                  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
                      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
                      && new_locus != locus)
                    new_target = NULL;
                  else
                    {
                      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
                        locus = new_locus;

                      rtx_insn *last = BB_END (target);
                      if (DEBUG_INSN_P (last))
                        last = prev_nondebug_insn (last);
                      if (last && INSN_P (last))
                        new_locus = INSN_LOCATION (last);
                      else
                        new_locus = UNKNOWN_LOCATION;

                      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
                          && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
                          && new_locus != locus)
                        new_target = NULL;
                      else
                        {
                          if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
                            locus = new_locus;

                          goto_locus = locus;
                        }
                    }
                }
            }

          /* Allow to thread only over one edge at a time to simplify updating
             of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = XNEWVEC (edge,
                                              n_basic_blocks_for_fn (cfun));
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks_for_fn (cfun);
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges
                              < (n_basic_blocks_for_fn (cfun)
                                 - NUM_FIXED_BLOCKS));
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks_for_fn (cfun))
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          e->goto_locus = goto_locus;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = apply_probability (b->frequency, edge_probability);

          do
            {
              edge t;

              if (!single_succ_p (first))
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed edge as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
        }
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
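
/* This works by physically moving A's insns to just before B in the insn
   chain (after deleting the barrier that followed A), at which point the
   two blocks are contiguous and can be merged by the generic block-merging
   code.  */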

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (as_a <rtx_insn *> (label)) == BB_END (b))
    BB_END (b) = table;

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   where C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
        return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}

/* Removes the memory attributes of the MEM expressions X and Y
   if they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          HOST_WIDE_INT mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              clear_mem_offset (x);
              clear_mem_offset (y);
            }
          else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
                   || (MEM_OFFSET_KNOWN_P (x)
                       && MEM_OFFSET (x) != MEM_OFFSET (y)))
            {
              clear_mem_offset (x);
              clear_mem_offset (y);
            }

          if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
            {
              mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
              set_mem_size (x, mem_size);
              set_mem_size (y, mem_size);
            }
          else
            {
              clear_mem_size (x);
              clear_mem_size (y);
            }

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }
  if (code == MEM)
    {
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
        {
          MEM_READONLY_P (x) = 0;
          MEM_READONLY_P (y) = 0;
        }
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
        {
          MEM_NOTRAP_P (x) = 0;
          MEM_NOTRAP_P (y) = 0;
        }
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
        {
          MEM_VOLATILE_P (x) = 1;
          MEM_VOLATILE_P (y) = 1;
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}

/* Checks if patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
        continue;
      if (reload_completed
          ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
        continue;

      return false;
    }

  return true;
}

/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
   that is a single_set with a SET_SRC of SRC1.  Similarly
   with NOTE2/SRC2.

   So effectively NOTE1/NOTE2 are an alternate form of
   SRC1/SRC2 respectively.

   Return nonzero if SRC1 or NOTE1 has the same constant
   integer value as SRC2 or NOTE2.  Else return zero.  */

static int
values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
{
  if (note1
      && note2
      && CONST_INT_P (XEXP (note1, 0))
      && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
    return 1;

  if (!note1
      && !note2
      && CONST_INT_P (src1)
      && CONST_INT_P (src2)
      && rtx_equal_p (src1, src2))
    return 1;

  if (note1
      && CONST_INT_P (src2)
      && rtx_equal_p (XEXP (note1, 0), src2))
    return 1;

  if (note2
      && CONST_INT_P (src1)
      && rtx_equal_p (XEXP (note2, 0), src1))
    return 1;

  return 0;
}

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */
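
/* For example (a sketch): if I1 is (set (reg D) (const_int 0)) and I2 is
   (set (reg D) (reg Y)) carrying a REG_EQUAL note of (const_int 0), both
   set D to zero, but only I2 may be rewritten as I1 -- substituting the
   register copy at I1's location would rely on Y still holding zero
   there -- so the result is dir_backward.  */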

static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
        ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical req_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);

  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);

  if (!values_equal_p (note1, note2, src1, src2))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
     ---+-----------
     bo |bo fw bw no
     fw |fw fw no no
     bw |bw no bw no
     no |no no no no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
        return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
         have differing incoming stack levels.  */
      if (!frame_pointer_needed
          && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
        return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
        return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
        return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
        return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
         otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
        {
          rtx call = get_call_rtx_from (i1);
          if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
            {
              rtx symbol = XEXP (XEXP (call, 0), 0);
              if (SYMBOL_REF_DECL (symbol)
                  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
                {
                  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
                       == BUILT_IN_NORMAL)
                      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
                         >= BUILT_IN_ASAN_REPORT_LOAD1
                      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
                         <= BUILT_IN_ASAN_STOREN)
                    return dir_none;
                }
            }
        }
    }

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
        return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}

/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */
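
/* "Match" here is conservative: a REG_EQUAL note that is present on only
   one of the insns, or whose value differs, is simply dropped so the
   surviving insn does not claim knowledge that held on just one path.  */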

static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
           && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}

/* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
   DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
                       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
        {
          *i1 = PREV_INSN (*i1);
          continue;
        }

      if (!follow_fallthru)
        return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
          || !single_succ_p (fallthru->src))
        return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}

/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of insns that are either equivalent, or allow forward or backward
   replacement.  Store the first insns for that sequence in *F1 and *F2 and
   return the sequence length.

   DIR_P indicates the allowed replacement direction on function entry, and
   the actual replacement direction on function exit.  If NULL, only equivalent
   sequences are allowed.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
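
/* On exit dir_both means the matched sequences are interchangeable,
   while dir_forward / dir_backward mean only one of the two blocks may
   be rewritten in terms of the other (see can_replace_by above).  */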

int
flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
                      rtx_insn **f2, enum replace_direction *dir_p)
{
  rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
  int ninsns = 0;
  enum replace_direction dir, last_dir, afterlast_dir;
  bool follow_fallthru, did_fallthru;

  if (dir_p)
    dir = *dir_p;
  else
    dir = dir_both;
  afterlast_dir = dir;
  last_dir = afterlast_dir;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.
         Don't count any jumps if dir_p is NULL.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* In the following example, we can replace all jumps to C by jumps to A.

         This removes 4 duplicate insns.
         [bb A] insn1            [bb C] insn1
                insn2                   insn2
         [bb B] insn3                   insn3
                insn4                   insn4
                jump_insn               jump_insn

         We could also replace all jumps to A by jumps to C, but that leaves B
         alive, and removes only 2 duplicate insns.  In a subsequent crossjump
         step, all jumps to B would be replaced with jumps to the middle of C,
         achieving the same result with more effort.
         So we allow only the first possibility, which means that we don't allow
         fallthru in the block that's being replaced.  */

      follow_fallthru = dir_p && dir != dir_forward;
      walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_backward;

      follow_fallthru = dir_p && dir != dir_backward;
      walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
      if (did_fallthru)
        dir = dir_forward;

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
      if (dir == dir_none || (!dir_p && dir != dir_both))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          merge_notes (i1, i2);

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          afterlast_dir = last_dir;
          last_dir = dir;
          if (active_insn_p (i1))
            ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
      && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      bb1 = BLOCK_FOR_INSN (last1);
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      bb2 = BLOCK_FOR_INSN (last2);
      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  if (dir_p)
    *dir_p = last_dir;
  return ninsns;
}

/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
   nonzero, only count active insns.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
                                  rtx_insn **f2, int stop_after)
{
  rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
        {
          if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i1 = NEXT_INSN (i1);
        }

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
        {
          if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
            break;
          i2 = NEXT_INSN (i2);
        }

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
          || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
        break;

      if (NOTE_P (i1) || NOTE_P (i2)
          || JUMP_P (i1) || JUMP_P (i2))
        break;

      /* A sanity check to make sure we're not merging insns with different
         effects on EH.  If only one of them ends a basic block, it shouldn't
         have an EH edge; if both end a basic block, there should be the same
         number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
           && nehedges1 > 0)
          || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
              && nehedges2 > 0)
          || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
              && nehedges1 != nehedges2))
        break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          merge_notes (i1, i2);

          beforelast1 = last1, beforelast2 = last2;
          last1 = i1, last2 = i2;
          if (!stop_after || active_insn_p (i1))
            ninsns++;
        }

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
          || (stop_after > 0 && ninsns == stop_after))
        break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
      && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
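
/* For instance, two blocks both ending in "if (a < b) goto L" whose taken
   and fallthru edges reach the same destinations (possibly through
   forwarder blocks) match; even then the pair is rejected below if the
   recorded branch probabilities differ wildly, because the merged block
   can keep only a single prediction.  */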

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If we performed shrink-wrapping, edges to the exit block can
     only be distinguished for JUMP_INSNs.  The two paths may differ in
     whether they went through the prologue.  Sibcalls are fine, we know
     that we either didn't need or inserted an epilogue before them.  */
  if (crtl->shrink_wrapped
      && single_succ_p (bb1)
      && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      && !JUMP_P (BB_END (bb1))
      && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
    return false;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
            && (single_succ_edge (bb2)->flags
                & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
        code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      if (match
          && optimize_bb_for_speed_p (bb1)
          && optimize_bb_for_speed_p (bb2))
        {
          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;

          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;
            }
        }

      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx_jump_table_data *table1, *table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
          && tablejump_p (BB_END (bb2), &label2, &table2)
          && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
        {
          /* The labels should never be the same rtx.  If they really are same
             the jump tables are same too. So disable crossjumping of blocks BB1
             and BB2 because when deleting the common insns in the end of BB1
             by delete_basic_block () the jump table would be deleted too.  */
          /* If LABEL2 is referenced in BB1->END do not do anything
             because we would lose information when replacing
             LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
          if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
            {
              /* Set IDENTICAL to true when the tables are identical.  */
              bool identical = false;
              rtx p1, p2;

              p1 = PATTERN (table1);
              p2 = PATTERN (table2);
              if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
                identical = true;
              else if (GET_CODE (p1) == ADDR_DIFF_VEC
                       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
                {
                  int i;

                  identical = true;
                  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                      identical = false;
                }

              if (identical)
                {
                  bool match;

                  /* Temporarily replace references to LABEL1 with LABEL2
                     in BB1->END so that we could compare the instructions.  */
                  replace_label_in_insn (BB_END (bb1), label1, label2, false);

                  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
                           == dir_both);
                  if (dump_file && match)
                    fprintf (dump_file,
                             "Tablejumps in bb %i and %i match.\n",
                             bb1->index, bb2->index);

                  /* Set the original label in BB1->END because when deleting
                     a block whose end is a tablejump, the tablejump referenced
                     from the instruction is deleted too.  */
                  replace_label_in_insn (BB_END (bb1), label2, label1, false);

                  return match;
                }
            }
          return false;
        }
    }

  /* Find the last non-debug non-note instruction in each bb, except
     stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
     handles that case specially.  old_insns_match_p does not handle
     other types of instruction notes.  */
  rtx_insn *last1 = BB_END (bb1);
  rtx_insn *last2 = BB_END (bb2);
  while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
         (DEBUG_INSN_P (last1) || NOTE_P (last1)))
    last1 = PREV_INSN (last1);
  while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
         (DEBUG_INSN_P (last2) || NOTE_P (last2)))
    last2 = PREV_INSN (last2);
  gcc_assert (last1 && last2);

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (old_insns_match_p (mode, last1, last2) != dir_both)
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     undocumented assumptions to hold true.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  bool nonfakeedges = false;
  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if ((e1->flags & EDGE_FAKE) == 0)
        nonfakeedges = true;

      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
     and the last real insn doesn't have REG_ARGS_SIZE note, don't
     attempt to optimize, as the two basic blocks might have different
     REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARG_SIZE notes, they could be missing
     for __builtin_unreachable () uses though.  */
  if (!nonfakeedges
      && !ACCUMULATE_OUTGOING_ARGS
      && (!INSN_P (last1)
          || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  It is required for RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
        d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
        {
          basic_block d2 = e2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = EDGE_SUCC (d2, 0)->dest;
          if (d1 == d2)
            break;
        }

      if (!e2)
        return false;
    }

  return true;
}

/* Returns true if BB basic block has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
          && block_label (bb)
          && LABEL_PRESERVE_P (block_label (bb)));
}

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   or the other way around (dir_backward).  DIR specifies the allowed
   replacement direction.  */
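
/* In outline: the matching tail found by flow_find_cross_jump is split
   off the "from" block, the edge leaving the split-off part is redirected
   to the corresponding position in the "to" block, and the now-dead copy
   of the common code is deleted, with the edge counts and frequencies of
   the surviving copy updated to account for both paths.  */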

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2,
                       enum replace_direction dir)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
  rtx_insn *newpos1, *newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (crtl->has_bb_partition && reload_completed)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || src2 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

  osrc1 = src1;
  osrc2 = src2;
  if (newpos1 != NULL_RTX)
    src1 = BLOCK_FOR_INSN (newpos1);

  if (newpos2 != NULL_RTX)
    src2 = BLOCK_FOR_INSN (newpos2);

  if (dir == dir_backward)
    {
#define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
      SWAP (basic_block, osrc1, osrc2);
      SWAP (basic_block, src1, src2);
      SWAP (edge, e1, e2);
      SWAP (rtx_insn *, newpos1, newpos2);
#undef SWAP
    }

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx_jump_table_data *table1, *table2;

    if (tablejump_p (BB_END (osrc1), &label1, &table1)
        && tablejump_p (BB_END (osrc2), &label2, &table2)
        && label1 != label2)
      {
        rtx_insn *insn;

        /* Replace references to LABEL1 with LABEL2.  */
        for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
          {
            /* Do not replace the label in SRC1->END because when deleting
               a block whose end is a tablejump, the tablejump referenced
               from the instruction is deleted too.  */
            if (insn != BB_END (osrc1))
              replace_label_in_insn (insn, label1, label2, true);
          }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
        {
          /* Skip possible basic block header.  */
          if (LABEL_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          while (DEBUG_INSN_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          if (NOTE_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          while (DEBUG_INSN_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
        }

      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  if (osrc2 == src2)
    redirect_edges_to = redirect_to;
  else
    redirect_edges_to = osrc2;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = single_succ (d2);
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          single_succ_edge (s->dest)->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          single_succ_edge (s2->dest)->count -= s2->count;
          if (single_succ_edge (s2->dest)->count < 0)
            single_succ_edge (s2->dest)->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

      if (!redirect_edges_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_edges_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_edges_to->frequency + src1->frequency));
    }

  /* Adjust count and frequency for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  tmp = redirect_to;
  do
    {
      tmp->count += src1->count;
      tmp->frequency += src1->frequency;
      if (tmp->frequency > BB_FREQ_MAX)
        tmp->frequency = BB_FREQ_MAX;
      if (tmp == redirect_edges_to)
        break;
      tmp = find_fallthru_edge (tmp->succs)->dest;
    }
  while (true);
  update_br_prob_note (redirect_edges_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}

/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
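
/* The pairwise search below is quadratic in the number of predecessors,
   so blocks with more than PARAM_MAX_CROSSJUMP_EDGES incoming edges are
   skipped; the fallthru predecessor is tried first since redirecting a
   branch into a block that already falls through into BB adds no new
   jump.  */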
2161 try_crossjump_bb (int mode
, basic_block bb
)
2163 edge e
, e2
, fallthru
;
2165 unsigned max
, ix
, ix2
;
2167 /* Nothing to do if there is not at least two incoming edges. */
2168 if (EDGE_COUNT (bb
->preds
) < 2)
2171 /* Don't crossjump if this block ends in a computed jump,
2172 unless we are optimizing for size. */
2173 if (optimize_bb_for_size_p (bb
)
2174 && bb
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
2175 && computed_jump_p (BB_END (bb
)))
2178 /* If we are partitioning hot/cold basic blocks, we don't want to
2179 mess up unconditional or indirect jumps that cross between hot
2182 Basic block partitioning may result in some jumps that appear to
2183 be optimizable (or blocks that appear to be mergeable), but which really
2184 must be left untouched (they are required to make it safely across
2185 partition boundaries). See the comments at the top of
2186 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2188 if (BB_PARTITION (EDGE_PRED (bb
, 0)->src
) !=
2189 BB_PARTITION (EDGE_PRED (bb
, 1)->src
)
2190 || (EDGE_PRED (bb
, 0)->flags
& EDGE_CROSSING
))
2193 /* It is always cheapest to redirect a block that ends in a branch to
2194 a block that falls through into BB, as that adds no branches to the
2195 program. We'll try that combination first. */
2197 max
= PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES
);
2199 if (EDGE_COUNT (bb
->preds
) > max
)
2202 fallthru
= find_fallthru_edge (bb
->preds
);
2205 for (ix
= 0; ix
< EDGE_COUNT (bb
->preds
);)
2207 e
= EDGE_PRED (bb
, ix
);
2210 /* As noted above, first try with the fallthru predecessor (or, a
2211 fallthru predecessor if we are in cfglayout mode). */
2214 /* Don't combine the fallthru edge into anything else.
2215 If there is a match, we'll do it the other way around. */
2218 /* If nothing changed since the last attempt, there is nothing
2221 && !((e
->src
->flags
& BB_MODIFIED
)
2222 || (fallthru
->src
->flags
& BB_MODIFIED
)))
2225 if (try_crossjump_to_edge (mode
, e
, fallthru
, dir_forward
))
2233 /* Non-obvious work limiting check: Recognize that we're going
2234 to call try_crossjump_bb on every basic block. So if we have
2235 two blocks with lots of outgoing edges (a switch) and they
2236 share lots of common destinations, then we would do the
2237 cross-jump check once for each common destination.
2239 Now, if the blocks actually are cross-jump candidates, then
2240 all of their destinations will be shared. Which means that
2241 we only need check them for cross-jump candidacy once. We
2242 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2243 choosing to do the check from the block for which the edge
2244 in question is the first successor of A. */
2245 if (EDGE_SUCC (e
->src
, 0) != e
)
      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
	{
	  e2 = EDGE_PRED (bb, ix2);

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (e2->src->flags & BB_MODIFIED)))
	    continue;

	  /* Both e and e2 are not fallthru edges, so we can crossjump in
	     either direction.  */
	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
	    {
	      changed = true;
	      ix = 0;
	      break;
	    }
	}
    }

  if (changed)
    crossjumps_occured = true;

  return changed;
}
/* Search the successors of BB for common insn sequences.  When found,
   share code between them by moving it across the basic block
   boundary.  Return true if any changes made.  */

static bool
try_head_merge_bb (basic_block bb)
{
  basic_block final_dest_bb = NULL;
  int max_match = INT_MAX;
  edge e0;
  rtx_insn **headptr, **currptr, **nextptr;
  bool changed, moveall;
  unsigned ix;
  rtx_insn *e0_last_head;
  rtx cond;
  rtx_insn *move_before;
  unsigned nedges = EDGE_COUNT (bb->succs);
  rtx_insn *jump = BB_END (bb);
  regset live, live_union;

  /* Nothing to do if there is not at least two outgoing edges.  */
  if (nedges < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  cond = get_condition (jump, &move_before, true, false);
  if (cond == NULL_RTX)
    {
      if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	move_before = prev_nonnote_nondebug_insn (jump);
      else
	move_before = jump;
    }

  for (ix = 0; ix < nedges; ix++)
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
      return false;

  for (ix = 0; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      basic_block other_bb = e->dest;

      if (df_get_bb_dirty (other_bb))
	{
	  block_was_dirty = true;
	  return false;
	}

      if (e->flags & EDGE_ABNORMAL)
	return false;

      /* Normally, all destination blocks must only be reachable from this
	 block, i.e. they must have one incoming edge.

	 There is one special case we can handle, that of multiple consecutive
	 jumps where the first jumps to one of the targets of the second jump.
	 This happens frequently in switch statements for default labels.
	 The structure is as follows:
	 FINAL_DEST_BB
	 ....
	 if (cond) jump A;
	 fall through
	 BB
	 jump with targets A, B, C, D...
	 A
	 has two incoming edges, from FINAL_DEST_BB and BB

	 In this case, we can try to move the insns through BB and into
	 FINAL_DEST_BB.  */
      if (EDGE_COUNT (other_bb->preds) != 1)
	{
	  edge incoming_edge, incoming_bb_other_edge;
	  edge_iterator ei;

	  if (final_dest_bb != NULL
	      || EDGE_COUNT (other_bb->preds) != 2)
	    return false;

	  /* We must be able to move the insns across the whole block.  */
	  move_before = BB_HEAD (bb);
	  while (!NONDEBUG_INSN_P (move_before))
	    move_before = NEXT_INSN (move_before);

	  if (EDGE_COUNT (bb->preds) != 1)
	    return false;
	  incoming_edge = EDGE_PRED (bb, 0);
	  final_dest_bb = incoming_edge->src;
	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
	    return false;
	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
	    if (incoming_bb_other_edge != incoming_edge)
	      break;
	  if (incoming_bb_other_edge->dest != other_bb)
	    return false;
	}
    }

  e0 = EDGE_SUCC (bb, 0);
  e0_last_head = NULL;
  changed = false;

  for (ix = 1; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      rtx_insn *e0_last, *e_last;
      int nmatch;

      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						 &e0_last, &e_last, 0);
      if (nmatch == 0)
	return false;

      if (nmatch < max_match)
	{
	  max_match = nmatch;
	  e0_last_head = e0_last;
	}
    }

  /* If we matched an entire block, we probably have to avoid moving the
     last insn.  */
  if (max_match > 0
      && e0_last_head == BB_END (e0->dest)
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
	  || control_flow_insn_p (e0_last_head)))
    {
      max_match--;
      if (max_match == 0)
	return false;
      do
	e0_last_head = prev_real_insn (e0_last_head);
      while (DEBUG_INSN_P (e0_last_head));
    }

  if (max_match == 0)
    return false;

  /* We must find a union of the live registers at each of the end points.  */
  live = BITMAP_ALLOC (NULL);
  live_union = BITMAP_ALLOC (NULL);

  currptr = XNEWVEC (rtx_insn *, nedges);
  headptr = XNEWVEC (rtx_insn *, nedges);
  nextptr = XNEWVEC (rtx_insn *, nedges);

  for (ix = 0; ix < nedges; ix++)
    {
      int j;
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
      rtx_insn *head = BB_HEAD (merge_bb);

      while (!NONDEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      headptr[ix] = head;
      currptr[ix] = head;

      /* Compute the end point and live information  */
      for (j = 1; j < max_match; j++)
	do
	  head = NEXT_INSN (head);
	while (!NONDEBUG_INSN_P (head));
      simulate_backwards_to_point (merge_bb, live, head);
      IOR_REG_SET (live_union, live);
    }
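  /* LIVE_UNION now holds the union of the registers live at the end point
     of the matched sequence in each successor.  The can_move_insns_across
     calls below use it to decide whether the common head can be hoisted
     across the jump without clobbering anything that is still needed.  */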
  /* If we're moving across two blocks, verify the validity of the
     first move, then adjust the target and let the loop below deal
     with the final move.  */
  if (final_dest_bb != NULL)
    {
      rtx_insn *move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
				       jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall)
	{
	  if (move_upto == NULL_RTX)
	    goto out;

	  while (e0_last_head != move_upto)
	    {
	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
					      live_union);
	      e0_last_head = PREV_INSN (e0_last_head);
	    }
	}
      if (e0_last_head == NULL_RTX)
	goto out;

      jump = BB_END (final_dest_bb);
      cond = get_condition (jump, &move_before, true, false);
      if (cond == NULL_RTX)
	{
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    move_before = prev_nonnote_nondebug_insn (jump);
	  else
	    move_before = jump;
	}
    }

  do
    {
      rtx_insn *move_upto;
      moveall = can_move_insns_across (currptr[0], e0_last_head,
				       move_before, jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall && move_upto == NULL_RTX)
	{
	  if (jump == move_before)
	    break;

	  /* Try again, using a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  continue;
	}

      if (final_dest_bb && !moveall)
	/* We haven't checked whether a partial move would be OK for the first
	   move, so we have to fail this case.  */
	break;

      changed = true;
      for (;;)
	{
	  if (currptr[0] == move_upto)
	    break;
	  for (ix = 0; ix < nedges; ix++)
	    {
	      rtx_insn *curr = currptr[ix];
	      do
		curr = NEXT_INSN (curr);
	      while (!NONDEBUG_INSN_P (curr));
	      currptr[ix] = curr;
	    }
	}

      /* If we can't currently move all of the identical insns, remember
	 each insn after the range that we'll merge.  */
      if (!moveall)
	for (ix = 0; ix < nedges; ix++)
	  {
	    rtx_insn *curr = currptr[ix];
	    do
	      curr = NEXT_INSN (curr);
	    while (!NONDEBUG_INSN_P (curr));
	    nextptr[ix] = curr;
	  }
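      /* Move the matched sequence of the first successor up into BB, just
	 before MOVE_BEFORE, and delete the now-redundant copies from the
	 other successors.  */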
      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
      if (final_dest_bb != NULL)
	df_set_bb_dirty (final_dest_bb);
      df_set_bb_dirty (bb);
      for (ix = 1; ix < nedges; ix++)
	{
	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
	  delete_insn_chain (headptr[ix], currptr[ix], false);
	}
      if (!moveall)
	{
	  if (jump == move_before)
	    break;

	  /* For the unmerged insns, try a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  for (ix = 0; ix < nedges; ix++)
	    currptr[ix] = headptr[ix] = nextptr[ix];
	}
    }
  while (!moveall);

 out:
  free (currptr);
  free (headptr);
  free (nextptr);

  crossjumps_occured |= changed;

  return changed;
}
/* Return true if BB contains just bb note, or bb note followed
   by only DEBUG_INSNs.  */

static bool
trivially_empty_bb_p (basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (1)
    {
      if (insn == BB_HEAD (bb))
	return true;
      if (!DEBUG_INSN_P (insn))
	return false;
      insn = PREV_INSN (insn);
    }
}
/* Return true if BB contains just a return and possibly a USE of the
   return value.  Fill in *RET and *USE with the return and use insns
   if any found, otherwise NULL.  */
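/* For example, a block whose only non-debug insns are a USE of the function
   value register followed by a "return" or "simple_return" pattern
   qualifies; any other insn in the block makes the predicate fail.  */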
static bool
bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
{
  rtx_insn *insn;

  *ret = *use = NULL;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return false;

  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	if (!*ret && ANY_RETURN_P (PATTERN (insn)))
	  *ret = insn;
	else if (!*ret && !*use && GET_CODE (PATTERN (insn)) == USE
		 && REG_P (XEXP (PATTERN (insn), 0))
		 && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
	  *use = insn;
	else
	  return false;
      }

  return !!*ret;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occured = false;

  FOR_EACH_BB_FN (bb, cfun)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
	       != EXIT_BLOCK_PTR_FOR_FN (cfun);)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
		      != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  if (BB_FOOTER (b)
			      && BARRIER_P (BB_FOOTER (b)))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU)
				  && BB_FOOTER (e->src) == NULL)
				{
				  if (BB_FOOTER (b))
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    {
				      start_sequence ();
				      BB_FOOTER (e->src) = emit_barrier ();
				      end_sequence ();
				    }
				}
			}
		      else
			{
			  rtx_insn *last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Avoid trying to remove the exit block.  */
		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
		  continue;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  && !LABEL_PRESERVE_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		       ? b->next_bb : b->prev_bb);
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}

	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of compiler
		     is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Try to change a branch to a return to just that return.  */
	      rtx_insn *ret, *use;
	      if (single_succ_p (b)
		  && onlyjump_p (BB_END (b))
		  && bb_is_just_return (single_succ (b), &ret, &use))
		{
		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				     PATTERN (ret), 0))
		    {
		      if (use)
			emit_insn_before (copy_insn (PATTERN (use)),
					  BB_END (b));
		      if (dump_file)
			fprintf (dump_file, "Changed jump %d->%d to return.\n",
				 b->index, single_succ (b)->index);
		      redirect_edge_succ (single_succ_edge (b),
					  EXIT_BLOCK_PTR_FOR_FN (cfun));
		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
		      changed_here = true;
		    }
		}

	      /* Try to change a conditional branch to a return to the
		 respective conditional return.  */
	      if (EDGE_COUNT (b->succs) == 2
		  && any_condjump_p (BB_END (b))
		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
		{
		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				     PATTERN (ret), 0))
		    {
		      if (use)
			emit_insn_before (copy_insn (PATTERN (use)),
					  BB_END (b));
		      if (dump_file)
			fprintf (dump_file, "Changed conditional jump %d->%d "
				 "to conditional return.\n",
				 b->index, BRANCH_EDGE (b)->dest->index);
		      redirect_edge_succ (BRANCH_EDGE (b),
					  EXIT_BLOCK_PTR_FOR_FN (cfun));
		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
		      changed_here = true;
		    }
		}

	      /* Try to flip a conditional branch that falls through to
		 a return so that it becomes a conditional return and a
		 new jump to the original branch target.  */
	      if (EDGE_COUNT (b->succs) == 2
		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && any_condjump_p (BB_END (b))
		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
		{
		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				   JUMP_LABEL (BB_END (b)), 0))
		    {
		      basic_block new_ft = BRANCH_EDGE (b)->dest;
		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
					 PATTERN (ret), 0))
			{
			  if (use)
			    emit_insn_before (copy_insn (PATTERN (use)),
					      BB_END (b));
			  if (dump_file)
			    fprintf (dump_file, "Changed conditional jump "
				     "%d->%d to conditional return, adding "
				     "fall-through jump.\n",
				     b->index, BRANCH_EDGE (b)->dest->index);
			  redirect_edge_succ (BRANCH_EDGE (b),
					      EXIT_BLOCK_PTR_FOR_FN (cfun));
			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
			  std::swap (BRANCH_EDGE (b)->probability,
				     FALLTHRU_EDGE (b)->probability);
			  update_br_prob_note (b);
			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
			  notice_new_block (jb);
			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
					      block_label (new_ft), 0))
			    gcc_unreachable ();
			  redirect_edge_succ (single_succ_edge (jb), new_ft);
			  changed_here = true;
			}
		      else
			{
			  /* Invert the jump back to what it was.  This should
			     never fail.  */
			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
					    JUMP_LABEL (BB_END (b)), 0))
			    gcc_unreachable ();
			}
		    }
		}
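	      /* The transformation above turns, e.g.,
		   if (cond) goto label;  return;
		 into
		   if (!cond) return;  goto label;
		 so the fall-through path no longer needs a separate
		 return block.  */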
	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && onlyjump_p (BB_END (b))
		  && !CROSSING_JUMP_P (BB_END (b))
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
	    changed = true;

	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

	  if (changed)
	    {
	      /* Edge forwarding in particular can cause hot blocks previously
		 reached by both hot and cold blocks to become dominated only
		 by cold blocks. This will cause the verification below to fail,
		 and lead to now cold code in the hot section. This is not easy
		 to detect and fix during edge forwarding, and in some cases
		 is only visible after newly unreachable blocks are deleted,
		 which will be done in fixup_partitions.  */
	      fixup_partitions ();
	      checking_verify_flow_info ();
	    }

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominators information, walking blocks backward gets us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing tablejump insn as they are referenced by the preceding
   insns computing the destination, so we delay deleting and garbagecollect
   them once life information is computed.  */

void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx_insn *label = insn, *jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */
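/* MODE is a bitmask of CLEANUP_* flags controlling how aggressive the
   cleanup is: CLEANUP_CROSSJUMP enables tail merging and head merging,
   CLEANUP_THREADING enables jump threading, CLEANUP_EXPENSIVE enables the
   costlier simplifications and extra dead-insn removal,
   CLEANUP_NO_INSN_DEL suppresses removal of trivially dead insns, and
   CLEANUP_CFGLAYOUT is added automatically below when the IR is already in
   cfglayout mode.  */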
bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Cleanup it right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
	    run_fast_dce ();
	}
      else
	break;
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ??? We probably do this way too often.  */
  if (current_loops
      && (changed
	  || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump

unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}

namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}