/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

	 - Unreachable block removal
	 - Edge forwarding (an edge to a forwarder block is forwarded to its
	   successor.  Simplification of the branch instruction is performed
	   by the underlying infrastructure so the branch can be converted to
	   a simplejump or eliminated).
	 - Cross jumping (tail merging)
	 - Conditional jump-around-simplejump simplification
	 - Basic block merging.  */
#include "coretypes.h"
#include "insn-config.h"
#include "tree-pass.h"
#include "cfgcleanup.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
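/* For illustration (block numbers invented): a forwarder block contains
   nothing but an optional label and a jump to a single successor, e.g.

	bb 4:	L4:
		goto L9;

   so any edge that currently targets bb 4 can be redirected straight to
   the block holding L9 without changing semantics.  That is what the
   edge-forwarding code below does.  */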
/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occured;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;
static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (bb == NULL)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
		    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
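/* For illustration (labels invented), the transformation above rewrites

	if (cond) goto L2;		if (!cond) goto L3;
	goto L3;		 ->	L2: ...
     L2: ...

   i.e. the forwarder block holding the bare "goto L3" is deleted, the
   conditional branch is inverted to target L3 directly, and the old
   branch edge becomes the fallthru edge into L2.  */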
/* Attempt to prove that the operation is a NOOP using CSElib;
   otherwise mark the effect on the register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      dest = XEXP (exp, 0);
      if (REG_P (dest))
	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    default:
      return false;
    }
}
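/* Hypothetical usage sketch (register numbers invented): while threading
   across a block, (set (reg 100) (reg 101)) where cselib already knows
   both registers hold the same value is proven a NOOP and leaves NONEQUAL
   untouched; (clobber (reg 102)) clears reg 102 from NONEQUAL because its
   old value is dead; and (set (reg 103) (plus ...)) adds reg 103 to
   NONEQUAL since its value now differs from the value it had at the
   conditional jump we want to bypass.  */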
/* Return true if X contains a register in NONEQUAL.  */

static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
	{
	  unsigned int end_regno = END_REGNO (x);
	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
	    if (REGNO_REG_SET_P (nonequal, regno))
	      return true;
	}
    }
  return false;
}
/* Attempt to prove that the basic block B will have no side effects and
   will always continue along the same edge if reached via E.  Return that
   edge if it exists, NULL otherwise.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  reg_set_iterator rsi;
258 if (b
->flags
& BB_NONTHREADABLE_BLOCK
)
  /* At the moment we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
263 if (EDGE_COUNT (e
->src
->succs
) != 2)
265 if (EDGE_COUNT (b
->succs
) != 2)
267 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
  /* The second branch must end with an onlyjump, as we will eliminate the
     jump.  */
272 if (!any_condjump_p (BB_END (e
->src
)))
275 if (!any_condjump_p (BB_END (b
)) || !onlyjump_p (BB_END (b
)))
277 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
281 set1
= pc_set (BB_END (e
->src
));
282 set2
= pc_set (BB_END (b
));
283 if (((e
->flags
& EDGE_FALLTHRU
) != 0)
284 != (XEXP (SET_SRC (set1
), 1) == pc_rtx
))
287 cond1
= XEXP (SET_SRC (set1
), 0);
288 cond2
= XEXP (SET_SRC (set2
), 0);
290 code1
= reversed_comparison_code (cond1
, BB_END (e
->src
));
292 code1
= GET_CODE (cond1
);
294 code2
= GET_CODE (cond2
);
295 reversed_code2
= reversed_comparison_code (cond2
, BB_END (b
));
297 if (!comparison_dominates_p (code1
, code2
)
298 && !comparison_dominates_p (code1
, reversed_code2
))
  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or, for example, interval comparisons that
     dominate even when the operands are not equivalent.  */
305 if (!rtx_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
306 || !rtx_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
  /* Short-circuit cases where block B contains some side effects, as we
     can't safely bypass it.  */
311 for (insn
= NEXT_INSN (BB_HEAD (b
)); insn
!= NEXT_INSN (BB_END (b
));
312 insn
= NEXT_INSN (insn
))
313 if (INSN_P (insn
) && side_effects_p (PATTERN (insn
)))
315 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
321 /* First process all values computed in the source basic block. */
322 for (insn
= NEXT_INSN (BB_HEAD (e
->src
));
323 insn
!= NEXT_INSN (BB_END (e
->src
));
324 insn
= NEXT_INSN (insn
))
326 cselib_process_insn (insn
);
328 nonequal
= BITMAP_ALLOC (NULL
);
329 CLEAR_REG_SET (nonequal
);
  /* Now assume that we've continued along the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */
335 for (insn
= NEXT_INSN (BB_HEAD (b
));
336 insn
!= NEXT_INSN (BB_END (b
)) && !failed
;
337 insn
= NEXT_INSN (insn
))
341 rtx pat
= PATTERN (insn
);
343 if (GET_CODE (pat
) == PARALLEL
)
345 for (i
= 0; i
< (unsigned)XVECLEN (pat
, 0); i
++)
346 failed
|= mark_effect (XVECEXP (pat
, 0, i
), nonequal
);
349 failed
|= mark_effect (pat
, nonequal
);
352 cselib_process_insn (insn
);
  /* Later we should clear nonequal of dead registers.  So far we don't
     have liveness information in cfg_cleanup.  */
359 b
->flags
|= BB_NONTHREADABLE_BLOCK
;
  /* cond2 must not mention any register that is not known to hold an
     equal value at this point.  */
365 if (mentions_nonequal_regs (cond2
, nonequal
))
368 EXECUTE_IF_SET_IN_REG_SET (nonequal
, 0, i
, rsi
)
371 BITMAP_FREE (nonequal
);
373 if ((comparison_dominates_p (code1
, code2
) != 0)
374 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
375 return BRANCH_EDGE (b
);
377 return FALLTHRU_EDGE (b
);
380 BITMAP_FREE (nonequal
);
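/* Hypothetical illustration of the threading test above (conditions
   invented): suppose edge E is the taken edge of "if (a == 0)" in E->src,
   and block B ends with "if (a >= 0)" on the very same operands.  Then
   comparison_dominates_p (EQ, GE) holds, and as long as B has no side
   effects and does not modify A (which the cselib walk with NONEQUAL
   checks), B's branch outcome is already known, so E can be threaded
   directly to B's taken successor.  */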
385 /* Attempt to forward edges leaving basic block B.
386 Return true if successful. */
389 try_forward_edges (int mode
, basic_block b
)
391 bool changed
= false;
393 edge e
, *threaded_edges
= NULL
;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
405 if (JUMP_P (BB_END (b
)) && CROSSING_JUMP_P (BB_END (b
)))
408 for (ei
= ei_start (b
->succs
); (e
= ei_safe_edge (ei
)); )
410 basic_block target
, first
;
411 location_t goto_locus
;
413 bool threaded
= false;
414 int nthreaded_edges
= 0;
415 bool may_thread
= first_pass
|| (b
->flags
& BB_MODIFIED
) != 0;
      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed in forwarding a
	 fallthru edge to the same place as the branch edge of a conditional
	 branch and turn the conditional branch into an unconditional
	 branch.  */
422 if (e
->flags
& EDGE_COMPLEX
)
428 target
= first
= e
->dest
;
429 counter
= NUM_FIXED_BLOCKS
;
430 goto_locus
= e
->goto_locus
;
      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */
442 if (first
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
443 && JUMP_P (BB_END (first
))
444 && CROSSING_JUMP_P (BB_END (first
)))
447 while (counter
< n_basic_blocks_for_fn (cfun
))
449 basic_block new_target
= NULL
;
450 bool new_target_threaded
= false;
451 may_thread
|= (target
->flags
& BB_MODIFIED
) != 0;
453 if (FORWARDER_BLOCK_P (target
)
454 && !(single_succ_edge (target
)->flags
& EDGE_CROSSING
)
455 && single_succ (target
) != EXIT_BLOCK_PTR_FOR_FN (cfun
))
457 /* Bypass trivial infinite loops. */
458 new_target
= single_succ (target
);
459 if (target
== new_target
)
460 counter
= n_basic_blocks_for_fn (cfun
);
463 /* When not optimizing, ensure that edges or forwarder
464 blocks with different locus are not optimized out. */
465 location_t new_locus
= single_succ_edge (target
)->goto_locus
;
466 location_t locus
= goto_locus
;
468 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
469 && LOCATION_LOCUS (locus
) != UNKNOWN_LOCATION
470 && new_locus
!= locus
)
474 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
)
477 rtx_insn
*last
= BB_END (target
);
478 if (DEBUG_INSN_P (last
))
479 last
= prev_nondebug_insn (last
);
480 if (last
&& INSN_P (last
))
481 new_locus
= INSN_LOCATION (last
);
483 new_locus
= UNKNOWN_LOCATION
;
485 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
486 && LOCATION_LOCUS (locus
) != UNKNOWN_LOCATION
487 && new_locus
!= locus
)
491 if (LOCATION_LOCUS (new_locus
) != UNKNOWN_LOCATION
)
	  /* Allow threading only over one edge at a time, to simplify
	     updating of probabilities.  */
502 else if ((mode
& CLEANUP_THREADING
) && may_thread
)
504 edge t
= thread_jump (e
, target
);
508 threaded_edges
= XNEWVEC (edge
,
509 n_basic_blocks_for_fn (cfun
));
514 /* Detect an infinite loop across blocks not
515 including the start block. */
516 for (i
= 0; i
< nthreaded_edges
; ++i
)
517 if (threaded_edges
[i
] == t
)
519 if (i
< nthreaded_edges
)
521 counter
= n_basic_blocks_for_fn (cfun
);
526 /* Detect an infinite loop across the start block. */
530 gcc_assert (nthreaded_edges
531 < (n_basic_blocks_for_fn (cfun
)
532 - NUM_FIXED_BLOCKS
));
533 threaded_edges
[nthreaded_edges
++] = t
;
535 new_target
= t
->dest
;
536 new_target_threaded
= true;
545 threaded
|= new_target_threaded
;
548 if (counter
>= n_basic_blocks_for_fn (cfun
))
551 fprintf (dump_file
, "Infinite loop in BB %i.\n",
554 else if (target
== first
)
555 ; /* We didn't do anything. */
558 /* Save the values now, as the edge may get removed. */
559 gcov_type edge_count
= e
->count
;
560 int edge_probability
= e
->probability
;
564 e
->goto_locus
= goto_locus
;
566 /* Don't force if target is exit block. */
567 if (threaded
&& target
!= EXIT_BLOCK_PTR_FOR_FN (cfun
))
569 notice_new_block (redirect_edge_and_branch_force (e
, target
));
571 fprintf (dump_file
, "Conditionals threaded.\n");
573 else if (!redirect_edge_and_branch (e
, target
))
577 "Forwarding edge %i->%i to %i failed.\n",
578 b
->index
, e
->dest
->index
, target
->index
);
583 /* We successfully forwarded the edge. Now update profile
584 data: for each edge we traversed in the chain, remove
585 the original edge's execution count. */
586 edge_frequency
= apply_probability (b
->frequency
, edge_probability
);
592 if (!single_succ_p (first
))
594 gcc_assert (n
< nthreaded_edges
);
595 t
= threaded_edges
[n
++];
596 gcc_assert (t
->src
== first
);
597 update_bb_profile_for_threading (first
, edge_frequency
,
599 update_br_prob_note (first
);
603 first
->count
-= edge_count
;
604 if (first
->count
< 0)
606 first
->frequency
-= edge_frequency
;
607 if (first
->frequency
< 0)
608 first
->frequency
= 0;
		      /* It is possible that, as the result of threading,
			 we've removed the edge because it was threaded to
			 the fallthru edge.  Avoid getting out of sync.  */
613 if (n
< nthreaded_edges
614 && first
== threaded_edges
[n
]->src
)
616 t
= single_succ_edge (first
);
619 t
->count
-= edge_count
;
624 while (first
!= target
);
632 free (threaded_edges
);
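/* Hypothetical illustration of edge forwarding (block numbers invented):
   given the chain

	bb 2 --e--> bb 5 (forwarder) --> bb 6 (forwarder) --> bb 9

   the loop above walks from E->dest through the forwarder blocks and
   finally redirects E so that bb 2 branches straight to bb 9, subtracting
   E's count and frequency from every block E used to pass through.  */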
637 /* Blocks A and B are to be merged into a single block. A has no incoming
638 fallthru edge, so it can be moved before B without adding or modifying
639 any jumps (aside from the jump from A to B). */
642 merge_blocks_move_predecessor_nojumps (basic_block a
, basic_block b
)
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
656 if (BB_PARTITION (a
) != BB_PARTITION (b
))
659 barrier
= next_nonnote_insn (BB_END (a
));
660 gcc_assert (BARRIER_P (barrier
));
661 delete_insn (barrier
);
663 /* Scramble the insn chain. */
664 if (BB_END (a
) != PREV_INSN (BB_HEAD (b
)))
665 reorder_insns_nobb (BB_HEAD (a
), BB_END (a
), PREV_INSN (BB_HEAD (b
)));
669 fprintf (dump_file
, "Moved block %d before %d and merged.\n",
672 /* Swap the records for the two blocks around. */
675 link_block (a
, b
->prev_bb
);
677 /* Now blocks A and B are contiguous. Merge them. */
681 /* Blocks A and B are to be merged into a single block. B has no outgoing
682 fallthru edge, so it can be moved after A without adding or modifying
683 any jumps (aside from the jump from A to B). */
686 merge_blocks_move_successor_nojumps (basic_block a
, basic_block b
)
688 rtx_insn
*barrier
, *real_b_end
;
690 rtx_jump_table_data
*table
;
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
702 if (BB_PARTITION (a
) != BB_PARTITION (b
))
705 real_b_end
= BB_END (b
);
707 /* If there is a jump table following block B temporarily add the jump table
708 to block B so that it will also be moved to the correct location. */
709 if (tablejump_p (BB_END (b
), &label
, &table
)
710 && prev_active_insn (label
) == BB_END (b
))
715 /* There had better have been a barrier there. Delete it. */
716 barrier
= NEXT_INSN (BB_END (b
));
717 if (barrier
&& BARRIER_P (barrier
))
718 delete_insn (barrier
);
721 /* Scramble the insn chain. */
722 reorder_insns_nobb (BB_HEAD (b
), BB_END (b
), BB_END (a
));
724 /* Restore the real end of b. */
725 BB_END (b
) = real_b_end
;
728 fprintf (dump_file
, "Moved block %d after %d and merged.\n",
731 /* Now blocks A and B are contiguous. Merge them. */
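/* Hypothetical sketch of the two helpers above (layout invented): if the
   insn stream is  ... A ... X ... B ...  and A's only jump goes to B, then
   either A is physically moved to just before B (the predecessor variant,
   when A has no incoming fallthru) or B is moved to just after A (the
   successor variant, when B has no outgoing fallthru).  In both cases the
   blocks become contiguous and can be merged without touching any other
   jumps.  */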
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to revisit the whole function.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
748 merge_blocks_move (edge e
, basic_block b
, basic_block c
, int mode
)
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */
762 if (BB_PARTITION (b
) != BB_PARTITION (c
))
765 /* If B has a fallthru edge to C, no need to move anything. */
766 if (e
->flags
& EDGE_FALLTHRU
)
768 int b_index
= b
->index
, c_index
= c
->index
;
770 /* Protect the loop latches. */
771 if (current_loops
&& c
->loop_father
->latch
== c
)
775 update_forwarder_flag (b
);
778 fprintf (dump_file
, "Merged %d and %d without moving.\n",
781 return b
->prev_bb
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? b
: b
->prev_bb
;
784 /* Otherwise we will need to move code around. Do that only if expensive
785 transformations are allowed. */
786 else if (mode
& CLEANUP_EXPENSIVE
)
788 edge tmp_edge
, b_fallthru_edge
;
789 bool c_has_outgoing_fallthru
;
790 bool b_has_incoming_fallthru
;
792 /* Avoid overactive code motion, as the forwarder blocks should be
793 eliminated by edge redirection instead. One exception might have
794 been if B is a forwarder block and C has no fallthru edge, but
795 that should be cleaned up by bb-reorder instead. */
796 if (FORWARDER_BLOCK_P (b
) || FORWARDER_BLOCK_P (c
))
799 /* We must make sure to not munge nesting of lexical blocks,
800 and loop notes. This is done by squeezing out all the notes
801 and leaving them there to lie. Not ideal, but functional. */
803 tmp_edge
= find_fallthru_edge (c
->succs
);
804 c_has_outgoing_fallthru
= (tmp_edge
!= NULL
);
806 tmp_edge
= find_fallthru_edge (b
->preds
);
807 b_has_incoming_fallthru
= (tmp_edge
!= NULL
);
808 b_fallthru_edge
= tmp_edge
;
811 next
= next
->prev_bb
;
813 /* Otherwise, we're going to try to move C after B. If C does
814 not have an outgoing fallthru, then it can be moved
815 immediately after B without introducing or modifying jumps. */
816 if (! c_has_outgoing_fallthru
)
818 merge_blocks_move_successor_nojumps (b
, c
);
819 return next
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? next
->next_bb
: next
;
822 /* If B does not have an incoming fallthru, then it can be moved
823 immediately before C without introducing or modifying jumps.
824 C cannot be the first block, so we do not have to worry about
825 accessing a non-existent block. */
827 if (b_has_incoming_fallthru
)
831 if (b_fallthru_edge
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
833 bb
= force_nonfallthru (b_fallthru_edge
);
835 notice_new_block (bb
);
838 merge_blocks_move_predecessor_nojumps (b
, c
);
839 return next
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) ? next
->next_bb
: next
;
/* Remove the memory attributes of the MEM expressions in X and Y
   wherever they are not equal.  */
850 merge_memattrs (rtx x
, rtx y
)
859 if (x
== 0 || y
== 0)
864 if (code
!= GET_CODE (y
))
867 if (GET_MODE (x
) != GET_MODE (y
))
870 if (code
== MEM
&& !mem_attrs_eq_p (MEM_ATTRS (x
), MEM_ATTRS (y
)))
874 else if (! MEM_ATTRS (y
))
878 HOST_WIDE_INT mem_size
;
880 if (MEM_ALIAS_SET (x
) != MEM_ALIAS_SET (y
))
882 set_mem_alias_set (x
, 0);
883 set_mem_alias_set (y
, 0);
886 if (! mem_expr_equal_p (MEM_EXPR (x
), MEM_EXPR (y
)))
890 clear_mem_offset (x
);
891 clear_mem_offset (y
);
893 else if (MEM_OFFSET_KNOWN_P (x
) != MEM_OFFSET_KNOWN_P (y
)
894 || (MEM_OFFSET_KNOWN_P (x
)
895 && MEM_OFFSET (x
) != MEM_OFFSET (y
)))
897 clear_mem_offset (x
);
898 clear_mem_offset (y
);
901 if (MEM_SIZE_KNOWN_P (x
) && MEM_SIZE_KNOWN_P (y
))
903 mem_size
= MAX (MEM_SIZE (x
), MEM_SIZE (y
));
904 set_mem_size (x
, mem_size
);
905 set_mem_size (y
, mem_size
);
913 set_mem_align (x
, MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)));
914 set_mem_align (y
, MEM_ALIGN (x
));
919 if (MEM_READONLY_P (x
) != MEM_READONLY_P (y
))
921 MEM_READONLY_P (x
) = 0;
922 MEM_READONLY_P (y
) = 0;
924 if (MEM_NOTRAP_P (x
) != MEM_NOTRAP_P (y
))
926 MEM_NOTRAP_P (x
) = 0;
927 MEM_NOTRAP_P (y
) = 0;
929 if (MEM_VOLATILE_P (x
) != MEM_VOLATILE_P (y
))
931 MEM_VOLATILE_P (x
) = 1;
932 MEM_VOLATILE_P (y
) = 1;
936 fmt
= GET_RTX_FORMAT (code
);
937 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
942 /* Two vectors must have the same length. */
943 if (XVECLEN (x
, i
) != XVECLEN (y
, i
))
946 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
947 merge_memattrs (XVECEXP (x
, i
, j
), XVECEXP (y
, i
, j
));
952 merge_memattrs (XEXP (x
, i
), XEXP (y
, i
));
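/* Hypothetical example of the merging above (attribute values invented):
   if X is a MEM with alias set 2, known size 4 and alignment 32 while Y
   is the same address with alias set 5, size 8 and alignment 8, only what
   is safe for both survives: the alias sets are cleared to 0, the size
   becomes the maximum (8) and the alignment the minimum (8), so the
   merged insn remains valid on either original path.  */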
959 /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
960 different single sets S1 and S2. */
963 equal_different_set_p (rtx p1
, rtx s1
, rtx p2
, rtx s2
)
968 if (p1
== s1
&& p2
== s2
)
971 if (GET_CODE (p1
) != PARALLEL
|| GET_CODE (p2
) != PARALLEL
)
974 if (XVECLEN (p1
, 0) != XVECLEN (p2
, 0))
977 for (i
= 0; i
< XVECLEN (p1
, 0); i
++)
979 e1
= XVECEXP (p1
, 0, i
);
980 e2
= XVECEXP (p2
, 0, i
);
981 if (e1
== s1
&& e2
== s2
)
984 ? rtx_renumbered_equal_p (e1
, e2
) : rtx_equal_p (e1
, e2
))
/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
   that is a single_set with a SET_SRC of SRC1.  Similarly
   with NOTE2/SRC2.

   So effectively NOTE1/NOTE2 are an alternate form of
   SRC1/SRC2 respectively.

   Return nonzero if SRC1 or NOTE1 has the same constant
   integer value as SRC2 or NOTE2.  Else return zero.  */
1004 values_equal_p (rtx note1
, rtx note2
, rtx src1
, rtx src2
)
1008 && CONST_INT_P (XEXP (note1
, 0))
1009 && rtx_equal_p (XEXP (note1
, 0), XEXP (note2
, 0)))
1014 && CONST_INT_P (src1
)
1015 && CONST_INT_P (src2
)
1016 && rtx_equal_p (src1
, src2
))
1020 && CONST_INT_P (src2
)
1021 && rtx_equal_p (XEXP (note1
, 0), src2
))
1025 && CONST_INT_P (src1
)
1026 && rtx_equal_p (XEXP (note2
, 0), src1
))
1032 /* Examine register notes on I1 and I2 and return:
1033 - dir_forward if I1 can be replaced by I2, or
1034 - dir_backward if I2 can be replaced by I1, or
1035 - dir_both if both are the case. */
1037 static enum replace_direction
1038 can_replace_by (rtx_insn
*i1
, rtx_insn
*i2
)
1040 rtx s1
, s2
, d1
, d2
, src1
, src2
, note1
, note2
;
1043 /* Check for 2 sets. */
1044 s1
= single_set (i1
);
1045 s2
= single_set (i2
);
1046 if (s1
== NULL_RTX
|| s2
== NULL_RTX
)
1049 /* Check that the 2 sets set the same dest. */
1052 if (!(reload_completed
1053 ? rtx_renumbered_equal_p (d1
, d2
) : rtx_equal_p (d1
, d2
)))
  /* Find identical reg_equiv or reg_equal notes, which imply that the 2
     sets set dest to the same value.  */
1058 note1
= find_reg_equal_equiv_note (i1
);
1059 note2
= find_reg_equal_equiv_note (i2
);
1061 src1
= SET_SRC (s1
);
1062 src2
= SET_SRC (s2
);
1064 if (!values_equal_p (note1
, note2
, src1
, src2
))
1067 if (!equal_different_set_p (PATTERN (i1
), s1
, PATTERN (i2
), s2
))
  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
1076 c1
= CONST_INT_P (src1
);
1077 c2
= CONST_INT_P (src2
);
1083 return dir_backward
;
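/* Hypothetical example (insns invented): if I1 is
   (set (reg 90) (plus (reg 91) (const_int 4))) carrying
   REG_EQUAL (const_int 12), and I2 is (set (reg 90) (const_int 12)), the
   notes/sources prove that both insns store 12 into reg 90.  Since only
   I2's source is a constant, I1 may be replaced by I2 (dir_forward) but
   not the other way around, because reg 91 need not be live, or hold 8,
   at I2's location.  */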
1088 /* Merges directions A and B. */
1090 static enum replace_direction
1091 merge_dir (enum replace_direction a
, enum replace_direction b
)
1093 /* Implements the following table:
1112 /* Examine I1 and I2 and return:
1113 - dir_forward if I1 can be replaced by I2, or
1114 - dir_backward if I2 can be replaced by I1, or
1115 - dir_both if both are the case. */
1117 static enum replace_direction
1118 old_insns_match_p (int mode ATTRIBUTE_UNUSED
, rtx_insn
*i1
, rtx_insn
*i2
)
1122 /* Verify that I1 and I2 are equivalent. */
1123 if (GET_CODE (i1
) != GET_CODE (i2
))
1126 /* __builtin_unreachable() may lead to empty blocks (ending with
1127 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
1128 if (NOTE_INSN_BASIC_BLOCK_P (i1
) && NOTE_INSN_BASIC_BLOCK_P (i2
))
1131 /* ??? Do not allow cross-jumping between different stack levels. */
1132 p1
= find_reg_note (i1
, REG_ARGS_SIZE
, NULL
);
1133 p2
= find_reg_note (i2
, REG_ARGS_SIZE
, NULL
);
1138 if (!rtx_equal_p (p1
, p2
))
1141 /* ??? Worse, this adjustment had better be constant lest we
1142 have differing incoming stack levels. */
1143 if (!frame_pointer_needed
1144 && find_args_size_adjust (i1
) == HOST_WIDE_INT_MIN
)
1153 if (GET_CODE (p1
) != GET_CODE (p2
))
1156 /* If this is a CALL_INSN, compare register usage information.
1157 If we don't check this on stack register machines, the two
1158 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1159 numbers of stack registers in the same basic block.
1160 If we don't check this on machines with delay slots, a delay slot may
1161 be filled that clobbers a parameter expected by the subroutine.
1163 ??? We take the simple route for now and assume that if they're
1164 equal, they were constructed identically.
1166 Also check for identical exception regions. */
1170 /* Ensure the same EH region. */
1171 rtx n1
= find_reg_note (i1
, REG_EH_REGION
, 0);
1172 rtx n2
= find_reg_note (i2
, REG_EH_REGION
, 0);
1177 if (n1
&& (!n2
|| XEXP (n1
, 0) != XEXP (n2
, 0)))
1180 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1
),
1181 CALL_INSN_FUNCTION_USAGE (i2
))
1182 || SIBLING_CALL_P (i1
) != SIBLING_CALL_P (i2
))
1185 /* For address sanitizer, never crossjump __asan_report_* builtins,
1186 otherwise errors might be reported on incorrect lines. */
1187 if (flag_sanitize
& SANITIZE_ADDRESS
)
1189 rtx call
= get_call_rtx_from (i1
);
1190 if (call
&& GET_CODE (XEXP (XEXP (call
, 0), 0)) == SYMBOL_REF
)
1192 rtx symbol
= XEXP (XEXP (call
, 0), 0);
1193 if (SYMBOL_REF_DECL (symbol
)
1194 && TREE_CODE (SYMBOL_REF_DECL (symbol
)) == FUNCTION_DECL
)
1196 if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol
))
1198 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol
))
1199 >= BUILT_IN_ASAN_REPORT_LOAD1
1200 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol
))
1201 <= BUILT_IN_ASAN_STOREN
)
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like regs.  */
1213 if ((mode
& CLEANUP_POST_REGSTACK
) && stack_regs_mentioned (i1
))
1215 /* If register stack conversion has already been done, then
1216 death notes must also be compared before it is certain that
1217 the two instruction streams match. */
1220 HARD_REG_SET i1_regset
, i2_regset
;
1222 CLEAR_HARD_REG_SET (i1_regset
);
1223 CLEAR_HARD_REG_SET (i2_regset
);
1225 for (note
= REG_NOTES (i1
); note
; note
= XEXP (note
, 1))
1226 if (REG_NOTE_KIND (note
) == REG_DEAD
&& STACK_REG_P (XEXP (note
, 0)))
1227 SET_HARD_REG_BIT (i1_regset
, REGNO (XEXP (note
, 0)));
1229 for (note
= REG_NOTES (i2
); note
; note
= XEXP (note
, 1))
1230 if (REG_NOTE_KIND (note
) == REG_DEAD
&& STACK_REG_P (XEXP (note
, 0)))
1231 SET_HARD_REG_BIT (i2_regset
, REGNO (XEXP (note
, 0)));
1233 if (!hard_reg_set_equal_p (i1_regset
, i2_regset
))
1238 if (reload_completed
1239 ? rtx_renumbered_equal_p (p1
, p2
) : rtx_equal_p (p1
, p2
))
1242 return can_replace_by (i1
, i2
);
1245 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1246 flow_find_head_matching_sequence, ensure the notes match. */
1249 merge_notes (rtx_insn
*i1
, rtx_insn
*i2
)
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
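  /* Hypothetical example (notes invented): if I1 carries
     REG_EQUAL (const_int 5) and I2 carries REG_EQUAL (const_int 7),
     keeping either note on the surviving insn would be wrong for one of
     the original paths, so both notes are dropped; a note survives only
     when both insns carry an identical one.  */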
1253 rtx equiv1
= find_reg_equal_equiv_note (i1
);
1254 rtx equiv2
= find_reg_equal_equiv_note (i2
);
1256 if (equiv1
&& !equiv2
)
1257 remove_note (i1
, equiv1
);
1258 else if (!equiv1
&& equiv2
)
1259 remove_note (i2
, equiv2
);
1260 else if (equiv1
&& equiv2
1261 && !rtx_equal_p (XEXP (equiv1
, 0), XEXP (equiv2
, 0)))
1263 remove_note (i1
, equiv1
);
1264 remove_note (i2
, equiv2
);
/* Walk from I1 in BB1 backward to the next non-debug insn, and return the
   resulting insn in I1 and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walk further in the predecessor bb and register this in
   DID_FALLTHRU.  Otherwise, stop at the head of the bb.  */
1275 walk_to_nondebug_insn (rtx_insn
**i1
, basic_block
*bb1
, bool follow_fallthru
,
1280 *did_fallthru
= false;
1283 while (!NONDEBUG_INSN_P (*i1
))
1285 if (*i1
!= BB_HEAD (*bb1
))
1287 *i1
= PREV_INSN (*i1
);
1291 if (!follow_fallthru
)
1294 fallthru
= find_fallthru_edge ((*bb1
)->preds
);
1295 if (!fallthru
|| fallthru
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
)
1296 || !single_succ_p (fallthru
->src
))
1299 *bb1
= fallthru
->src
;
1300 *i1
= BB_END (*bb1
);
1301 *did_fallthru
= true;
1305 /* Look through the insns at the end of BB1 and BB2 and find the longest
1306 sequence that are either equivalent, or allow forward or backward
1307 replacement. Store the first insns for that sequence in *F1 and *F2 and
1308 return the sequence length.
1310 DIR_P indicates the allowed replacement direction on function entry, and
1311 the actual replacement direction on function exit. If NULL, only equivalent
1312 sequences are allowed.
1314 To simplify callers of this function, if the blocks match exactly,
1315 store the head of the blocks in *F1 and *F2. */
1318 flow_find_cross_jump (basic_block bb1
, basic_block bb2
, rtx_insn
**f1
,
1319 rtx_insn
**f2
, enum replace_direction
*dir_p
)
1321 rtx_insn
*i1
, *i2
, *last1
, *last2
, *afterlast1
, *afterlast2
;
1323 enum replace_direction dir
, last_dir
, afterlast_dir
;
1324 bool follow_fallthru
, did_fallthru
;
1330 afterlast_dir
= dir
;
1331 last_dir
= afterlast_dir
;
1333 /* Skip simple jumps at the end of the blocks. Complex jumps still
1334 need to be compared for equivalence, which we'll do below. */
1337 last1
= afterlast1
= last2
= afterlast2
= NULL
;
1339 || (returnjump_p (i1
) && !side_effects_p (PATTERN (i1
))))
1342 i1
= PREV_INSN (i1
);
1347 || (returnjump_p (i2
) && !side_effects_p (PATTERN (i2
))))
1350 /* Count everything except for unconditional jump as insn.
1351 Don't count any jumps if dir_p is NULL. */
1352 if (!simplejump_p (i2
) && !returnjump_p (i2
) && last1
&& dir_p
)
1354 i2
= PREV_INSN (i2
);
      /* In the following example, we can replace all jumps to C by jumps to A.

	 This removes 4 duplicate insns.
	 [bb A] insn1            [bb C] insn1
		insn2                   insn2
	 [bb B] insn3                   insn3
		insn4                   insn4
		jump_insn               jump_insn

	 We could also replace all jumps to A by jumps to C, but that leaves B
	 alive, and removes only 2 duplicate insns.  In a subsequent crossjump
	 step, all jumps to B would be replaced with jumps to the middle of C,
	 achieving the same result with more effort.
	 So we allow only the first possibility, which means that we don't allow
	 fallthru in the block that's being replaced.  */
1375 follow_fallthru
= dir_p
&& dir
!= dir_forward
;
1376 walk_to_nondebug_insn (&i1
, &bb1
, follow_fallthru
, &did_fallthru
);
1380 follow_fallthru
= dir_p
&& dir
!= dir_backward
;
1381 walk_to_nondebug_insn (&i2
, &bb2
, follow_fallthru
, &did_fallthru
);
1385 if (i1
== BB_HEAD (bb1
) || i2
== BB_HEAD (bb2
))
1388 dir
= merge_dir (dir
, old_insns_match_p (0, i1
, i2
));
1389 if (dir
== dir_none
|| (!dir_p
&& dir
!= dir_both
))
1392 merge_memattrs (i1
, i2
);
1394 /* Don't begin a cross-jump with a NOTE insn. */
1397 merge_notes (i1
, i2
);
1399 afterlast1
= last1
, afterlast2
= last2
;
1400 last1
= i1
, last2
= i2
;
1401 afterlast_dir
= last_dir
;
1403 if (active_insn_p (i1
))
1407 i1
= PREV_INSN (i1
);
1408 i2
= PREV_INSN (i2
);
1411 /* Don't allow the insn after a compare to be shared by
1412 cross-jumping unless the compare is also shared. */
1413 if (HAVE_cc0
&& ninsns
&& reg_mentioned_p (cc0_rtx
, last1
)
1414 && ! sets_cc0_p (last1
))
1415 last1
= afterlast1
, last2
= afterlast2
, last_dir
= afterlast_dir
, ninsns
--;
1417 /* Include preceding notes and labels in the cross-jump. One,
1418 this may bring us to the head of the blocks as requested above.
1419 Two, it keeps line number notes as matched as may be. */
1422 bb1
= BLOCK_FOR_INSN (last1
);
1423 while (last1
!= BB_HEAD (bb1
) && !NONDEBUG_INSN_P (PREV_INSN (last1
)))
1424 last1
= PREV_INSN (last1
);
1426 if (last1
!= BB_HEAD (bb1
) && LABEL_P (PREV_INSN (last1
)))
1427 last1
= PREV_INSN (last1
);
1429 bb2
= BLOCK_FOR_INSN (last2
);
1430 while (last2
!= BB_HEAD (bb2
) && !NONDEBUG_INSN_P (PREV_INSN (last2
)))
1431 last2
= PREV_INSN (last2
);
1433 if (last2
!= BB_HEAD (bb2
) && LABEL_P (PREV_INSN (last2
)))
1434 last2
= PREV_INSN (last2
);
/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
   nonzero, count only active insns.  */
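/* Hypothetical example (insn streams invented): if BB1 starts with
   "r1 = r2; r3 = r1 + 4; if (r3 < 0) ..." and BB2 starts with
   "r1 = r2; r3 = r1 + 4; call foo", the walk below pairs up the two
   leading insns, stops at the first mismatch (the jump vs. the call),
   and reports a match length of 2, which the head-merging code later in
   this file can then move across the block boundary.  */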
1452 flow_find_head_matching_sequence (basic_block bb1
, basic_block bb2
, rtx_insn
**f1
,
1453 rtx_insn
**f2
, int stop_after
)
1455 rtx_insn
*i1
, *i2
, *last1
, *last2
, *beforelast1
, *beforelast2
;
1459 int nehedges1
= 0, nehedges2
= 0;
1461 FOR_EACH_EDGE (e
, ei
, bb1
->succs
)
1462 if (e
->flags
& EDGE_EH
)
1464 FOR_EACH_EDGE (e
, ei
, bb2
->succs
)
1465 if (e
->flags
& EDGE_EH
)
1470 last1
= beforelast1
= last2
= beforelast2
= NULL
;
1474 /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
1475 while (!NONDEBUG_INSN_P (i1
) && i1
!= BB_END (bb1
))
1477 if (NOTE_P (i1
) && NOTE_KIND (i1
) == NOTE_INSN_EPILOGUE_BEG
)
1479 i1
= NEXT_INSN (i1
);
1482 while (!NONDEBUG_INSN_P (i2
) && i2
!= BB_END (bb2
))
1484 if (NOTE_P (i2
) && NOTE_KIND (i2
) == NOTE_INSN_EPILOGUE_BEG
)
1486 i2
= NEXT_INSN (i2
);
1489 if ((i1
== BB_END (bb1
) && !NONDEBUG_INSN_P (i1
))
1490 || (i2
== BB_END (bb2
) && !NONDEBUG_INSN_P (i2
)))
1493 if (NOTE_P (i1
) || NOTE_P (i2
)
1494 || JUMP_P (i1
) || JUMP_P (i2
))
1497 /* A sanity check to make sure we're not merging insns with different
1498 effects on EH. If only one of them ends a basic block, it shouldn't
1499 have an EH edge; if both end a basic block, there should be the same
1500 number of EH edges. */
1501 if ((i1
== BB_END (bb1
) && i2
!= BB_END (bb2
)
1503 || (i2
== BB_END (bb2
) && i1
!= BB_END (bb1
)
1505 || (i1
== BB_END (bb1
) && i2
== BB_END (bb2
)
1506 && nehedges1
!= nehedges2
))
1509 if (old_insns_match_p (0, i1
, i2
) != dir_both
)
1512 merge_memattrs (i1
, i2
);
1514 /* Don't begin a cross-jump with a NOTE insn. */
1517 merge_notes (i1
, i2
);
1519 beforelast1
= last1
, beforelast2
= last2
;
1520 last1
= i1
, last2
= i2
;
1521 if (!stop_after
|| active_insn_p (i1
))
1525 if (i1
== BB_END (bb1
) || i2
== BB_END (bb2
)
1526 || (stop_after
> 0 && ninsns
== stop_after
))
1529 i1
= NEXT_INSN (i1
);
1530 i2
= NEXT_INSN (i2
);
1533 /* Don't allow a compare to be shared by cross-jumping unless the insn
1534 after the compare is also shared. */
1535 if (HAVE_cc0
&& ninsns
&& reg_mentioned_p (cc0_rtx
, last1
)
1536 && sets_cc0_p (last1
))
1537 last1
= beforelast1
, last2
= beforelast2
, ninsns
--;
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
1555 outgoing_edges_match (int mode
, basic_block bb1
, basic_block bb2
)
1557 int nehedges1
= 0, nehedges2
= 0;
1558 edge fallthru1
= 0, fallthru2
= 0;
1562 /* If we performed shrink-wrapping, edges to the exit block can
1563 only be distinguished for JUMP_INSNs. The two paths may differ in
1564 whether they went through the prologue. Sibcalls are fine, we know
1565 that we either didn't need or inserted an epilogue before them. */
1566 if (crtl
->shrink_wrapped
1567 && single_succ_p (bb1
)
1568 && single_succ (bb1
) == EXIT_BLOCK_PTR_FOR_FN (cfun
)
1569 && !JUMP_P (BB_END (bb1
))
1570 && !(CALL_P (BB_END (bb1
)) && SIBLING_CALL_P (BB_END (bb1
))))
1573 /* If BB1 has only one successor, we may be looking at either an
1574 unconditional jump, or a fake edge to exit. */
1575 if (single_succ_p (bb1
)
1576 && (single_succ_edge (bb1
)->flags
& (EDGE_COMPLEX
| EDGE_FAKE
)) == 0
1577 && (!JUMP_P (BB_END (bb1
)) || simplejump_p (BB_END (bb1
))))
1578 return (single_succ_p (bb2
)
1579 && (single_succ_edge (bb2
)->flags
1580 & (EDGE_COMPLEX
| EDGE_FAKE
)) == 0
1581 && (!JUMP_P (BB_END (bb2
)) || simplejump_p (BB_END (bb2
))));
1583 /* Match conditional jumps - this may get tricky when fallthru and branch
1584 edges are crossed. */
1585 if (EDGE_COUNT (bb1
->succs
) == 2
1586 && any_condjump_p (BB_END (bb1
))
1587 && onlyjump_p (BB_END (bb1
)))
1589 edge b1
, f1
, b2
, f2
;
1590 bool reverse
, match
;
1591 rtx set1
, set2
, cond1
, cond2
;
1592 enum rtx_code code1
, code2
;
1594 if (EDGE_COUNT (bb2
->succs
) != 2
1595 || !any_condjump_p (BB_END (bb2
))
1596 || !onlyjump_p (BB_END (bb2
)))
1599 b1
= BRANCH_EDGE (bb1
);
1600 b2
= BRANCH_EDGE (bb2
);
1601 f1
= FALLTHRU_EDGE (bb1
);
1602 f2
= FALLTHRU_EDGE (bb2
);
1604 /* Get around possible forwarders on fallthru edges. Other cases
1605 should be optimized out already. */
1606 if (FORWARDER_BLOCK_P (f1
->dest
))
1607 f1
= single_succ_edge (f1
->dest
);
1609 if (FORWARDER_BLOCK_P (f2
->dest
))
1610 f2
= single_succ_edge (f2
->dest
);
1612 /* To simplify use of this function, return false if there are
1613 unneeded forwarder blocks. These will get eliminated later
1614 during cleanup_cfg. */
1615 if (FORWARDER_BLOCK_P (f1
->dest
)
1616 || FORWARDER_BLOCK_P (f2
->dest
)
1617 || FORWARDER_BLOCK_P (b1
->dest
)
1618 || FORWARDER_BLOCK_P (b2
->dest
))
1621 if (f1
->dest
== f2
->dest
&& b1
->dest
== b2
->dest
)
1623 else if (f1
->dest
== b2
->dest
&& b1
->dest
== f2
->dest
)
1628 set1
= pc_set (BB_END (bb1
));
1629 set2
= pc_set (BB_END (bb2
));
1630 if ((XEXP (SET_SRC (set1
), 1) == pc_rtx
)
1631 != (XEXP (SET_SRC (set2
), 1) == pc_rtx
))
1634 cond1
= XEXP (SET_SRC (set1
), 0);
1635 cond2
= XEXP (SET_SRC (set2
), 0);
1636 code1
= GET_CODE (cond1
);
1638 code2
= reversed_comparison_code (cond2
, BB_END (bb2
));
1640 code2
= GET_CODE (cond2
);
1642 if (code2
== UNKNOWN
)
1645 /* Verify codes and operands match. */
1646 match
= ((code1
== code2
1647 && rtx_renumbered_equal_p (XEXP (cond1
, 0), XEXP (cond2
, 0))
1648 && rtx_renumbered_equal_p (XEXP (cond1
, 1), XEXP (cond2
, 1)))
1649 || (code1
== swap_condition (code2
)
1650 && rtx_renumbered_equal_p (XEXP (cond1
, 1),
1652 && rtx_renumbered_equal_p (XEXP (cond1
, 0),
      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 reasonably close.  */
1660 && optimize_bb_for_speed_p (bb1
)
1661 && optimize_bb_for_speed_p (bb2
))
1665 if (b1
->dest
== b2
->dest
)
1666 prob2
= b2
->probability
;
1668 /* Do not use f2 probability as f2 may be forwarded. */
1669 prob2
= REG_BR_PROB_BASE
- b2
->probability
;
	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
1674 if (abs (b1
->probability
- prob2
) > REG_BR_PROB_BASE
/ 2)
1678 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1679 bb1
->index
, bb2
->index
, b1
->probability
, prob2
);
1685 if (dump_file
&& match
)
1686 fprintf (dump_file
, "Conditionals in bb %i and %i match.\n",
1687 bb1
->index
, bb2
->index
);
  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
1699 rtx_jump_table_data
*table1
, *table2
;
1701 if (tablejump_p (BB_END (bb1
), &label1
, &table1
)
1702 && tablejump_p (BB_END (bb2
), &label2
, &table2
)
1703 && GET_CODE (PATTERN (table1
)) == GET_CODE (PATTERN (table2
)))
      /* The labels should never be the same rtx.  If they really are the
	 same, the jump tables are the same too.  So disable crossjumping
	 of blocks BB1 and BB2 because when deleting the common insns in
	 the end of BB1 by delete_basic_block () the jump table would be
	 deleted too.  */
      /* If LABEL2 is referenced in BB1->END do not do anything
	 because we would lose information when replacing
	 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1712 if (label1
!= label2
&& !rtx_referenced_p (label2
, BB_END (bb1
)))
1714 /* Set IDENTICAL to true when the tables are identical. */
1715 bool identical
= false;
1718 p1
= PATTERN (table1
);
1719 p2
= PATTERN (table2
);
1720 if (GET_CODE (p1
) == ADDR_VEC
&& rtx_equal_p (p1
, p2
))
1724 else if (GET_CODE (p1
) == ADDR_DIFF_VEC
1725 && (XVECLEN (p1
, 1) == XVECLEN (p2
, 1))
1726 && rtx_equal_p (XEXP (p1
, 2), XEXP (p2
, 2))
1727 && rtx_equal_p (XEXP (p1
, 3), XEXP (p2
, 3)))
1732 for (i
= XVECLEN (p1
, 1) - 1; i
>= 0 && identical
; i
--)
1733 if (!rtx_equal_p (XVECEXP (p1
, 1, i
), XVECEXP (p2
, 1, i
)))
	  /* Temporarily replace references to LABEL1 with LABEL2
	     in BB1->END so that we can compare the instructions.  */
1743 replace_label_in_insn (BB_END (bb1
), label1
, label2
, false);
1745 match
= (old_insns_match_p (mode
, BB_END (bb1
), BB_END (bb2
))
1747 if (dump_file
&& match
)
1749 "Tablejumps in bb %i and %i match.\n",
1750 bb1
->index
, bb2
->index
);
1752 /* Set the original label in BB1->END because when deleting
1753 a block whose end is a tablejump, the tablejump referenced
1754 from the instruction is deleted too. */
1755 replace_label_in_insn (BB_END (bb1
), label2
, label1
, false);
1764 /* Find the last non-debug non-note instruction in each bb, except
1765 stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1766 handles that case specially. old_insns_match_p does not handle
1767 other types of instruction notes. */
1768 rtx_insn
*last1
= BB_END (bb1
);
1769 rtx_insn
*last2
= BB_END (bb2
);
1770 while (!NOTE_INSN_BASIC_BLOCK_P (last1
) &&
1771 (DEBUG_INSN_P (last1
) || NOTE_P (last1
)))
1772 last1
= PREV_INSN (last1
);
1773 while (!NOTE_INSN_BASIC_BLOCK_P (last2
) &&
1774 (DEBUG_INSN_P (last2
) || NOTE_P (last2
)))
1775 last2
= PREV_INSN (last2
);
1776 gcc_assert (last1
&& last2
);
1778 /* First ensure that the instructions match. There may be many outgoing
1779 edges so this test is generally cheaper. */
1780 if (old_insns_match_p (mode
, last1
, last2
) != dir_both
)
  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     validation.  */
1786 if (EDGE_COUNT (bb1
->succs
) != EDGE_COUNT (bb2
->succs
))
1789 bool nonfakeedges
= false;
1790 FOR_EACH_EDGE (e1
, ei
, bb1
->succs
)
1792 e2
= EDGE_SUCC (bb2
, ei
.index
);
1794 if ((e1
->flags
& EDGE_FAKE
) == 0)
1795 nonfakeedges
= true;
1797 if (e1
->flags
& EDGE_EH
)
1800 if (e2
->flags
& EDGE_EH
)
1803 if (e1
->flags
& EDGE_FALLTHRU
)
1805 if (e2
->flags
& EDGE_FALLTHRU
)
1809 /* If number of edges of various types does not match, fail. */
1810 if (nehedges1
!= nehedges2
1811 || (fallthru1
!= 0) != (fallthru2
!= 0))
  /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
     and the last real insn doesn't have a REG_ARGS_SIZE note, don't
     attempt to optimize, as the two basic blocks might have different
     REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARGS_SIZE notes; they could be missing
     for __builtin_unreachable () uses though.  */
1821 && !ACCUMULATE_OUTGOING_ARGS
1823 || !find_reg_note (last1
, REG_ARGS_SIZE
, NULL
)))
1826 /* fallthru edges must be forwarded to the same destination. */
1829 basic_block d1
= (forwarder_block_p (fallthru1
->dest
)
1830 ? single_succ (fallthru1
->dest
): fallthru1
->dest
);
1831 basic_block d2
= (forwarder_block_p (fallthru2
->dest
)
1832 ? single_succ (fallthru2
->dest
): fallthru2
->dest
);
1838 /* Ensure the same EH region. */
1840 rtx n1
= find_reg_note (BB_END (bb1
), REG_EH_REGION
, 0);
1841 rtx n2
= find_reg_note (BB_END (bb2
), REG_EH_REGION
, 0);
1846 if (n1
&& (!n2
|| XEXP (n1
, 0) != XEXP (n2
, 0)))
1850 /* The same checks as in try_crossjump_to_edge. It is required for RTL
1851 version of sequence abstraction. */
1852 FOR_EACH_EDGE (e1
, ei
, bb2
->succs
)
1856 basic_block d1
= e1
->dest
;
1858 if (FORWARDER_BLOCK_P (d1
))
1859 d1
= EDGE_SUCC (d1
, 0)->dest
;
1861 FOR_EACH_EDGE (e2
, ei
, bb1
->succs
)
1863 basic_block d2
= e2
->dest
;
1864 if (FORWARDER_BLOCK_P (d2
))
1865 d2
= EDGE_SUCC (d2
, 0)->dest
;
/* Return true if basic block BB has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
	  && block_label (bb)
	  && LABEL_PRESERVE_P (block_label (bb)));
}
1887 /* E1 and E2 are edges with the same destination block. Search their
1888 predecessors for common code. If found, redirect control flow from
1889 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1890 or the other way around (dir_backward). DIR specifies the allowed
1891 replacement direction. */
1894 try_crossjump_to_edge (int mode
, edge e1
, edge e2
,
1895 enum replace_direction dir
)
1898 basic_block src1
= e1
->src
, src2
= e2
->src
;
1899 basic_block redirect_to
, redirect_from
, to_remove
;
1900 basic_block osrc1
, osrc2
, redirect_edges_to
, tmp
;
1901 rtx_insn
*newpos1
, *newpos2
;
1905 newpos1
= newpos2
= NULL
;
1907 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1908 to try this optimization.
1910 Basic block partitioning may result in some jumps that appear to
1911 be optimizable (or blocks that appear to be mergeable), but which really
1912 must be left untouched (they are required to make it safely across
1913 partition boundaries). See the comments at the top of
1914 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1916 if (crtl
->has_bb_partition
&& reload_completed
)
1919 /* Search backward through forwarder blocks. We don't need to worry
1920 about multiple entry or chained forwarders, as they will be optimized
1921 away. We do this to look past the unconditional jump following a
1922 conditional jump that is required due to the current CFG shape. */
1923 if (single_pred_p (src1
)
1924 && FORWARDER_BLOCK_P (src1
))
1925 e1
= single_pred_edge (src1
), src1
= e1
->src
;
1927 if (single_pred_p (src2
)
1928 && FORWARDER_BLOCK_P (src2
))
1929 e2
= single_pred_edge (src2
), src2
= e2
->src
;
1931 /* Nothing to do if we reach ENTRY, or a common source block. */
1932 if (src1
== ENTRY_BLOCK_PTR_FOR_FN (cfun
) || src2
1933 == ENTRY_BLOCK_PTR_FOR_FN (cfun
))
  /* Seeing more than one forwarder block would confuse us later...  */
1939 if (FORWARDER_BLOCK_P (e1
->dest
)
1940 && FORWARDER_BLOCK_P (single_succ (e1
->dest
)))
1943 if (FORWARDER_BLOCK_P (e2
->dest
)
1944 && FORWARDER_BLOCK_P (single_succ (e2
->dest
)))
  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
1949 if (EDGE_COUNT (src1
->preds
) == 0 || EDGE_COUNT (src2
->preds
) == 0)
1952 /* Look for the common insn sequence, part the first ... */
1953 if (!outgoing_edges_match (mode
, src1
, src2
))
1956 /* ... and part the second. */
1957 nmatch
= flow_find_cross_jump (src1
, src2
, &newpos1
, &newpos2
, &dir
);
1961 if (newpos1
!= NULL_RTX
)
1962 src1
= BLOCK_FOR_INSN (newpos1
);
1963 if (newpos2
!= NULL_RTX
)
1964 src2
= BLOCK_FOR_INSN (newpos2
);
1966 if (dir
== dir_backward
)
1968 #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
1969 SWAP (basic_block
, osrc1
, osrc2
);
1970 SWAP (basic_block
, src1
, src2
);
1971 SWAP (edge
, e1
, e2
);
1972 SWAP (rtx_insn
*, newpos1
, newpos2
);
  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
1980 if ((nmatch
< PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS
))
1981 && (newpos1
!= BB_HEAD (src1
)))
1984 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1985 if (block_has_preserve_label (e1
->dest
)
1986 && (e1
->flags
& EDGE_ABNORMAL
))
  /* Here we know that the insns in the end of SRC1 which are common with
     SRC2 will be deleted.

     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
1996 rtx_jump_table_data
*table1
, *table2
;
1998 if (tablejump_p (BB_END (osrc1
), &label1
, &table1
)
1999 && tablejump_p (BB_END (osrc2
), &label2
, &table2
)
2000 && label1
!= label2
)
2004 /* Replace references to LABEL1 with LABEL2. */
2005 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
2007 /* Do not replace the label in SRC1->END because when deleting
2008 a block whose end is a tablejump, the tablejump referenced
2009 from the instruction is deleted too. */
2010 if (insn
!= BB_END (osrc1
))
2011 replace_label_in_insn (insn
, label1
, label2
, true);
2016 /* Avoid splitting if possible. We must always split when SRC2 has
2017 EH predecessor edges, or we may end up with basic blocks with both
2018 normal and EH predecessor edges. */
2019 if (newpos2
== BB_HEAD (src2
)
2020 && !(EDGE_PRED (src2
, 0)->flags
& EDGE_EH
))
2024 if (newpos2
== BB_HEAD (src2
))
2026 /* Skip possible basic block header. */
2027 if (LABEL_P (newpos2
))
2028 newpos2
= NEXT_INSN (newpos2
);
2029 while (DEBUG_INSN_P (newpos2
))
2030 newpos2
= NEXT_INSN (newpos2
);
2031 if (NOTE_P (newpos2
))
2032 newpos2
= NEXT_INSN (newpos2
);
2033 while (DEBUG_INSN_P (newpos2
))
2034 newpos2
= NEXT_INSN (newpos2
);
2038 fprintf (dump_file
, "Splitting bb %i before %i insns\n",
2039 src2
->index
, nmatch
);
2040 redirect_to
= split_block (src2
, PREV_INSN (newpos2
))->dest
;
2045 "Cross jumping from bb %i to bb %i; %i common insns\n",
2046 src1
->index
, src2
->index
, nmatch
);
2048 /* We may have some registers visible through the block. */
2049 df_set_bb_dirty (redirect_to
);
2052 redirect_edges_to
= redirect_to
;
2054 redirect_edges_to
= osrc2
;
2056 /* Recompute the frequencies and counts of outgoing edges. */
2057 FOR_EACH_EDGE (s
, ei
, redirect_edges_to
->succs
)
2061 basic_block d
= s
->dest
;
2063 if (FORWARDER_BLOCK_P (d
))
2064 d
= single_succ (d
);
2066 FOR_EACH_EDGE (s2
, ei
, src1
->succs
)
2068 basic_block d2
= s2
->dest
;
2069 if (FORWARDER_BLOCK_P (d2
))
2070 d2
= single_succ (d2
);
2075 s
->count
+= s2
->count
;
2077 /* Take care to update possible forwarder blocks. We verified
2078 that there is no more than one in the chain, so we can't run
2079 into infinite loop. */
2080 if (FORWARDER_BLOCK_P (s
->dest
))
2082 single_succ_edge (s
->dest
)->count
+= s2
->count
;
2083 s
->dest
->count
+= s2
->count
;
2084 s
->dest
->frequency
+= EDGE_FREQUENCY (s
);
2087 if (FORWARDER_BLOCK_P (s2
->dest
))
2089 single_succ_edge (s2
->dest
)->count
-= s2
->count
;
2090 if (single_succ_edge (s2
->dest
)->count
< 0)
2091 single_succ_edge (s2
->dest
)->count
= 0;
2092 s2
->dest
->count
-= s2
->count
;
2093 s2
->dest
->frequency
-= EDGE_FREQUENCY (s
);
2094 if (s2
->dest
->frequency
< 0)
2095 s2
->dest
->frequency
= 0;
2096 if (s2
->dest
->count
< 0)
2097 s2
->dest
->count
= 0;
2100 if (!redirect_edges_to
->frequency
&& !src1
->frequency
)
2101 s
->probability
= (s
->probability
+ s2
->probability
) / 2;
2104 = ((s
->probability
* redirect_edges_to
->frequency
+
2105 s2
->probability
* src1
->frequency
)
2106 / (redirect_edges_to
->frequency
+ src1
->frequency
));
2109 /* Adjust count and frequency for the block. An earlier jump
2110 threading pass may have left the profile in an inconsistent
2111 state (see update_bb_profile_for_threading) so we must be
2112 prepared for overflows. */
2116 tmp
->count
+= src1
->count
;
2117 tmp
->frequency
+= src1
->frequency
;
2118 if (tmp
->frequency
> BB_FREQ_MAX
)
2119 tmp
->frequency
= BB_FREQ_MAX
;
2120 if (tmp
== redirect_edges_to
)
2122 tmp
= find_fallthru_edge (tmp
->succs
)->dest
;
2125 update_br_prob_note (redirect_edges_to
);
2127 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2129 /* Skip possible basic block header. */
2130 if (LABEL_P (newpos1
))
2131 newpos1
= NEXT_INSN (newpos1
);
2133 while (DEBUG_INSN_P (newpos1
))
2134 newpos1
= NEXT_INSN (newpos1
);
2136 if (NOTE_INSN_BASIC_BLOCK_P (newpos1
))
2137 newpos1
= NEXT_INSN (newpos1
);
2139 while (DEBUG_INSN_P (newpos1
))
2140 newpos1
= NEXT_INSN (newpos1
);
2142 redirect_from
= split_block (src1
, PREV_INSN (newpos1
))->src
;
2143 to_remove
= single_succ (redirect_from
);
2145 redirect_edge_and_branch_force (single_succ_edge (redirect_from
), redirect_to
);
2146 delete_basic_block (to_remove
);
2148 update_forwarder_flag (redirect_from
);
2149 if (redirect_to
!= src2
)
2150 update_forwarder_flag (src2
);
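/* Hypothetical sketch of a successful crossjump (block numbers invented):
   if bb 7 and bb 12 both end with the same three insns followed by a jump
   to bb 20, the code above splits bb 12 so that its common tail becomes a
   block of its own (REDIRECT_TO), splits bb 7 just before its copy of
   that tail, redirects bb 7 into REDIRECT_TO, deletes bb 7's now
   unreachable copy, and folds bb 7's counts and frequencies into the
   shared outgoing edges so the profile stays consistent.  */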
2155 /* Search the predecessors of BB for common insn sequences. When found,
2156 share code between them by redirecting control flow. Return true if
2157 any changes made. */
2160 try_crossjump_bb (int mode
, basic_block bb
)
2162 edge e
, e2
, fallthru
;
2164 unsigned max
, ix
, ix2
;
  /* Nothing to do if there are not at least two incoming edges.  */
2167 if (EDGE_COUNT (bb
->preds
) < 2)
2170 /* Don't crossjump if this block ends in a computed jump,
2171 unless we are optimizing for size. */
2172 if (optimize_bb_for_size_p (bb
)
2173 && bb
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
2174 && computed_jump_p (BB_END (bb
)))
2177 /* If we are partitioning hot/cold basic blocks, we don't want to
2178 mess up unconditional or indirect jumps that cross between hot
2181 Basic block partitioning may result in some jumps that appear to
2182 be optimizable (or blocks that appear to be mergeable), but which really
2183 must be left untouched (they are required to make it safely across
2184 partition boundaries). See the comments at the top of
2185 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2187 if (BB_PARTITION (EDGE_PRED (bb
, 0)->src
) !=
2188 BB_PARTITION (EDGE_PRED (bb
, 1)->src
)
2189 || (EDGE_PRED (bb
, 0)->flags
& EDGE_CROSSING
))
2192 /* It is always cheapest to redirect a block that ends in a branch to
2193 a block that falls through into BB, as that adds no branches to the
2194 program. We'll try that combination first. */
2196 max
= PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES
);
2198 if (EDGE_COUNT (bb
->preds
) > max
)
2201 fallthru
= find_fallthru_edge (bb
->preds
);
2204 for (ix
= 0; ix
< EDGE_COUNT (bb
->preds
);)
2206 e
= EDGE_PRED (bb
, ix
);
2209 /* As noted above, first try with the fallthru predecessor (or, a
2210 fallthru predecessor if we are in cfglayout mode). */
2213 /* Don't combine the fallthru edge into anything else.
2214 If there is a match, we'll do it the other way around. */
2217 /* If nothing changed since the last attempt, there is nothing
2220 && !((e
->src
->flags
& BB_MODIFIED
)
2221 || (fallthru
->src
->flags
& BB_MODIFIED
)))
2224 if (try_crossjump_to_edge (mode
, e
, fallthru
, dir_forward
))
2232 /* Non-obvious work limiting check: Recognize that we're going
2233 to call try_crossjump_bb on every basic block. So if we have
2234 two blocks with lots of outgoing edges (a switch) and they
2235 share lots of common destinations, then we would do the
2236 cross-jump check once for each common destination.
2238 Now, if the blocks actually are cross-jump candidates, then
2239 all of their destinations will be shared. Which means that
2240 we only need check them for cross-jump candidacy once. We
2241 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2242 choosing to do the check from the block for which the edge
2243 in question is the first successor of A. */
2244 if (EDGE_SUCC (e
->src
, 0) != e
)
2247 for (ix2
= 0; ix2
< EDGE_COUNT (bb
->preds
); ix2
++)
2249 e2
= EDGE_PRED (bb
, ix2
);
2254 /* We've already checked the fallthru edge above. */
2258 /* The "first successor" check above only prevents multiple
2259 checks of crossjump(A,B). In order to prevent redundant
2260 checks of crossjump(B,A), require that A be the block
2261 with the lowest index. */
2262 if (e
->src
->index
> e2
->src
->index
)
2265 /* If nothing changed since the last attempt, there is nothing
2268 && !((e
->src
->flags
& BB_MODIFIED
)
2269 || (e2
->src
->flags
& BB_MODIFIED
)))
2272 /* Both e and e2 are not fallthru edges, so we can crossjump in either
2274 if (try_crossjump_to_edge (mode
, e
, e2
, dir_both
))
2284 crossjumps_occured
= true;
/* Search the successors of BB for common insn sequences.  When found,
   share code between them by moving it across the basic block
   boundary.  Return true if any changes were made.  */
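/* Illustrative sketch (not part of the original sources).  Head merging
   hoists a common prefix of all successors of BB up into BB itself,
   before its final jump:

       BB: ...; if (cond) jump L2           BB: ...; insn1; if (cond) jump L2
       L1: insn1; insnA; ...        ==>     L1: insnA; ...
       L2: insn1; insnB; ...                L2: insnB; ...

   provided can_move_insns_across shows that the moved insns do not
   interfere with the jump or with the registers live at the merge
   points.  */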
static bool
try_head_merge_bb (basic_block bb)
{
  basic_block final_dest_bb = NULL;
  int max_match = INT_MAX;
  edge e0;
  rtx_insn **headptr, **currptr, **nextptr;
  bool changed, moveall;
  unsigned ix;
  rtx_insn *e0_last_head;
  rtx cond;
  rtx_insn *move_before;
  unsigned nedges = EDGE_COUNT (bb->succs);
  rtx_insn *jump = BB_END (bb);
  regset live, live_union;

  /* Nothing to do if there are not at least two outgoing edges.  */
  if (nedges < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  cond = get_condition (jump, &move_before, true, false);
  if (cond == NULL_RTX)
    {
      if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	move_before = prev_nonnote_nondebug_insn (jump);
      else
	move_before = jump;
    }
  for (ix = 0; ix < nedges; ix++)
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
      return false;

  for (ix = 0; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      basic_block other_bb = e->dest;

      if (df_get_bb_dirty (other_bb))
	{
	  block_was_dirty = true;
	  return false;
	}

      if (e->flags & EDGE_ABNORMAL)
	return false;

      /* Normally, all destination blocks must only be reachable from this
	 block, i.e. they must have one incoming edge.

	 There is one special case we can handle, that of multiple consecutive
	 jumps where the first jumps to one of the targets of the second jump.
	 This happens frequently in switch statements for default labels.
	 The structure is as follows:
	 FINAL_DEST_BB
	 ....
	 if (cond) jump A;
	 fall through
	 BB
	 jump with targets A, B, C, D...
	 A
	 has two incoming edges, from FINAL_DEST_BB and BB

	 In this case, we can try to move the insns through BB and into
	 FINAL_DEST_BB.  */
      if (EDGE_COUNT (other_bb->preds) != 1)
	{
	  edge incoming_edge, incoming_bb_other_edge;
	  edge_iterator ei;

	  if (final_dest_bb != NULL
	      || EDGE_COUNT (other_bb->preds) != 2)
	    return false;

	  /* We must be able to move the insns across the whole block.  */
	  move_before = BB_HEAD (bb);
	  while (!NONDEBUG_INSN_P (move_before))
	    move_before = NEXT_INSN (move_before);

	  if (EDGE_COUNT (bb->preds) != 1)
	    return false;
	  incoming_edge = EDGE_PRED (bb, 0);
	  final_dest_bb = incoming_edge->src;
	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
	    return false;
	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
	    if (incoming_bb_other_edge != incoming_edge)
	      break;
	  if (incoming_bb_other_edge->dest != other_bb)
	    return false;
	}
    }
  e0 = EDGE_SUCC (bb, 0);
  e0_last_head = NULL;
  changed = false;

  for (ix = 1; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      rtx_insn *e0_last, *e_last;
      int nmatch;

      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						 &e0_last, &e_last, 0);
      if (nmatch == 0)
	return false;

      if (nmatch < max_match)
	{
	  max_match = nmatch;
	  e0_last_head = e0_last;
	}
    }

  /* If we matched an entire block, we probably have to avoid moving the
     last insn.  */
  if (max_match > 0
      && e0_last_head == BB_END (e0->dest)
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
	  || control_flow_insn_p (e0_last_head)))
    {
      max_match--;
      if (max_match == 0)
	return false;
      do
	e0_last_head = prev_real_insn (e0_last_head);
      while (DEBUG_INSN_P (e0_last_head));
    }

  if (max_match == 0)
    return false;
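  /* Descriptive note (not from the original sources): at this point
     MAX_MATCH is the number of insns at the head of every successor
     that match the corresponding insns at the head of the first
     successor, and E0_LAST_HEAD is the last insn of that common
     prefix in EDGE_SUCC (bb, 0)->dest.  */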
  /* We must find a union of the live registers at each of the end points.  */
  live = BITMAP_ALLOC (NULL);
  live_union = BITMAP_ALLOC (NULL);

  currptr = XNEWVEC (rtx_insn *, nedges);
  headptr = XNEWVEC (rtx_insn *, nedges);
  nextptr = XNEWVEC (rtx_insn *, nedges);

  for (ix = 0; ix < nedges; ix++)
    {
      int j;
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
      rtx_insn *head = BB_HEAD (merge_bb);

      while (!NONDEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      headptr[ix] = head;
      currptr[ix] = head;

      /* Compute the end point and live information.  */
      for (j = 1; j < max_match; j++)
	do
	  head = NEXT_INSN (head);
	while (!NONDEBUG_INSN_P (head));
      simulate_backwards_to_point (merge_bb, live, head);
      IOR_REG_SET (live_union, live);
    }
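  /* Descriptive note (not from the original sources): LIVE_UNION now
     holds the union, over all candidate successor blocks, of the
     registers computed live at the end point of each matched head
     sequence; the can_move_insns_across calls below check the proposed
     code motion against this set.  */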
  /* If we're moving across two blocks, verify the validity of the
     first move, then adjust the target and let the loop below deal
     with the final move.  */
  if (final_dest_bb != NULL)
    {
      rtx_insn *move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
				       jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall)
	{
	  if (move_upto == NULL_RTX)
	    goto out;

	  while (e0_last_head != move_upto)
	    {
	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
					      live_union);
	      e0_last_head = PREV_INSN (e0_last_head);
	    }
	}
      if (e0_last_head == NULL_RTX)
	goto out;

      jump = BB_END (final_dest_bb);
      cond = get_condition (jump, &move_before, true, false);
      if (cond == NULL_RTX)
	{
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    move_before = prev_nonnote_nondebug_insn (jump);
	  else
	    move_before = jump;
	}
    }
  do
    {
      rtx_insn *move_upto;
      moveall = can_move_insns_across (currptr[0], e0_last_head,
				       move_before, jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall && move_upto == NULL_RTX)
	{
	  if (jump == move_before)
	    break;

	  /* Try again, using a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0 info.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  continue;
	}

      if (final_dest_bb && !moveall)
	/* We haven't checked whether a partial move would be OK for the first
	   move, so we have to fail this case.  */
	break;

      changed = true;
      for (;;)
	{
	  if (currptr[0] == move_upto)
	    break;
	  for (ix = 0; ix < nedges; ix++)
	    {
	      rtx_insn *curr = currptr[ix];
	      do
		curr = NEXT_INSN (curr);
	      while (!NONDEBUG_INSN_P (curr));
	      currptr[ix] = curr;
	    }
	}

      /* If we can't currently move all of the identical insns, remember
	 each insn after the range that we'll merge.  */
      if (!moveall)
	for (ix = 0; ix < nedges; ix++)
	  {
	    rtx_insn *curr = currptr[ix];
	    do
	      curr = NEXT_INSN (curr);
	    while (!NONDEBUG_INSN_P (curr));
	    nextptr[ix] = curr;
	  }

      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
      if (final_dest_bb != NULL)
	df_set_bb_dirty (final_dest_bb);
      df_set_bb_dirty (bb);
      for (ix = 1; ix < nedges; ix++)
	{
	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
	  delete_insn_chain (headptr[ix], currptr[ix], false);
	}
      if (!moveall)
	{
	  if (jump == move_before)
	    break;

	  /* For the unmerged insns, try a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0 info.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  for (ix = 0; ix < nedges; ix++)
	    currptr[ix] = headptr[ix] = nextptr[ix];
	}
    }
  while (!moveall);

 out:
  free (currptr);
  free (headptr);
  free (nextptr);

  crossjumps_occured |= changed;

  return changed;
}
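/* Descriptive note (not from the original sources): unlike
   try_crossjump_to_edge, which merges common code by redirecting
   control flow, try_head_merge_bb physically moves the matched insns
   with reorder_insns and deletes the duplicate copies with
   delete_insn_chain, so the shape of the CFG is unchanged.  */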
/* Return true if BB contains just a bb note, or a bb note followed
   by only DEBUG_INSNs.  */

static bool
trivially_empty_bb_p (basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (1)
    {
      if (insn == BB_HEAD (bb))
	return true;
      if (!DEBUG_INSN_P (insn))
	return false;
      insn = PREV_INSN (insn);
    }
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;
  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occured = false;

  FOR_EACH_BB_FN (bb, cfun)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);
	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
	       b != EXIT_BLOCK_PTR_FOR_FN (cfun);)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;
	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
			 != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  if (BB_FOOTER (b)
			      && BARRIER_P (BB_FOOTER (b)))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU)
				  && BB_FOOTER (e->src) == NULL)
				{
				  if (BB_FOOTER (b))
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    {
				      start_sequence ();
				      BB_FOOTER (e->src) = emit_barrier ();
				      end_sequence ();
				    }
				}
			}
		      else
			{
			  rtx_insn *last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Avoid trying to remove the exit block.  */
		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
		  continue;
		}
	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  && !LABEL_PRESERVE_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}
	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		       ? b->next_bb : b->prev_bb);
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}
	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks, so it
		     does not fit the merge_blocks interface and is kept here
		     in the hope that it will become useless once more of the
		     compiler is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}
	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;
	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && onlyjump_p (BB_END (b))
		  && !CROSSING_JUMP_P (BB_END (b))
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}
	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;
	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
	    changed = true;
	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

	  if (changed)
	    {
	      /* Edge forwarding in particular can cause hot blocks previously
		 reached by both hot and cold blocks to become dominated only
		 by cold blocks.  This will cause the verification below to
		 fail, and lead to now cold code in the hot section.  This is
		 not easy to detect and fix during edge forwarding, and in some
		 cases is only visible after newly unreachable blocks are
		 deleted, which will be done in fixup_partitions.  */
	      fixup_partitions ();
	      checking_verify_flow_info ();
	    }

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
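/* Descriptive note (not from the original sources): try_optimize_cfg
   repeats its pass over the function until an iteration makes no
   changes; FIRST_PASS and the BB_MODIFIED flags let the crossjump code
   skip pairs of blocks that cannot have gained new merge opportunities
   since the previous iteration.  */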
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();
  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominators information, walking blocks backward gets us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn, as they are referenced by the
   preceding insns computing the destination, so we delay deleting them
   and garbage-collect them once life information is computed.  */

static void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx_insn *label = insn, *jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
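/* Illustrative example (not from the original sources): after the
   tablejump insn that used it has been removed, a jump table survives
   between two blocks as a CODE_LABEL immediately followed by a
   JUMP_TABLE_DATA insn, with no remaining uses of the label; the walk
   above deletes exactly such label/table pairs.  */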
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();
  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();
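  /* Illustrative example (not from the original sources): two blocks
     that each end in a call to abort () have no successors, so they
     are never predecessors of a common block and try_crossjump_bb
     would not consider merging their tails.  The fake edges added
     above make both blocks predecessors of EXIT, turning them into
     ordinary crossjump candidates.  */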
  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
	    run_fast_dce ();
	}
      else
	break;
    }
  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ??? We probably do this way too often.  */
  if (current_loops
      && (changed
	  || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump
unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}
namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}