/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to the
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
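
/* A sketch of the edge-forwarding case above (added for illustration; the
   labels are hypothetical):

	B:  ...				B:  ...
	    goto L1		=>	    goto L2
	L1: goto L2			L2: ...
	L2: ...

   L1 is a forwarder block, so the edge B->L1 is redirected straight to
   L2; once no edges enter L1, it is removed as unreachable.  */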
#include "coretypes.h"
#include "insn-config.h"
#include "tree-pass.h"
#include "cfgcleanup.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running first pass of try_optimize_cfg loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of try_optimize_cfg.  */
static bool crossjumps_occurred;

/* Set to true if we couldn't run an optimization due to stale liveness
   information; we should run df_analyze to enable more opportunities.  */
static bool block_was_dirty;
static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static void merge_memattrs (rtx, rtx);

/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (bb == NULL)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
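
/* An illustration of the transformation performed by try_simplify_condjump
   below (added for exposition; the labels and condition are hypothetical):

	if (cond) goto L2;		if (!cond) goto L3;
	goto L3;		=>	L2: ...
     L2: ...

   The conditional branch is inverted so that it targets the destination of
   the unconditional jump, the block holding the unconditional jump becomes
   dead and is deleted, and the inverted branch falls through to L2.  */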

/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx_insn *cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
		    block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}

/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */

static bool
mark_effect (rtx exp, regset nonequal)
{
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      dest = XEXP (exp, 0);
      if (REG_P (dest))
	bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
      return false;

    default:
      return false;
    }
}

/* Return true if X contains a register in NONEQUAL.  */

static bool
mentions_nonequal_regs (const_rtx x, regset nonequal)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
	{
	  unsigned int end_regno = END_REGNO (x);
	  for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
	    if (REGNO_REG_SET_P (nonequal, regno))
	      return true;
	}
    }
  return false;
}
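
/* A jump-threading sketch for thread_jump below (illustration only; the
   condition is hypothetical):

	A: if (x == 0) goto B;		A: if (x == 0) goto C;
	   ...
	B: if (x == 0) goto C;		B: if (x == 0) goto C;
	   ...

   If block B has no side effects and its condition is already decided by
   the condition tested in A, the edge A->B can be redirected to the
   successor of B that must be taken (here C).  thread_jump proves this
   using cselib values and comparison domination.  */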

/* Attempt to prove that the basic block B will have no side effects and
   always continues in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */

static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2;
  rtx_insn *insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we do handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Punt if BB_END (e->src) is a doloop-like conditional jump that modifies
     the registers used in cond1.  */
  if (modified_in_p (cond1, BB_END (e->src)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (0);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      /* cond2 must not mention any register that is not equal to the
	 former block.  Check this before processing that instruction,
	 as BB_END (b) could contain also clobbers.  */
      if (insn == BB_END (b)
	  && mentions_nonequal_regs (cond2, nonequal))
	goto failed_exit;

      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
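
/* Note for try_forward_edges below (added commentary): the function walks a
   chain such as B -> F1 -> F2 -> T, where F1 and F2 are forwarder blocks,
   and redirects the edge leaving B straight to T.  With CLEANUP_THREADING
   it may additionally follow edges proven constant by thread_jump.  The
   COUNTER variable bounds the walk so that cycles of forwarders cannot
   loop forever.  */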

/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      location_t goto_locus;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
      bool new_target_threaded = false;

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward fallthru
	 edge to the same place as the branch edge of conditional branch
	 and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      while (counter < n_basic_blocks_for_fn (cfun))
	{
	  basic_block new_target = NULL;
	  may_thread |= (target->flags & BB_MODIFIED) != 0;

	  if (FORWARDER_BLOCK_P (target)
	      && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks_for_fn (cfun);
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  location_t new_locus = single_succ_edge (target)->goto_locus;
		  location_t locus = goto_locus;

		  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
		      && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
		      && new_locus != locus)
		    new_target = NULL;
		  else
		    {
		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			locus = new_locus;

		      rtx_insn *last = BB_END (target);
		      if (DEBUG_INSN_P (last))
			last = prev_nondebug_insn (last);
		      if (last && INSN_P (last))
			new_locus = INSN_LOCATION (last);
		      else
			new_locus = UNKNOWN_LOCATION;

		      if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
			  && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
			  && new_locus != locus)
			new_target = NULL;
		      else
			{
			  if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
			    locus = new_locus;

			  goto_locus = locus;
			}
		    }
		}
	    }

	  /* Allow to thread only over one edge at time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge,
					      n_basic_blocks_for_fn (cfun));
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks_for_fn (cfun);
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges
			      < (n_basic_blocks_for_fn (cfun)
				 - NUM_FIXED_BLOCKS));
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Do not turn non-crossing jump to crossing.  Depending on target
	     it may require different instruction pattern.  */
	  if ((e->flags & EDGE_CROSSING)
	      || BB_PARTITION (first) == BB_PARTITION (new_target))
	    {
	      counter++;
	      target = new_target;
	      threaded |= new_target_threaded;
	    }
	  else
	    break;
	}

      if (counter >= n_basic_blocks_for_fn (cfun))
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  profile_count edge_count = e->count ();
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  free (threaded_edges);
  return changed;
}
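
/* The two helpers below merge blocks by physically reordering the insn
   chain.  A sketch (added for exposition): when A ends in a jump to B and
   A has no incoming fallthru edge,

	... X A Y B ...    becomes    ... X Y A B ...

   so A and B become contiguous and can be merged; the mirror case moves B
   up to follow A when B has no outgoing fallthru edge.  */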

/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx_insn *barrier, *real_b_end;
  rtx_insn *label;
  rtx_jump_table_data *table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    BB_END (b) = table;

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}

/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves basic block away or introduces another optimization
   possibility, return basic block just before B so cleanup_cfg doesn't
   need to iterate.

   It may be good idea to return basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */

static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;

      /* Protect the loop latches.  */
      if (current_loops && c->loop_father->latch == c)
	return NULL;

      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      tmp_edge = find_fallthru_edge (c->succs);
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      tmp_edge = find_fallthru_edge (b->preds);
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
    }

  return NULL;
}

/* Removes the memory attributes of MEM expressions
   if they are not equal.  */

static void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }
	  else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
		   || (MEM_OFFSET_KNOWN_P (x)
		       && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y))))
	    {
	      clear_mem_offset (x);
	      clear_mem_offset (y);
	    }

	  if (!MEM_SIZE_KNOWN_P (x))
	    clear_mem_size (y);
	  else if (!MEM_SIZE_KNOWN_P (y))
	    clear_mem_size (x);
	  else if (known_le (MEM_SIZE (x), MEM_SIZE (y)))
	    set_mem_size (x, MEM_SIZE (y));
	  else if (known_le (MEM_SIZE (y), MEM_SIZE (x)))
	    set_mem_size (y, MEM_SIZE (x));
	  else
	    {
	      /* The sizes aren't ordered, so we can't merge them.  */
	      clear_mem_size (x);
	      clear_mem_size (y);
	    }

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }
  if (code == MEM)
    {
      if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
	{
	  MEM_READONLY_P (x) = 0;
	  MEM_READONLY_P (y) = 0;
	}
      if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
	{
	  MEM_NOTRAP_P (x) = 0;
	  MEM_NOTRAP_P (y) = 0;
	}
      if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
	{
	  MEM_VOLATILE_P (x) = 1;
	  MEM_VOLATILE_P (y) = 1;
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}

/* Checks if patterns P1 and P2 are equivalent, apart from the possibly
   different single sets S1 and S2.  */

static bool
equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
{
  int i;
  rtx e1, e2;

  if (p1 == s1 && p2 == s2)
    return true;

  if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
    return false;

  if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
    return false;

  for (i = 0; i < XVECLEN (p1, 0); i++)
    {
      e1 = XVECEXP (p1, 0, i);
      e2 = XVECEXP (p2, 0, i);
      if (e1 == s1 && e2 == s2)
	continue;
      if (reload_completed
	  ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
	continue;

      return false;
    }

  return true;
}

/* NOTE1 is the REG_EQUAL note, if any, attached to an insn
   that is a single_set with a SET_SRC of SRC1.  Similarly
   for NOTE2/SRC2.

   So effectively NOTE1/NOTE2 are an alternate form of
   SRC1/SRC2 respectively.

   Return nonzero if SRC1 or NOTE1 has the same constant
   integer value as SRC2 or NOTE2.  Else return zero.  */

static int
values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
{
  if (note1
      && note2
      && CONST_INT_P (XEXP (note1, 0))
      && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
    return 1;

  if (!note1
      && !note2
      && CONST_INT_P (src1)
      && CONST_INT_P (src2)
      && rtx_equal_p (src1, src2))
    return 1;

  if (note1
      && CONST_INT_P (src2)
      && rtx_equal_p (XEXP (note1, 0), src2))
    return 1;

  if (note2
      && CONST_INT_P (src1)
      && rtx_equal_p (XEXP (note2, 0), src1))
    return 1;

  return 0;
}
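
/* A worked example for can_replace_by below (illustration only; register
   numbers and values are made up):

	i1: r5 = r9   {REG_EQUAL 42}	i2: r5 = 42

   Both insns set r5 to 42, so values_equal_p accepts them, but only I1 may
   be replaced by I2 (dir_forward): the reverse replacement would execute
   "r5 = r9" at I2's location, where the value and liveness of r9 are
   unknown.  If both sources were the constant itself, the result would be
   dir_both.  */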

/* Examine register notes on I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
can_replace_by (rtx_insn *i1, rtx_insn *i2)
{
  rtx s1, s2, d1, d2, src1, src2, note1, note2;
  bool c1, c2;

  /* Check for 2 sets.  */
  s1 = single_set (i1);
  s2 = single_set (i2);
  if (s1 == NULL_RTX || s2 == NULL_RTX)
    return dir_none;

  /* Check that the 2 sets set the same dest.  */
  d1 = SET_DEST (s1);
  d2 = SET_DEST (s2);
  if (!(reload_completed
	? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
    return dir_none;

  /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets
     set dest to the same value.  */
  note1 = find_reg_equal_equiv_note (i1);
  note2 = find_reg_equal_equiv_note (i2);

  src1 = SET_SRC (s1);
  src2 = SET_SRC (s2);

  if (!values_equal_p (note1, note2, src1, src2))
    return dir_none;

  if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
    return dir_none;

  /* Although the 2 sets set dest to the same value, we cannot replace
       (set (dest) (const_int))
     by
       (set (dest) (reg))
     because we don't know if the reg is live and has the same value at the
     location of replacement.  */
  c1 = CONST_INT_P (src1);
  c2 = CONST_INT_P (src2);
  if (c1 && c2)
    return dir_both;
  else if (c2)
    return dir_forward;
  else if (c1)
    return dir_backward;

  return dir_none;
}

/* Merges directions A and B.  */

static enum replace_direction
merge_dir (enum replace_direction a, enum replace_direction b)
{
  /* Implements the following table:
        |bo fw bw no
      bo|bo fw bw no
      fw|fw fw no no
      bw|bw no bw no
      no|no no no no.  */

  if (a == b)
    return a;

  if (a == dir_both)
    return b;
  if (b == dir_both)
    return a;

  return dir_none;
}

/* Array of flags indexed by reg note kind, true if the given
   reg note is CFA related.  */
static const bool reg_note_cfa_p[] = {
#undef REG_CFA_NOTE
#define DEF_REG_NOTE(NAME) false,
#define REG_CFA_NOTE(NAME) true,
#include "reg-notes.def"
#undef REG_CFA_NOTE
};

/* Return true if I1 and I2 have identical CFA notes (the same order
   and equivalent content).  */

static bool
insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2)
{
  rtx n1, n2;
  for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ;
       n1 = XEXP (n1, 1), n2 = XEXP (n2, 1))
    {
      /* Skip over reg notes not related to CFI information.  */
      while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)])
	n1 = XEXP (n1, 1);
      while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)])
	n2 = XEXP (n2, 1);
      if (n1 == NULL_RTX && n2 == NULL_RTX)
	return true;
      if (n1 == NULL_RTX || n2 == NULL_RTX)
	return false;
      if (XEXP (n1, 0) == XEXP (n2, 0))
	;
      else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX)
	return false;
      else if (!(reload_completed
		 ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0))
		 : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0))))
	return false;
      if (REG_NOTE_KIND (n1) != REG_NOTE_KIND (n2))
	return false;
    }
}

/* Examine I1 and I2 and return:
   - dir_forward if I1 can be replaced by I2, or
   - dir_backward if I2 can be replaced by I1, or
   - dir_both if both are the case.  */

static enum replace_direction
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return dir_none;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return dir_both;

  /* ??? Do not allow cross-jumping between different stack levels.  */
  p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
  p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
  if (p1 && p2)
    {
      p1 = XEXP (p1, 0);
      p2 = XEXP (p2, 0);
      if (!rtx_equal_p (p1, p2))
	return dir_none;

      /* ??? Worse, this adjustment had better be constant lest we
	 have differing incoming stack levels.  */
      if (!frame_pointer_needed
	  && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN))
	return dir_none;
    }
  else if (p1 || p2)
    return dir_none;

  /* Do not allow cross-jumping between frame related insns and other
     insns.  */
  if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2))
    return dir_none;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return dir_none;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.

     Also check for identical exception regions.  */

  if (CALL_P (i1))
    {
      /* Ensure the same EH region.  */
      rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);

      if (!n1 && n2)
	return dir_none;

      if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
	return dir_none;

      if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
	return dir_none;

      /* For address sanitizer, never crossjump __asan_report_* builtins,
	 otherwise errors might be reported on incorrect lines.  */
      if (flag_sanitize & SANITIZE_ADDRESS)
	{
	  rtx call = get_call_rtx_from (i1);
	  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
	    {
	      rtx symbol = XEXP (XEXP (call, 0), 0);
	      if (SYMBOL_REF_DECL (symbol)
		  && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
		{
		  if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
		       == BUILT_IN_NORMAL)
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 >= BUILT_IN_ASAN_REPORT_LOAD1
		      && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
			 <= BUILT_IN_ASAN_STOREN)
		    return dir_none;
		}
	    }
	}
    }

  /* If both i1 and i2 are frame related, verify all the CFA notes
     in the same order and with the same content.  */
  if (RTX_FRAME_RELATED_P (i1) && !insns_have_identical_cfa_notes (i1, i2))
    return dir_none;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return dir_none;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return dir_both;

  return can_replace_by (i1, i2);
}

/* When comparing insns I1 and I2 in flow_find_cross_jump or
   flow_find_head_matching_sequence, ensure the notes match.  */

static void
merge_notes (rtx_insn *i1, rtx_insn *i2)
{
  /* If the merged insns have different REG_EQUAL notes, then
     remove them.  */
  rtx equiv1 = find_reg_equal_equiv_note (i1);
  rtx equiv2 = find_reg_equal_equiv_note (i2);

  if (equiv1 && !equiv2)
    remove_note (i1, equiv1);
  else if (!equiv1 && equiv2)
    remove_note (i2, equiv2);
  else if (equiv1 && equiv2
	   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
    {
      remove_note (i1, equiv1);
      remove_note (i2, equiv2);
    }
}

/* Walks from I1 in BB1 backward till the next non-debug insn, and returns the
   resulting insn in I1, and the corresponding bb in BB1.  At the head of a
   bb, if there is a predecessor bb that reaches this bb via fallthru, and
   FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
   DID_FALLTHRU.  Otherwise, stops at the head of the bb.  */

static void
walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
		       bool *did_fallthru)
{
  edge fallthru;

  *did_fallthru = false;

  /* Ignore notes.  */
  while (!NONDEBUG_INSN_P (*i1))
    {
      if (*i1 != BB_HEAD (*bb1))
	{
	  *i1 = PREV_INSN (*i1);
	  continue;
	}

      if (!follow_fallthru)
	return;

      fallthru = find_fallthru_edge ((*bb1)->preds);
      if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  || !single_succ_p (fallthru->src))
	return;

      *bb1 = fallthru->src;
      *i1 = BB_END (*bb1);
      *did_fallthru = true;
    }
}
1332 sequence that are either equivalent, or allow forward or backward
1333 replacement. Store the first insns for that sequence in *F1 and *F2 and
1334 return the sequence length.
1336 DIR_P indicates the allowed replacement direction on function entry, and
1337 the actual replacement direction on function exit. If NULL, only equivalent
1338 sequences are allowed.
1340 To simplify callers of this function, if the blocks match exactly,
1341 store the head of the blocks in *F1 and *F2. */
1344 flow_find_cross_jump (basic_block bb1
, basic_block bb2
, rtx_insn
**f1
,
1345 rtx_insn
**f2
, enum replace_direction
*dir_p
)
1347 rtx_insn
*i1
, *i2
, *last1
, *last2
, *afterlast1
, *afterlast2
;
1349 enum replace_direction dir
, last_dir
, afterlast_dir
;
1350 bool follow_fallthru
, did_fallthru
;
1356 afterlast_dir
= dir
;
1357 last_dir
= afterlast_dir
;
1359 /* Skip simple jumps at the end of the blocks. Complex jumps still
1360 need to be compared for equivalence, which we'll do below. */
1363 last1
= afterlast1
= last2
= afterlast2
= NULL
;
1365 || (returnjump_p (i1
) && !side_effects_p (PATTERN (i1
))))
1368 i1
= PREV_INSN (i1
);
1373 || (returnjump_p (i2
) && !side_effects_p (PATTERN (i2
))))
1376 /* Count everything except for unconditional jump as insn.
1377 Don't count any jumps if dir_p is NULL. */
1378 if (!simplejump_p (i2
) && !returnjump_p (i2
) && last1
&& dir_p
)
1380 i2
= PREV_INSN (i2
);
1385 /* In the following example, we can replace all jumps to C by jumps to A.
1387 This removes 4 duplicate insns.
1388 [bb A] insn1 [bb C] insn1
1394 We could also replace all jumps to A by jumps to C, but that leaves B
1395 alive, and removes only 2 duplicate insns. In a subsequent crossjump
1396 step, all jumps to B would be replaced with jumps to the middle of C,
1397 achieving the same result with more effort.
1398 So we allow only the first possibility, which means that we don't allow
1399 fallthru in the block that's being replaced. */
1401 follow_fallthru
= dir_p
&& dir
!= dir_forward
;
1402 walk_to_nondebug_insn (&i1
, &bb1
, follow_fallthru
, &did_fallthru
);
1406 follow_fallthru
= dir_p
&& dir
!= dir_backward
;
1407 walk_to_nondebug_insn (&i2
, &bb2
, follow_fallthru
, &did_fallthru
);
1411 if (i1
== BB_HEAD (bb1
) || i2
== BB_HEAD (bb2
))
1414 /* Do not turn corssing edge to non-crossing or vice versa after
1416 if (BB_PARTITION (BLOCK_FOR_INSN (i1
))
1417 != BB_PARTITION (BLOCK_FOR_INSN (i2
))
1418 && reload_completed
)
1421 dir
= merge_dir (dir
, old_insns_match_p (0, i1
, i2
));
1422 if (dir
== dir_none
|| (!dir_p
&& dir
!= dir_both
))
1425 merge_memattrs (i1
, i2
);
1427 /* Don't begin a cross-jump with a NOTE insn. */
1430 merge_notes (i1
, i2
);
1432 afterlast1
= last1
, afterlast2
= last2
;
1433 last1
= i1
, last2
= i2
;
1434 afterlast_dir
= last_dir
;
1436 if (active_insn_p (i1
))
1440 i1
= PREV_INSN (i1
);
1441 i2
= PREV_INSN (i2
);
1444 /* Don't allow the insn after a compare to be shared by
1445 cross-jumping unless the compare is also shared. */
1446 if (HAVE_cc0
&& ninsns
&& reg_mentioned_p (cc0_rtx
, last1
)
1447 && ! sets_cc0_p (last1
))
1448 last1
= afterlast1
, last2
= afterlast2
, last_dir
= afterlast_dir
, ninsns
--;
1450 /* Include preceding notes and labels in the cross-jump. One,
1451 this may bring us to the head of the blocks as requested above.
1452 Two, it keeps line number notes as matched as may be. */
1455 bb1
= BLOCK_FOR_INSN (last1
);
1456 while (last1
!= BB_HEAD (bb1
) && !NONDEBUG_INSN_P (PREV_INSN (last1
)))
1457 last1
= PREV_INSN (last1
);
1459 if (last1
!= BB_HEAD (bb1
) && LABEL_P (PREV_INSN (last1
)))
1460 last1
= PREV_INSN (last1
);
1462 bb2
= BLOCK_FOR_INSN (last2
);
1463 while (last2
!= BB_HEAD (bb2
) && !NONDEBUG_INSN_P (PREV_INSN (last2
)))
1464 last2
= PREV_INSN (last2
);
1466 if (last2
!= BB_HEAD (bb2
) && LABEL_P (PREV_INSN (last2
)))
1467 last2
= PREV_INSN (last2
);

/* Like flow_find_cross_jump, except start looking for a matching sequence from
   the head of the two blocks.  Do not include jumps at the end.
   If STOP_AFTER is nonzero, stop after finding that many matching
   instructions.  If STOP_AFTER is zero, count all INSN_P insns; if it is
   nonzero, count only active insns.  */

int
flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
				  rtx_insn **f2, int stop_after)
{
  rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
  int ninsns = 0;
  edge e;
  edge_iterator ei;
  int nehedges1 = 0, nehedges2 = 0;

  FOR_EACH_EDGE (e, ei, bb1->succs)
    if (e->flags & EDGE_EH)
      nehedges1++;
  FOR_EACH_EDGE (e, ei, bb2->succs)
    if (e->flags & EDGE_EH)
      nehedges2++;

  i1 = BB_HEAD (bb1);
  i2 = BB_HEAD (bb2);
  last1 = beforelast1 = last2 = beforelast2 = NULL;

  while (true)
    {
      /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
	{
	  if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i1 = NEXT_INSN (i1);
	}

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
	{
	  if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
	    break;
	  i2 = NEXT_INSN (i2);
	}

      if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
	  || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
	break;

      if (NOTE_P (i1) || NOTE_P (i2)
	  || JUMP_P (i1) || JUMP_P (i2))
	break;

      /* A sanity check to make sure we're not merging insns with different
	 effects on EH.  If only one of them ends a basic block, it shouldn't
	 have an EH edge; if both end a basic block, there should be the same
	 number of EH edges.  */
      if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
	   && nehedges1 > 0)
	  || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
	      && nehedges2 > 0)
	  || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
	      && nehedges1 != nehedges2))
	break;

      if (old_insns_match_p (0, i1, i2) != dir_both)
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  merge_notes (i1, i2);

	  beforelast1 = last1, beforelast2 = last2;
	  last1 = i1, last2 = i2;
	  if (!stop_after || active_insn_p (i1))
	    ninsns++;
	}

      if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
	  || (stop_after > 0 && ninsns == stop_after))
	break;

      i1 = NEXT_INSN (i1);
      i2 = NEXT_INSN (i2);
    }

  /* Don't allow a compare to be shared by cross-jumping unless the insn
     after the compare is also shared.  */
  if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
      && sets_cc0_p (last1))
    last1 = beforelast1, last2 = beforelast2, ninsns--;

  if (ninsns)
    {
      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}

/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before end of the basic block, the semantic remains unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If we performed shrink-wrapping, edges to the exit block can
     only be distinguished for JUMP_INSNs.  The two paths may differ in
     whether they went through the prologue.  Sibcalls are fine, we know
     that we either didn't need or inserted an epilogue before them.  */
  if (crtl->shrink_wrapped
      && single_succ_p (bb1)
      && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (!JUMP_P (BB_END (bb1))
	  /* Punt if the only successor is a fake edge to exit, the jump
	     must be some weird one.  */
	  || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0)
      && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
    return false;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && optimize_bb_for_speed_p (bb1)
	  && optimize_bb_for_speed_p (bb2))
	{
	  profile_probability prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = b2->probability.invert ();

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (b1->probability.differs_lot_from_p (prob2))
	    {
	      if (dump_file)
		{
		  fprintf (dump_file,
			   "Outcomes of branch in bb %i and %i differ too"
			   " much (", bb1->index, bb2->index);
		  b1->probability.dump (dump_file);
		  prob2.dump (dump_file);
		  fprintf (dump_file, ")\n");
		}
	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx_insn *label1, *label2;
      rtx_jump_table_data *table1, *table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are same
	     the jump tables are same too. So disable crossjumping of blocks BB1
	     and BB2 because when deleting the common insns in the end of BB1
	     by delete_basic_block () the jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		identical = true;
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  replace_label_in_insn (BB_END (bb1), label1, label2, false);

		  match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
			   == dir_both);
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  replace_label_in_insn (BB_END (bb1), label2, label1, false);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* Find the last non-debug non-note instruction in each bb, except
     stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
     handles that case specially.  old_insns_match_p does not handle
     other types of instruction notes.  */
  rtx_insn *last1 = BB_END (bb1);
  rtx_insn *last2 = BB_END (bb2);
  while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
	 (DEBUG_INSN_P (last1) || NOTE_P (last1)))
    last1 = PREV_INSN (last1);
  while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
	 (DEBUG_INSN_P (last2) || NOTE_P (last2)))
    last2 = PREV_INSN (last2);
  gcc_assert (last1 && last2);

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (old_insns_match_p (mode, last1, last2) != dir_both)
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  bool nonfakeedges = false;
  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if ((e1->flags & EDGE_FAKE) == 0)
	nonfakeedges = true;

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
     and the last real insn doesn't have REG_ARGS_SIZE note, don't
     attempt to optimize, as the two basic blocks might have different
     REG_ARGS_SIZE depths.  For noreturn calls and unconditional
     traps there should be REG_ARG_SIZE notes, they could be missing
     for __builtin_unreachable () uses though.  */
  if (!nonfakeedges
      && !ACCUMULATE_OUTGOING_ARGS
      && (!INSN_P (last1)
	  || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  It is required for RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
	d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
	{
	  basic_block d2 = e2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d1 == d2)
	    break;
	}

      if (!e2)
	return false;
    }

  return true;
}

/* Returns true if BB basic block has a preserve label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
	  && block_label (bb)
	  && LABEL_PRESERVE_P (block_label (bb)));
}
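
/* A crossjump sketch (added for exposition; insns and labels are
   hypothetical): when two predecessors of the same block end in an
   identical insn sequence,

	SRC1: insnA			SRC2: insnA
	      insnB			      insnB
	      goto L			      goto L

   the common tail of SRC1 is deleted and its predecessors are redirected
   into the matching tail of SRC2, so the sequence is kept only once.  */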

/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
   or the other way around (dir_backward).  DIR specifies the allowed
   replacement direction.  */

static bool
try_crossjump_to_edge (int mode, edge e1, edge e2,
		       enum replace_direction dir)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  basic_block osrc1, osrc2, redirect_edges_to, tmp;
  rtx_insn *newpos1, *newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
      == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Do not turn crossing edge to non-crossing or vice versa after reload.  */
  if (BB_PARTITION (src1) != BB_PARTITION (src2)
      && reload_completed)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);

  osrc1 = src1;
  osrc2 = src2;
  if (newpos1 != NULL_RTX)
    src1 = BLOCK_FOR_INSN (newpos1);
  if (newpos2 != NULL_RTX)
    src2 = BLOCK_FOR_INSN (newpos2);

  /* Check that SRC1 and SRC2 have preds again.  They may have changed
     above due to the call to flow_find_cross_jump.  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  if (dir == dir_backward)
    {
      std::swap (osrc1, osrc2);
      std::swap (src1, src2);
      std::swap (e1, e2);
      std::swap (newpos1, newpos2);
    }

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx_insn *label1, *label2;
    rtx_jump_table_data *table1, *table2;

    if (tablejump_p (BB_END (osrc1), &label1, &table1)
	&& tablejump_p (BB_END (osrc2), &label2, &table2)
	&& label1 != label2)
      {
	rtx_insn *insn;

	/* Replace references to LABEL1 with LABEL2.  */
	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	  {
	    /* Do not replace the label in SRC1->END because when deleting
	       a block whose end is a tablejump, the tablejump referenced
	       from the instruction is deleted too.  */
	    if (insn != BB_END (osrc1))
	      replace_label_in_insn (insn, label1, label2, true);
	  }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  if (osrc2 == src2)
    redirect_edges_to = redirect_to;
  else
    redirect_edges_to = osrc2;

  /* Recompute the counts of destinations of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	s->dest->count += s->count ();

      if (FORWARDER_BLOCK_P (s2->dest))
	s2->dest->count -= s->count ();

      s->probability = s->probability.combine_with_count
			  (redirect_edges_to->count,
			   s2->probability, src1->count);
    }

  /* Adjust count for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  tmp = redirect_to;
  do
    {
      tmp->count += src1->count;
      if (tmp == redirect_edges_to)
	break;
      tmp = find_fallthru_edge (tmp->succs)->dest;
    }
  while (true);
  update_br_prob_note (redirect_edges_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
2168 /* Search the predecessors of BB for common insn sequences. When found,
2169 share code between them by redirecting control flow. Return true if
2170 any changes made. */
static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  edge_iterator ei;
  unsigned max, ix, ix2;
  bool changed;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
					BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  fallthru = find_fallthru_edge (bb->preds);

  changed = false;
  for (ix = 0; ix < EDGE_COUNT (bb->preds);)
    {
      e = EDGE_PRED (bb, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (fallthru->src->flags & BB_MODIFIED)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
	    {
	      changed = true;
	      ix = 0;
	      continue;
	    }
	}
      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
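      /* Concretely (an illustrative scenario): if two switch blocks A and B
	 both branch to destinations D1...D10, every Di sees A and B among
	 its predecessors, so crossjump(A,B) would otherwise be attempted
	 ten times; the check below runs it only from the one Di that is
	 EDGE_SUCC (A, 0)->dest.  */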
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
	{
	  e2 = EDGE_PRED (bb, ix2);

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && !((e->src->flags & BB_MODIFIED)
		   || (e2->src->flags & BB_MODIFIED)))
	    continue;

	  /* Both e and e2 are not fallthru edges, so we can crossjump in
	     either direction.  */
	  if (try_crossjump_to_edge (mode, e, e2, dir_both))
	    {
	      changed = true;
	      ix = 0;
	      break;
	    }
	}
    }

  if (changed)
    crossjumps_occurred = true;

  return changed;
}
/* Search the successors of BB for common insn sequences.  When found,
   share code between them by moving it across the basic block
   boundary.  Return true if any changes were made.  */
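/* An illustrative example (made up for exposition): in

	if (c) { x = a + b; u = f (); } else { x = a + b; v = g (); }

   both successor blocks begin with the same insn computing x, so that
   insn can be hoisted above the conditional jump, leaving only the
   differing calls in the two arms.  */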
static bool
try_head_merge_bb (basic_block bb)
{
  basic_block final_dest_bb = NULL;
  int max_match = INT_MAX;
  edge e0;
  rtx_insn **headptr, **currptr, **nextptr;
  bool changed, moveall;
  unsigned ix;
  rtx_insn *e0_last_head;
  rtx cond;
  rtx_insn *move_before;
  unsigned nedges = EDGE_COUNT (bb->succs);
  rtx_insn *jump = BB_END (bb);
  regset live, live_union;

  /* Nothing to do if there are not at least two outgoing edges.  */
  if (nedges < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && computed_jump_p (BB_END (bb)))
    return false;

  cond = get_condition (jump, &move_before, true, false);
  if (cond == NULL_RTX)
    {
      if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	move_before = prev_nonnote_nondebug_insn (jump);
      else
	move_before = jump;
    }

  for (ix = 0; ix < nedges; ix++)
    if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
      return false;

  for (ix = 0; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      basic_block other_bb = e->dest;

      if (df_get_bb_dirty (other_bb))
	{
	  block_was_dirty = true;
	  return false;
	}

      if (e->flags & EDGE_ABNORMAL)
	return false;
      /* Normally, all destination blocks must only be reachable from this
	 block, i.e. they must have one incoming edge.

	 There is one special case we can handle, that of multiple consecutive
	 jumps where the first jumps to one of the targets of the second jump.
	 This happens frequently in switch statements for default labels.
	 The structure is as follows:
	 FINAL_DEST_BB
	 ....
	 if (cond) jump A;
	 fall through
	 BB
	 jump with targets A, B, C, D...
	 A
	 has two incoming edges, from FINAL_DEST_BB and BB

	 In this case, we can try to move the insns through BB and into
	 FINAL_DEST_BB.  */
      if (EDGE_COUNT (other_bb->preds) != 1)
	{
	  edge incoming_edge, incoming_bb_other_edge;
	  edge_iterator ei;

	  if (final_dest_bb != NULL
	      || EDGE_COUNT (other_bb->preds) != 2)
	    return false;

	  /* We must be able to move the insns across the whole block.  */
	  move_before = BB_HEAD (bb);
	  while (!NONDEBUG_INSN_P (move_before))
	    move_before = NEXT_INSN (move_before);

	  if (EDGE_COUNT (bb->preds) != 1)
	    return false;
	  incoming_edge = EDGE_PRED (bb, 0);
	  final_dest_bb = incoming_edge->src;
	  if (EDGE_COUNT (final_dest_bb->succs) != 2)
	    return false;
	  FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
	    if (incoming_bb_other_edge != incoming_edge)
	      break;
	  if (incoming_bb_other_edge->dest != other_bb)
	    return false;
	}
    }

  e0 = EDGE_SUCC (bb, 0);
  e0_last_head = NULL;
  changed = false;
  for (ix = 1; ix < nedges; ix++)
    {
      edge e = EDGE_SUCC (bb, ix);
      rtx_insn *e0_last, *e_last;
      int nmatch;

      nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
						 &e0_last, &e_last, 0);
      if (nmatch == 0)
	return false;

      if (nmatch < max_match)
	{
	  max_match = nmatch;
	  e0_last_head = e0_last;
	}
    }

  /* If we matched an entire block, we probably have to avoid moving the
     last insn.  */
  if (max_match > 0
      && e0_last_head == BB_END (e0->dest)
      && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
	  || control_flow_insn_p (e0_last_head)))
    {
      max_match--;
      if (max_match == 0)
	return false;
      e0_last_head = prev_real_nondebug_insn (e0_last_head);
    }

  if (max_match == 0)
    return false;
  /* We must find a union of the live registers at each of the end points.  */
  live = BITMAP_ALLOC (NULL);
  live_union = BITMAP_ALLOC (NULL);

  currptr = XNEWVEC (rtx_insn *, nedges);
  headptr = XNEWVEC (rtx_insn *, nedges);
  nextptr = XNEWVEC (rtx_insn *, nedges);

  for (ix = 0; ix < nedges; ix++)
    {
      int j;
      basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
      rtx_insn *head = BB_HEAD (merge_bb);

      while (!NONDEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      headptr[ix] = head;
      currptr[ix] = head;

      /* Compute the end point and live information.  */
      for (j = 1; j < max_match; j++)
	do
	  head = NEXT_INSN (head);
	while (!NONDEBUG_INSN_P (head));
      simulate_backwards_to_point (merge_bb, live, head);
      IOR_REG_SET (live_union, live);
    }
  /* If we're moving across two blocks, verify the validity of the
     first move, then adjust the target and let the loop below deal
     with the final move.  */
  if (final_dest_bb != NULL)
    {
      rtx_insn *move_upto;

      moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
				       jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall)
	{
	  if (move_upto == NULL_RTX)
	    goto out;

	  while (e0_last_head != move_upto)
	    {
	      df_simulate_one_insn_backwards (e0->dest, e0_last_head,
					      live_union);
	      e0_last_head = PREV_INSN (e0_last_head);
	    }
	}
      if (e0_last_head == NULL_RTX)
	goto out;

      jump = BB_END (final_dest_bb);
      cond = get_condition (jump, &move_before, true, false);
      if (cond == NULL_RTX)
	{
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    move_before = prev_nonnote_nondebug_insn (jump);
	  else
	    move_before = jump;
	}
    }
  do
    {
      rtx_insn *move_upto;
      moveall = can_move_insns_across (currptr[0], e0_last_head,
				       move_before, jump, e0->dest, live_union,
				       NULL, &move_upto);
      if (!moveall && move_upto == NULL_RTX)
	{
	  if (jump == move_before)
	    break;

	  /* Try again, using a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0 info.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  continue;
	}

      if (final_dest_bb && !moveall)
	/* We haven't checked whether a partial move would be OK for the first
	   move, so we have to fail this case.  */
	break;

      changed = true;
      for (;;)
	{
	  if (currptr[0] == move_upto)
	    break;
	  for (ix = 0; ix < nedges; ix++)
	    {
	      rtx_insn *curr = currptr[ix];
	      do
		curr = NEXT_INSN (curr);
	      while (!NONDEBUG_INSN_P (curr));
	      currptr[ix] = curr;
	    }
	}

      /* If we can't currently move all of the identical insns, remember
	 each insn after the range that we'll merge.  */
      if (!moveall)
	for (ix = 0; ix < nedges; ix++)
	  {
	    rtx_insn *curr = currptr[ix];
	    do
	      curr = NEXT_INSN (curr);
	    while (!NONDEBUG_INSN_P (curr));
	    nextptr[ix] = curr;
	  }
      reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
      df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
      if (final_dest_bb != NULL)
	df_set_bb_dirty (final_dest_bb);
      df_set_bb_dirty (bb);
      for (ix = 1; ix < nedges; ix++)
	{
	  df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
	  delete_insn_chain (headptr[ix], currptr[ix], false);
	}
      if (!moveall)
	{
	  if (jump == move_before)
	    break;

	  /* For the unmerged insns, try a different insertion point.  */
	  move_before = jump;

	  /* Don't try moving before a cc0 user, as that may invalidate
	     the cc0 info.  */
	  if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
	    break;

	  for (ix = 0; ix < nedges; ix++)
	    currptr[ix] = headptr[ix] = nextptr[ix];
	}
    }
  while (!moveall);

 out:
  free (currptr);
  free (headptr);
  free (nextptr);

  crossjumps_occurred |= changed;

  return changed;
}
/* Return true if BB contains just a bb note, or a bb note followed
   by only DEBUG_INSNs.  */
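/* For instance, a block containing only its NOTE_INSN_BASIC_BLOCK, or that
   note followed by a few DEBUG_INSNs left behind by deleted code, counts as
   trivially empty; a block with any real insn does not.  */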
static bool
trivially_empty_bb_p (basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (1)
    {
      if (insn == BB_HEAD (bb))
	return true;
      if (!DEBUG_INSN_P (insn))
	return false;
      insn = PREV_INSN (insn);
    }
}
/* Return true if BB contains just a return and possibly a USE of the
   return value.  Fill in *RET and *USE with the return and use insns
   if any found, otherwise NULL.  All CLOBBERs are ignored.  */
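/* A sketch of the RTL shape this recognizes (illustrative, not taken from
   a real dump): a block whose only nondebug insns are

	(insn (use (reg/i:SI 0 ax)))
	(jump_insn (simple_return))

   yields *USE and *RET respectively, while any interspersed
   (clobber ...) patterns are ignored.  */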
static bool
bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use)
{
  rtx_insn *insn;

  *ret = *use = NULL;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return false;

  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	rtx pat = PATTERN (insn);

	if (!*ret && ANY_RETURN_P (pat))
	  *ret = insn;
	else if (!*ret && !*use && GET_CODE (pat) == USE
	    && REG_P (XEXP (pat, 0))
	    && REG_FUNCTION_VALUE_P (XEXP (pat, 0)))
	  *use = insn;
	else if (GET_CODE (pat) != CLOBBER)
	  return false;
      }

  return !!*ret;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */
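/* A small illustration of the iteration below: given "A: goto B; B: goto C;",
   try_forward_edges first redirects A straight to C, the now unreachable B
   is deleted, and if A is then C's only predecessor the two blocks are
   merged, all within a single pass over the function.  */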
static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  crossjumps_occurred = false;

  FOR_EACH_BB_FN (bb, cfun)
    update_forwarder_flag (bb);

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  block_was_dirty = false;
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
	       b != EXIT_BLOCK_PTR_FOR_FN (cfun);)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;
	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
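	      /* For example (illustrative): a function whose whole body is
		 just __builtin_unreachable () expands to an empty block with
		 no successors immediately after the entry block; that block
		 is kept, whereas the same empty block anywhere else would be
		 removed here.  */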
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
			 != b))
		{
		  c = b->prev_bb;
		  if (EDGE_COUNT (b->preds) > 0)
		    {
		      edge e;
		      edge_iterator ei;

		      if (current_ir_type () == IR_RTL_CFGLAYOUT)
			{
			  if (BB_FOOTER (b)
			      && BARRIER_P (BB_FOOTER (b)))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU)
				  && BB_FOOTER (e->src) == NULL)
				{
				  if (BB_FOOTER (b))
				    {
				      BB_FOOTER (e->src) = BB_FOOTER (b);
				      BB_FOOTER (b) = NULL;
				    }
				  else
				    {
				      start_sequence ();
				      BB_FOOTER (e->src) = emit_barrier ();
				      end_sequence ();
				    }
				}
			}
		      else
			{
			  rtx_insn *last = get_last_bb_insn (b);
			  if (last && BARRIER_P (last))
			    FOR_EACH_EDGE (e, ei, b->preds)
			      if ((e->flags & EDGE_FALLTHRU))
				emit_barrier_after (BB_END (e->src));
			}
		    }
		  delete_basic_block (b);
		  changed = true;
		  /* Avoid trying to remove the exit block.  */
		  b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
		  continue;
		}
	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  && !LABEL_PRESERVE_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  delete_insn (BB_HEAD (b));
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		       ? b->next_bb : b->prev_bb);
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  b = c;
		  changed = true;
		  continue;
		}
	      /* Merge B with its single successor, if any.  */
	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSN.  This code possibly creates new basic blocks so it
		     does not fit merge_blocks interface and is kept here in
		     hope that it will become useless once more of compiler
		     is transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}
	      /* Try to change a branch to a return to just that return.  */
	      rtx_insn *ret, *use;
	      if (single_succ_p (b)
		  && onlyjump_p (BB_END (b))
		  && bb_is_just_return (single_succ (b), &ret, &use))
		{
		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				     PATTERN (ret), 0))
		    {
		      if (use)
			emit_insn_before (copy_insn (PATTERN (use)),
					  BB_END (b));
		      if (dump_file)
			fprintf (dump_file, "Changed jump %d->%d to return.\n",
				 b->index, single_succ (b)->index);
		      redirect_edge_succ (single_succ_edge (b),
					  EXIT_BLOCK_PTR_FOR_FN (cfun));
		      single_succ_edge (b)->flags &= ~EDGE_CROSSING;
		      changed_here = true;
		    }
		}
	      /* Try to change a conditional branch to a return to the
		 respective conditional return.  */
	      if (EDGE_COUNT (b->succs) == 2
		  && any_condjump_p (BB_END (b))
		  && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use))
		{
		  if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				     PATTERN (ret), 0))
		    {
		      if (use)
			emit_insn_before (copy_insn (PATTERN (use)),
					  BB_END (b));
		      if (dump_file)
			fprintf (dump_file, "Changed conditional jump %d->%d "
				 "to conditional return.\n",
				 b->index, BRANCH_EDGE (b)->dest->index);
		      redirect_edge_succ (BRANCH_EDGE (b),
					  EXIT_BLOCK_PTR_FOR_FN (cfun));
		      BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
		      changed_here = true;
		    }
		}
	      /* Try to flip a conditional branch that falls through to
		 a return so that it becomes a conditional return and a
		 new jump to the original branch target.  */
	      if (EDGE_COUNT (b->succs) == 2
		  && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && any_condjump_p (BB_END (b))
		  && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use))
		{
		  if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
				   JUMP_LABEL (BB_END (b)), 0))
		    {
		      basic_block new_ft = BRANCH_EDGE (b)->dest;
		      if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)),
					 PATTERN (ret), 0))
			{
			  if (use)
			    emit_insn_before (copy_insn (PATTERN (use)),
					      BB_END (b));
			  if (dump_file)
			    fprintf (dump_file, "Changed conditional jump "
				     "%d->%d to conditional return, adding "
				     "fall-through jump.\n",
				     b->index, BRANCH_EDGE (b)->dest->index);
			  redirect_edge_succ (BRANCH_EDGE (b),
					      EXIT_BLOCK_PTR_FOR_FN (cfun));
			  BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING;
			  std::swap (BRANCH_EDGE (b)->probability,
				     FALLTHRU_EDGE (b)->probability);
			  update_br_prob_note (b);
			  basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b));
			  notice_new_block (jb);
			  if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)),
					      block_label (new_ft), 0))
			    gcc_unreachable ();
			  redirect_edge_succ (single_succ_edge (jb), new_ft);
			  changed_here = true;
			}
		      else
			{
			  /* Invert the jump back to what it was.  This should
			     never fail.  */
			  if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)),
					    JUMP_LABEL (BB_END (b)), 0))
			    gcc_unreachable ();
			}
		    }
		}
	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
		  && onlyjump_p (BB_END (b))
		  && !CROSSING_JUMP_P (BB_END (b))
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      if ((mode & CLEANUP_CROSSJUMP)
		  /* This can lengthen register lifetimes.  Do it only after
		     reload.  */
		  && reload_completed
		  && try_head_merge_bb (b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
	    changed = true;
	  if (block_was_dirty)
	    {
	      /* This should only be set by head-merging.  */
	      gcc_assert (mode & CLEANUP_CROSSJUMP);
	      df_analyze ();
	    }

	  if (changed)
	    {
	      /* Edge forwarding in particular can cause hot blocks previously
		 reached by both hot and cold blocks to become dominated only
		 by cold blocks.  This will cause the verification below to
		 fail, and lead to now cold code in the hot section.  This is
		 not easy to detect and fix during edge forwarding, and in
		 some cases is only visible after newly unreachable blocks
		 are deleted, which will be done in fixup_partitions.  */
	      if ((mode & CLEANUP_NO_PARTITIONING) == 0)
		{
		  fixup_partitions ();
		  checking_verify_flow_info ();
		}
	    }

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  FOR_ALL_BB_FN (b, cfun)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();

  /* When we're in GIMPLE mode and there may be debug bind insns, we
     should delete blocks in reverse dominator order, so as to get a
     chance to substitute all released DEFs into debug bind stmts.  If
     we don't have dominators information, walking blocks backward
     gets us a better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  vec<basic_block> h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);

		  while (h.length ())
		    {
		      b = h.pop ();

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  h.release ();
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
	   b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing tablejump insn as they are referenced by the preceding
   insns computing the destination, so we delay deleting and garbagecollect
   them once life information is computed.  */
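/* E.g. (illustrative): after a tablejump is removed, its table survives
   between two blocks as a code label immediately followed by a
   JUMP_TABLE_DATA insn; once nothing but the table itself references the
   label, LABEL_NUSES drops to LABEL_PRESERVE_P and the scan below deletes
   the pair.  */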
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn, *next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx_insn *label = insn, *jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();
  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
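  /* E.g. (illustrative): two blocks that each end in a call to abort ()
     have no real successors, but the fake edges give them a common
     successor in EXIT, letting try_crossjump_bb merge their identical
     tails.  */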
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();
  if (!dbg_cnt (cfg_cleanup))
    return changed;

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred)
	    run_fast_dce ();
	}
      else
	break;
    }
  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  /* ???  We probably do this way too often.  */
  if (current_loops
      && (changed
	  || (mode & CLEANUP_CFG_CHANGED)))
    {
      timevar_push (TV_REPAIR_LOOPS);
      /* The above doesn't preserve dominance info if available.  */
      gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
      calculate_dominance_info (CDI_DOMINATORS);
      fix_loop_structure (NULL);
      free_dominance_info (CDI_DOMINATORS);
      timevar_pop (TV_REPAIR_LOOPS);
    }

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
namespace {

const pass_data pass_data_jump =
{
  RTL_PASS, /* type */
  "jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump : public rtl_opt_pass
{
public:
  pass_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_jump
unsigned int
pass_jump::execute (function *)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_jump (gcc::context *ctxt)
{
  return new pass_jump (ctxt);
}
namespace {

const pass_data pass_data_postreload_jump =
{
  RTL_PASS, /* type */
  "postreload_jump", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_postreload_jump : public rtl_opt_pass
{
public:
  pass_postreload_jump (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_postreload_jump, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_postreload_jump

unsigned int
pass_postreload_jump::execute (function *)
{
  cleanup_cfg (flag_thread_jumps ? CLEANUP_THREADING : 0);
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_postreload_jump (gcc::context *ctxt)
{
  return new pass_postreload_jump (ctxt);
}
namespace {

const pass_data pass_data_jump2 =
{
  RTL_PASS, /* type */
  "jump2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_JUMP, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_jump2 : public rtl_opt_pass
{
public:
  pass_jump2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_jump2, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
      return 0;
    }

}; // class pass_jump2

} // anon namespace

rtl_opt_pass *
make_pass_jump2 (gcc::context *ctxt)
{
  return new pass_jump2 (ctxt);
}