gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This file contains the control flow optimizer. The main entry point is
21 cleanup_cfg. The following optimizations are performed:
23 - Unreachable block removal
24 - Edge forwarding (an edge to a forwarder block is forwarded to its
25 successor. Simplification of the branch instruction is performed by
26 the underlying infrastructure, so the branch can be converted to a
27 simplejump or eliminated).
28 - Cross jumping (tail merging)
29 - Conditional jump-around-simplejump simplification
30 - Basic block merging. */
32 #include "config.h"
33 #include "system.h"
34 #include "coretypes.h"
35 #include "tm.h"
36 #include "rtl.h"
37 #include "hash-set.h"
38 #include "machmode.h"
39 #include "vec.h"
40 #include "double-int.h"
41 #include "input.h"
42 #include "alias.h"
43 #include "symtab.h"
44 #include "wide-int.h"
45 #include "inchash.h"
46 #include "tree.h"
47 #include "hard-reg-set.h"
48 #include "regs.h"
49 #include "insn-config.h"
50 #include "flags.h"
51 #include "recog.h"
52 #include "diagnostic-core.h"
53 #include "alloc-pool.h"
54 #include "cselib.h"
55 #include "params.h"
56 #include "tm_p.h"
57 #include "target.h"
58 #include "hashtab.h"
59 #include "function.h" /* The inline functions in emit-rtl.h need crtl. */
60 #include "emit-rtl.h"
61 #include "tree-pass.h"
62 #include "cfgloop.h"
63 #include "function.h"
64 #include "statistics.h"
65 #include "real.h"
66 #include "fixed-value.h"
67 #include "expmed.h"
68 #include "dojump.h"
69 #include "explow.h"
70 #include "calls.h"
71 #include "varasm.h"
72 #include "stmt.h"
73 #include "expr.h"
74 #include "dominance.h"
75 #include "cfg.h"
76 #include "cfgrtl.h"
77 #include "cfganal.h"
78 #include "cfgbuild.h"
79 #include "cfgcleanup.h"
80 #include "predict.h"
81 #include "basic-block.h"
82 #include "df.h"
83 #include "dce.h"
84 #include "dbgcnt.h"
85 #include "rtl-iter.h"
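86 /* Nonzero if BB has been marked as a forwarder block, i.e. it does nothing but (optionally) jump to its single successor. */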
87 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
89 /* Set to true when we are running first pass of try_optimize_cfg loop. */
90 static bool first_pass;
92 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
93 static bool crossjumps_occured;
95 /* Set to true if we couldn't run an optimization due to stale liveness
96 information; we should run df_analyze to enable more opportunities. */
97 static bool block_was_dirty;
99 static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction);
100 static bool try_crossjump_bb (int, basic_block);
101 static bool outgoing_edges_match (int, basic_block, basic_block);
102 static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *);
104 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
105 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
106 static bool try_optimize_cfg (int);
107 static bool try_simplify_condjump (basic_block);
108 static bool try_forward_edges (int, basic_block);
109 static edge thread_jump (edge, basic_block);
110 static bool mark_effect (rtx, bitmap);
111 static void notice_new_block (basic_block);
112 static void update_forwarder_flag (basic_block);
113 static void merge_memattrs (rtx, rtx);
115 /* Set flags for newly created block. */
117 static void
118 notice_new_block (basic_block bb)
120 if (!bb)
121 return;
123 if (forwarder_block_p (bb))
124 bb->flags |= BB_FORWARDER_BLOCK;
127 /* Recompute forwarder flag after block has been modified. */
129 static void
130 update_forwarder_flag (basic_block bb)
132 if (forwarder_block_p (bb))
133 bb->flags |= BB_FORWARDER_BLOCK;
134 else
135 bb->flags &= ~BB_FORWARDER_BLOCK;
138 /* Simplify a conditional jump around an unconditional jump.
139 Return true if something changed. */
141 static bool
142 try_simplify_condjump (basic_block cbranch_block)
144 basic_block jump_block, jump_dest_block, cbranch_dest_block;
145 edge cbranch_jump_edge, cbranch_fallthru_edge;
146 rtx_insn *cbranch_insn;
148 /* Verify that there are exactly two successors. */
149 if (EDGE_COUNT (cbranch_block->succs) != 2)
150 return false;
152 /* Verify that we've got a normal conditional branch at the end
153 of the block. */
154 cbranch_insn = BB_END (cbranch_block);
155 if (!any_condjump_p (cbranch_insn))
156 return false;
158 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
159 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
161 /* The next block must not have multiple predecessors, must not
162 be the last block in the function, and must contain just the
163 unconditional jump. */
164 jump_block = cbranch_fallthru_edge->dest;
165 if (!single_pred_p (jump_block)
166 || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
167 || !FORWARDER_BLOCK_P (jump_block))
168 return false;
169 jump_dest_block = single_succ (jump_block);
171 /* If we are partitioning hot/cold basic blocks, we don't want to
172 mess up unconditional or indirect jumps that cross between hot
173 and cold sections.
175 Basic block partitioning may result in some jumps that appear to
176 be optimizable (or blocks that appear to be mergeable), but which really
177 must be left untouched (they are required to make it safely across
178 partition boundaries). See the comments at the top of
179 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
181 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
182 || (cbranch_jump_edge->flags & EDGE_CROSSING))
183 return false;
185 /* The conditional branch must target the block after the
186 unconditional branch. */
187 cbranch_dest_block = cbranch_jump_edge->dest;
189 if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun)
190 || !can_fallthru (jump_block, cbranch_dest_block))
191 return false;
193 /* Invert the conditional branch. */
194 if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn),
195 block_label (jump_dest_block), 0))
196 return false;
198 if (dump_file)
199 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
200 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
202 /* Success. Update the CFG to match. Note that after this point
203 the edge variable names appear backwards; the redirection is done
204 this way to preserve edge profile data. */
205 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
206 cbranch_dest_block);
207 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
208 jump_dest_block);
209 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
210 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
211 update_br_prob_note (cbranch_block);
213 /* Delete the block with the unconditional jump, and clean up the mess. */
214 delete_basic_block (jump_block);
215 tidy_fallthru_edge (cbranch_jump_edge);
216 update_forwarder_flag (cbranch_block);
218 return true;
221 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
222 effect on the register. Used by jump threading. */
224 static bool
225 mark_effect (rtx exp, regset nonequal)
227 rtx dest;
228 switch (GET_CODE (exp))
230 /* In case we do clobber the register, mark it as equal, as we know the
231 value is dead so it doesn't have to match. */
232 case CLOBBER:
233 dest = XEXP (exp, 0);
234 if (REG_P (dest))
235 bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest));
236 return false;
238 case SET:
239 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
240 return false;
241 dest = SET_DEST (exp);
242 if (dest == pc_rtx)
243 return false;
244 if (!REG_P (dest))
245 return true;
246 bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest));
247 return false;
249 default:
250 return false;
254 /* Return true if X contains a register in NONEQUAL. */
255 static bool
256 mentions_nonequal_regs (const_rtx x, regset nonequal)
258 subrtx_iterator::array_type array;
259 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
261 const_rtx x = *iter;
262 if (REG_P (x))
264 unsigned int end_regno = END_REGNO (x);
265 for (unsigned int regno = REGNO (x); regno < end_regno; ++regno)
266 if (REGNO_REG_SET_P (nonequal, regno))
267 return true;
270 return false;
273 /* Attempt to prove that the basic block B will have no side effects and
274 always continues along the same edge if reached via E. Return that edge
275 if it exists, NULL otherwise. */
277 static edge
278 thread_jump (edge e, basic_block b)
280 rtx set1, set2, cond1, cond2;
281 rtx_insn *insn;
282 enum rtx_code code1, code2, reversed_code2;
283 bool reverse1 = false;
284 unsigned i;
285 regset nonequal;
286 bool failed = false;
287 reg_set_iterator rsi;
289 if (b->flags & BB_NONTHREADABLE_BLOCK)
290 return NULL;
292 /* At the moment, we handle only conditional jumps, but later we may
293 want to extend this code to tablejumps and others. */
294 if (EDGE_COUNT (e->src->succs) != 2)
295 return NULL;
296 if (EDGE_COUNT (b->succs) != 2)
298 b->flags |= BB_NONTHREADABLE_BLOCK;
299 return NULL;
302 /* Second branch must end with onlyjump, as we will eliminate the jump. */
303 if (!any_condjump_p (BB_END (e->src)))
304 return NULL;
306 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
308 b->flags |= BB_NONTHREADABLE_BLOCK;
309 return NULL;
312 set1 = pc_set (BB_END (e->src));
313 set2 = pc_set (BB_END (b));
314 if (((e->flags & EDGE_FALLTHRU) != 0)
315 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
316 reverse1 = true;
318 cond1 = XEXP (SET_SRC (set1), 0);
319 cond2 = XEXP (SET_SRC (set2), 0);
320 if (reverse1)
321 code1 = reversed_comparison_code (cond1, BB_END (e->src));
322 else
323 code1 = GET_CODE (cond1);
325 code2 = GET_CODE (cond2);
326 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
328 if (!comparison_dominates_p (code1, code2)
329 && !comparison_dominates_p (code1, reversed_code2))
330 return NULL;
332 /* Ensure that the comparison operators are equivalent.
333 ??? This is far too pessimistic. We should allow swapped operands,
334 different CCmodes, or for example interval comparisons that
335 dominate even when the operands are not equivalent. */
336 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
337 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
338 return NULL;
340 /* Short circuit cases where block B contains some side effects, as we can't
341 safely bypass it. */
342 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
343 insn = NEXT_INSN (insn))
344 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
346 b->flags |= BB_NONTHREADABLE_BLOCK;
347 return NULL;
350 cselib_init (0);
352 /* First process all values computed in the source basic block. */
353 for (insn = NEXT_INSN (BB_HEAD (e->src));
354 insn != NEXT_INSN (BB_END (e->src));
355 insn = NEXT_INSN (insn))
356 if (INSN_P (insn))
357 cselib_process_insn (insn);
359 nonequal = BITMAP_ALLOC (NULL);
360 CLEAR_REG_SET (nonequal);
362 /* Now assume that we've continued via the edge E to B and continue
363 processing as if it were the same basic block.
364 Our goal is to prove that the whole block is a NOOP. */
366 for (insn = NEXT_INSN (BB_HEAD (b));
367 insn != NEXT_INSN (BB_END (b)) && !failed;
368 insn = NEXT_INSN (insn))
370 if (INSN_P (insn))
372 rtx pat = PATTERN (insn);
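373 /* A PARALLEL must have each of its elements checked individually for a non-trivial effect. */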
374 if (GET_CODE (pat) == PARALLEL)
376 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
377 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
379 else
380 failed |= mark_effect (pat, nonequal);
383 cselib_process_insn (insn);
386 /* Later we should clear nonequal of dead registers. So far we don't
387 have liveness information in cfg_cleanup. */
388 if (failed)
390 b->flags |= BB_NONTHREADABLE_BLOCK;
391 goto failed_exit;
394 /* cond2 must not mention any register whose value is not the same as in
395 the former block. */
396 if (mentions_nonequal_regs (cond2, nonequal))
397 goto failed_exit;
399 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
400 goto failed_exit;
402 BITMAP_FREE (nonequal);
403 cselib_finish ();
404 if ((comparison_dominates_p (code1, code2) != 0)
405 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
406 return BRANCH_EDGE (b);
407 else
408 return FALLTHRU_EDGE (b);
410 failed_exit:
411 BITMAP_FREE (nonequal);
412 cselib_finish ();
413 return NULL;
416 /* Attempt to forward edges leaving basic block B.
417 Return true if successful. */
419 static bool
420 try_forward_edges (int mode, basic_block b)
422 bool changed = false;
423 edge_iterator ei;
424 edge e, *threaded_edges = NULL;
426 /* If we are partitioning hot/cold basic blocks, we don't want to
427 mess up unconditional or indirect jumps that cross between hot
428 and cold sections.
430 Basic block partitioning may result in some jumps that appear to
431 be optimizable (or blocks that appear to be mergeable), but which really
432 must be left untouched (they are required to make it safely across
433 partition boundaries). See the comments at the top of
434 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
436 if (JUMP_P (BB_END (b)) && CROSSING_JUMP_P (BB_END (b)))
437 return false;
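438 /* Examine each outgoing edge of B and try to forward it to the final non-forwarder destination, optionally threading jumps on the way. */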
439 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
441 basic_block target, first;
442 location_t goto_locus;
443 int counter;
444 bool threaded = false;
445 int nthreaded_edges = 0;
446 bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0;
448 /* Skip complex edges because we don't know how to update them.
450 Still handle fallthru edges, as we can succeed in forwarding a fallthru
451 edge to the same place as the branch edge of a conditional branch
452 and turn the conditional branch into an unconditional branch. */
453 if (e->flags & EDGE_COMPLEX)
455 ei_next (&ei);
456 continue;
459 target = first = e->dest;
460 counter = NUM_FIXED_BLOCKS;
461 goto_locus = e->goto_locus;
463 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
464 up jumps that cross between hot/cold sections.
466 Basic block partitioning may result in some jumps that appear
467 to be optimizable (or blocks that appear to be mergeable), but which
468 really must be left untouched (they are required to make it safely
469 across partition boundaries). See the comments at the top of
470 bb-reorder.c:partition_hot_cold_basic_blocks for complete
471 details. */
473 if (first != EXIT_BLOCK_PTR_FOR_FN (cfun)
474 && JUMP_P (BB_END (first))
475 && CROSSING_JUMP_P (BB_END (first)))
476 return changed;
478 while (counter < n_basic_blocks_for_fn (cfun))
480 basic_block new_target = NULL;
481 bool new_target_threaded = false;
482 may_thread |= (target->flags & BB_MODIFIED) != 0;
484 if (FORWARDER_BLOCK_P (target)
485 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
486 && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun))
488 /* Bypass trivial infinite loops. */
489 new_target = single_succ (target);
490 if (target == new_target)
491 counter = n_basic_blocks_for_fn (cfun);
492 else if (!optimize)
494 /* When not optimizing, ensure that edges or forwarder
495 blocks with different locus are not optimized out. */
496 location_t new_locus = single_succ_edge (target)->goto_locus;
497 location_t locus = goto_locus;
499 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
500 && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
501 && new_locus != locus)
502 new_target = NULL;
503 else
505 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
506 locus = new_locus;
508 rtx_insn *last = BB_END (target);
509 if (DEBUG_INSN_P (last))
510 last = prev_nondebug_insn (last);
511 if (last && INSN_P (last))
512 new_locus = INSN_LOCATION (last);
513 else
514 new_locus = UNKNOWN_LOCATION;
516 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION
517 && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION
518 && new_locus != locus)
519 new_target = NULL;
520 else
522 if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION)
523 locus = new_locus;
525 goto_locus = locus;
531 /* Allow threading only over one edge at a time to simplify updating
532 of probabilities. */
533 else if ((mode & CLEANUP_THREADING) && may_thread)
535 edge t = thread_jump (e, target);
536 if (t)
538 if (!threaded_edges)
539 threaded_edges = XNEWVEC (edge,
540 n_basic_blocks_for_fn (cfun));
541 else
543 int i;
545 /* Detect an infinite loop across blocks not
546 including the start block. */
547 for (i = 0; i < nthreaded_edges; ++i)
548 if (threaded_edges[i] == t)
549 break;
550 if (i < nthreaded_edges)
552 counter = n_basic_blocks_for_fn (cfun);
553 break;
557 /* Detect an infinite loop across the start block. */
558 if (t->dest == b)
559 break;
561 gcc_assert (nthreaded_edges
562 < (n_basic_blocks_for_fn (cfun)
563 - NUM_FIXED_BLOCKS));
564 threaded_edges[nthreaded_edges++] = t;
566 new_target = t->dest;
567 new_target_threaded = true;
571 if (!new_target)
572 break;
574 counter++;
575 target = new_target;
576 threaded |= new_target_threaded;
579 if (counter >= n_basic_blocks_for_fn (cfun))
581 if (dump_file)
582 fprintf (dump_file, "Infinite loop in BB %i.\n",
583 target->index);
585 else if (target == first)
586 ; /* We didn't do anything. */
587 else
589 /* Save the values now, as the edge may get removed. */
590 gcov_type edge_count = e->count;
591 int edge_probability = e->probability;
592 int edge_frequency;
593 int n = 0;
595 e->goto_locus = goto_locus;
597 /* Don't force if target is exit block. */
598 if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun))
600 notice_new_block (redirect_edge_and_branch_force (e, target));
601 if (dump_file)
602 fprintf (dump_file, "Conditionals threaded.\n");
604 else if (!redirect_edge_and_branch (e, target))
606 if (dump_file)
607 fprintf (dump_file,
608 "Forwarding edge %i->%i to %i failed.\n",
609 b->index, e->dest->index, target->index);
610 ei_next (&ei);
611 continue;
614 /* We successfully forwarded the edge. Now update profile
615 data: for each edge we traversed in the chain, remove
616 the original edge's execution count. */
617 edge_frequency = apply_probability (b->frequency, edge_probability);
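618 /* Walk the chain from FIRST to TARGET, updating the profile of each intermediate block and edge that the redirected executions no longer pass through. */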
621 edge t;
623 if (!single_succ_p (first))
625 gcc_assert (n < nthreaded_edges);
626 t = threaded_edges [n++];
627 gcc_assert (t->src == first);
628 update_bb_profile_for_threading (first, edge_frequency,
629 edge_count, t);
630 update_br_prob_note (first);
632 else
634 first->count -= edge_count;
635 if (first->count < 0)
636 first->count = 0;
637 first->frequency -= edge_frequency;
638 if (first->frequency < 0)
639 first->frequency = 0;
640 /* It is possible that as a result of
641 threading we've removed the edge, as it is
642 threaded to the fallthru edge. Avoid
643 getting out of sync. */
644 if (n < nthreaded_edges
645 && first == threaded_edges [n]->src)
646 n++;
647 t = single_succ_edge (first);
650 t->count -= edge_count;
651 if (t->count < 0)
652 t->count = 0;
653 first = t->dest;
655 while (first != target);
657 changed = true;
658 continue;
660 ei_next (&ei);
663 free (threaded_edges);
664 return changed;
668 /* Blocks A and B are to be merged into a single block. A has no incoming
669 fallthru edge, so it can be moved before B without adding or modifying
670 any jumps (aside from the jump from A to B). */
672 static void
673 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
675 rtx_insn *barrier;
677 /* If we are partitioning hot/cold basic blocks, we don't want to
678 mess up unconditional or indirect jumps that cross between hot
679 and cold sections.
681 Basic block partitioning may result in some jumps that appear to
682 be optimizable (or blocks that appear to be mergeable), but which really
683 must be left untouched (they are required to make it safely across
684 partition boundaries). See the comments at the top of
685 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
687 if (BB_PARTITION (a) != BB_PARTITION (b))
688 return;
690 barrier = next_nonnote_insn (BB_END (a));
691 gcc_assert (BARRIER_P (barrier));
692 delete_insn (barrier);
694 /* Scramble the insn chain. */
695 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
696 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
697 df_set_bb_dirty (a);
699 if (dump_file)
700 fprintf (dump_file, "Moved block %d before %d and merged.\n",
701 a->index, b->index);
703 /* Swap the records for the two blocks around. */
705 unlink_block (a);
706 link_block (a, b->prev_bb);
708 /* Now blocks A and B are contiguous. Merge them. */
709 merge_blocks (a, b);
712 /* Blocks A and B are to be merged into a single block. B has no outgoing
713 fallthru edge, so it can be moved after A without adding or modifying
714 any jumps (aside from the jump from A to B). */
716 static void
717 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
719 rtx_insn *barrier, *real_b_end;
720 rtx label;
721 rtx_jump_table_data *table;
723 /* If we are partitioning hot/cold basic blocks, we don't want to
724 mess up unconditional or indirect jumps that cross between hot
725 and cold sections.
727 Basic block partitioning may result in some jumps that appear to
728 be optimizable (or blocks that appear to be mergeable), but which really
729 must be left untouched (they are required to make it safely across
730 partition boundaries). See the comments at the top of
731 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
733 if (BB_PARTITION (a) != BB_PARTITION (b))
734 return;
736 real_b_end = BB_END (b);
738 /* If there is a jump table following block B temporarily add the jump table
739 to block B so that it will also be moved to the correct location. */
740 if (tablejump_p (BB_END (b), &label, &table)
741 && prev_active_insn (label) == BB_END (b))
743 BB_END (b) = table;
746 /* There had better have been a barrier there. Delete it. */
747 barrier = NEXT_INSN (BB_END (b));
748 if (barrier && BARRIER_P (barrier))
749 delete_insn (barrier);
752 /* Scramble the insn chain. */
753 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
755 /* Restore the real end of b. */
756 BB_END (b) = real_b_end;
758 if (dump_file)
759 fprintf (dump_file, "Moved block %d after %d and merged.\n",
760 b->index, a->index);
762 /* Now blocks A and B are contiguous. Merge them. */
763 merge_blocks (a, b);
766 /* Attempt to merge basic blocks that are potentially non-adjacent.
767 Return NULL iff the attempt failed, otherwise return the basic block
768 where cleanup_cfg should continue. Because the merging commonly
769 moves a basic block away or introduces another optimization
770 possibility, return the basic block just before B so cleanup_cfg doesn't
771 need to iterate.
773 It may be a good idea to return the basic block before C in the case
774 C has been moved after B and originally appeared earlier in the
775 insn sequence, but we have no information available about the
776 relative ordering of these two. Hopefully it is not too common. */
778 static basic_block
779 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
781 basic_block next;
783 /* If we are partitioning hot/cold basic blocks, we don't want to
784 mess up unconditional or indirect jumps that cross between hot
785 and cold sections.
787 Basic block partitioning may result in some jumps that appear to
788 be optimizable (or blocks that appear to be mergeable), but which really
789 must be left untouched (they are required to make it safely across
790 partition boundaries). See the comments at the top of
791 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
793 if (BB_PARTITION (b) != BB_PARTITION (c))
794 return NULL;
796 /* If B has a fallthru edge to C, no need to move anything. */
797 if (e->flags & EDGE_FALLTHRU)
799 int b_index = b->index, c_index = c->index;
801 /* Protect the loop latches. */
802 if (current_loops && c->loop_father->latch == c)
803 return NULL;
805 merge_blocks (b, c);
806 update_forwarder_flag (b);
808 if (dump_file)
809 fprintf (dump_file, "Merged %d and %d without moving.\n",
810 b_index, c_index);
812 return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb;
815 /* Otherwise we will need to move code around. Do that only if expensive
816 transformations are allowed. */
817 else if (mode & CLEANUP_EXPENSIVE)
819 edge tmp_edge, b_fallthru_edge;
820 bool c_has_outgoing_fallthru;
821 bool b_has_incoming_fallthru;
823 /* Avoid overactive code motion, as the forwarder blocks should be
824 eliminated by edge redirection instead. One exception might have
825 been if B is a forwarder block and C has no fallthru edge, but
826 that should be cleaned up by bb-reorder instead. */
827 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
828 return NULL;
830 /* We must make sure to not munge nesting of lexical blocks,
831 and loop notes. This is done by squeezing out all the notes
832 and leaving them there to lie. Not ideal, but functional. */
834 tmp_edge = find_fallthru_edge (c->succs);
835 c_has_outgoing_fallthru = (tmp_edge != NULL);
837 tmp_edge = find_fallthru_edge (b->preds);
838 b_has_incoming_fallthru = (tmp_edge != NULL);
839 b_fallthru_edge = tmp_edge;
840 next = b->prev_bb;
841 if (next == c)
842 next = next->prev_bb;
844 /* Otherwise, we're going to try to move C after B. If C does
845 not have an outgoing fallthru, then it can be moved
846 immediately after B without introducing or modifying jumps. */
847 if (! c_has_outgoing_fallthru)
849 merge_blocks_move_successor_nojumps (b, c);
850 return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
853 /* If B does not have an incoming fallthru, then it can be moved
854 immediately before C without introducing or modifying jumps.
855 C cannot be the first block, so we do not have to worry about
856 accessing a non-existent block. */
858 if (b_has_incoming_fallthru)
860 basic_block bb;
862 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
863 return NULL;
864 bb = force_nonfallthru (b_fallthru_edge);
865 if (bb)
866 notice_new_block (bb);
869 merge_blocks_move_predecessor_nojumps (b, c);
870 return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next;
873 return NULL;
877 /* Remove the memory attributes of MEM expressions X and Y
878 if they are not equal. */
880 static void
881 merge_memattrs (rtx x, rtx y)
883 int i;
884 int j;
885 enum rtx_code code;
886 const char *fmt;
888 if (x == y)
889 return;
890 if (x == 0 || y == 0)
891 return;
893 code = GET_CODE (x);
895 if (code != GET_CODE (y))
896 return;
898 if (GET_MODE (x) != GET_MODE (y))
899 return;
901 if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
903 if (! MEM_ATTRS (x))
904 MEM_ATTRS (y) = 0;
905 else if (! MEM_ATTRS (y))
906 MEM_ATTRS (x) = 0;
907 else
909 HOST_WIDE_INT mem_size;
911 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
913 set_mem_alias_set (x, 0);
914 set_mem_alias_set (y, 0);
917 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
919 set_mem_expr (x, 0);
920 set_mem_expr (y, 0);
921 clear_mem_offset (x);
922 clear_mem_offset (y);
924 else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y)
925 || (MEM_OFFSET_KNOWN_P (x)
926 && MEM_OFFSET (x) != MEM_OFFSET (y)))
928 clear_mem_offset (x);
929 clear_mem_offset (y);
932 if (MEM_SIZE_KNOWN_P (x) && MEM_SIZE_KNOWN_P (y))
934 mem_size = MAX (MEM_SIZE (x), MEM_SIZE (y));
935 set_mem_size (x, mem_size);
936 set_mem_size (y, mem_size);
938 else
940 clear_mem_size (x);
941 clear_mem_size (y);
944 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
945 set_mem_align (y, MEM_ALIGN (x));
948 if (code == MEM)
950 if (MEM_READONLY_P (x) != MEM_READONLY_P (y))
952 MEM_READONLY_P (x) = 0;
953 MEM_READONLY_P (y) = 0;
955 if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y))
957 MEM_NOTRAP_P (x) = 0;
958 MEM_NOTRAP_P (y) = 0;
960 if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y))
962 MEM_VOLATILE_P (x) = 1;
963 MEM_VOLATILE_P (y) = 1;
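966 /* Recurse into the operands so that any nested MEMs also have their attributes merged. */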
967 fmt = GET_RTX_FORMAT (code);
968 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
970 switch (fmt[i])
972 case 'E':
973 /* Two vectors must have the same length. */
974 if (XVECLEN (x, i) != XVECLEN (y, i))
975 return;
977 for (j = 0; j < XVECLEN (x, i); j++)
978 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
980 break;
982 case 'e':
983 merge_memattrs (XEXP (x, i), XEXP (y, i));
986 return;
990 /* Checks if patterns P1 and P2 are equivalent, apart from the possibly
991 different single sets S1 and S2. */
993 static bool
994 equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2)
996 int i;
997 rtx e1, e2;
999 if (p1 == s1 && p2 == s2)
1000 return true;
1002 if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL)
1003 return false;
1005 if (XVECLEN (p1, 0) != XVECLEN (p2, 0))
1006 return false;
1008 for (i = 0; i < XVECLEN (p1, 0); i++)
1010 e1 = XVECEXP (p1, 0, i);
1011 e2 = XVECEXP (p2, 0, i);
1012 if (e1 == s1 && e2 == s2)
1013 continue;
1014 if (reload_completed
1015 ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2))
1016 continue;
1018 return false;
1021 return true;
1025 /* NOTE1 is the REG_EQUAL note, if any, attached to an insn
1026 that is a single_set with a SET_SRC of SRC1. Similarly
1027 for NOTE2/SRC2.
1029 So effectively NOTE1/NOTE2 are an alternate form of
1030 SRC1/SRC2 respectively.
1032 Return nonzero if SRC1 or NOTE1 has the same constant
1033 integer value as SRC2 or NOTE2. Else return zero. */
1034 static int
1035 values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2)
1037 if (note1
1038 && note2
1039 && CONST_INT_P (XEXP (note1, 0))
1040 && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0)))
1041 return 1;
1043 if (!note1
1044 && !note2
1045 && CONST_INT_P (src1)
1046 && CONST_INT_P (src2)
1047 && rtx_equal_p (src1, src2))
1048 return 1;
1050 if (note1
1051 && CONST_INT_P (src2)
1052 && rtx_equal_p (XEXP (note1, 0), src2))
1053 return 1;
1055 if (note2
1056 && CONST_INT_P (src1)
1057 && rtx_equal_p (XEXP (note2, 0), src1))
1058 return 1;
1060 return 0;
1063 /* Examine register notes on I1 and I2 and return:
1064 - dir_forward if I1 can be replaced by I2, or
1065 - dir_backward if I2 can be replaced by I1, or
1066 - dir_both if both are the case. */
1068 static enum replace_direction
1069 can_replace_by (rtx_insn *i1, rtx_insn *i2)
1071 rtx s1, s2, d1, d2, src1, src2, note1, note2;
1072 bool c1, c2;
1074 /* Check for 2 sets. */
1075 s1 = single_set (i1);
1076 s2 = single_set (i2);
1077 if (s1 == NULL_RTX || s2 == NULL_RTX)
1078 return dir_none;
1080 /* Check that the 2 sets set the same dest. */
1081 d1 = SET_DEST (s1);
1082 d2 = SET_DEST (s2);
1083 if (!(reload_completed
1084 ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2)))
1085 return dir_none;
1087 /* Find identical reg_equiv or reg_equal notes, which imply that the 2 sets
1088 set dest to the same value. */
1089 note1 = find_reg_equal_equiv_note (i1);
1090 note2 = find_reg_equal_equiv_note (i2);
1092 src1 = SET_SRC (s1);
1093 src2 = SET_SRC (s2);
1095 if (!values_equal_p (note1, note2, src1, src2))
1096 return dir_none;
1098 if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2))
1099 return dir_none;
1101 /* Although the 2 sets set dest to the same value, we cannot replace
1102 (set (dest) (const_int))
1103 by
1104 (set (dest) (reg))
1105 because we don't know if the reg is live and has the same value at the
1106 location of replacement. */
1107 c1 = CONST_INT_P (src1);
1108 c2 = CONST_INT_P (src2);
1109 if (c1 && c2)
1110 return dir_both;
1111 else if (c2)
1112 return dir_forward;
1113 else if (c1)
1114 return dir_backward;
1116 return dir_none;
1119 /* Merges directions A and B. */
1121 static enum replace_direction
1122 merge_dir (enum replace_direction a, enum replace_direction b)
1124 /* Implements the following table:
1125 |bo fw bw no
1126 ---+-----------
1127 bo |bo fw bw no
1128 fw |-- fw no no
1129 bw |-- -- bw no
1130 no |-- -- -- no. */
1132 if (a == b)
1133 return a;
1135 if (a == dir_both)
1136 return b;
1137 if (b == dir_both)
1138 return a;
1140 return dir_none;
1143 /* Examine I1 and I2 and return:
1144 - dir_forward if I1 can be replaced by I2, or
1145 - dir_backward if I2 can be replaced by I1, or
1146 - dir_both if both are the case. */
1148 static enum replace_direction
1149 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2)
1151 rtx p1, p2;
1153 /* Verify that I1 and I2 are equivalent. */
1154 if (GET_CODE (i1) != GET_CODE (i2))
1155 return dir_none;
1157 /* __builtin_unreachable() may lead to empty blocks (ending with
1158 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
1159 if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
1160 return dir_both;
1162 /* ??? Do not allow cross-jumping between different stack levels. */
1163 p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL);
1164 p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL);
1165 if (p1 && p2)
1167 p1 = XEXP (p1, 0);
1168 p2 = XEXP (p2, 0);
1169 if (!rtx_equal_p (p1, p2))
1170 return dir_none;
1172 /* ??? Worse, this adjustment had better be constant lest we
1173 have differing incoming stack levels. */
1174 if (!frame_pointer_needed
1175 && find_args_size_adjust (i1) == HOST_WIDE_INT_MIN)
1176 return dir_none;
1178 else if (p1 || p2)
1179 return dir_none;
1181 p1 = PATTERN (i1);
1182 p2 = PATTERN (i2);
1184 if (GET_CODE (p1) != GET_CODE (p2))
1185 return dir_none;
1187 /* If this is a CALL_INSN, compare register usage information.
1188 If we don't check this on stack register machines, the two
1189 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1190 numbers of stack registers in the same basic block.
1191 If we don't check this on machines with delay slots, a delay slot may
1192 be filled that clobbers a parameter expected by the subroutine.
1194 ??? We take the simple route for now and assume that if they're
1195 equal, they were constructed identically.
1197 Also check for identical exception regions. */
1199 if (CALL_P (i1))
1201 /* Ensure the same EH region. */
1202 rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
1203 rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
1205 if (!n1 && n2)
1206 return dir_none;
1208 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1209 return dir_none;
1211 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1212 CALL_INSN_FUNCTION_USAGE (i2))
1213 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
1214 return dir_none;
1216 /* For address sanitizer, never crossjump __asan_report_* builtins,
1217 otherwise errors might be reported on incorrect lines. */
1218 if (flag_sanitize & SANITIZE_ADDRESS)
1220 rtx call = get_call_rtx_from (i1);
1221 if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
1223 rtx symbol = XEXP (XEXP (call, 0), 0);
1224 if (SYMBOL_REF_DECL (symbol)
1225 && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
1227 if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
1228 == BUILT_IN_NORMAL)
1229 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1230 >= BUILT_IN_ASAN_REPORT_LOAD1
1231 && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol))
1232 <= BUILT_IN_ASAN_STOREN)
1233 return dir_none;
1239 #ifdef STACK_REGS
1240 /* If cross_jump_death_matters is not 0, the insn's mode
1241 indicates whether or not the insn contains any stack-like
1242 regs. */
1244 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1246 /* If register stack conversion has already been done, then
1247 death notes must also be compared before it is certain that
1248 the two instruction streams match. */
1250 rtx note;
1251 HARD_REG_SET i1_regset, i2_regset;
1253 CLEAR_HARD_REG_SET (i1_regset);
1254 CLEAR_HARD_REG_SET (i2_regset);
1256 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1257 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1258 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1260 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1261 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1262 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1264 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1265 return dir_none;
1267 #endif
1269 if (reload_completed
1270 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1271 return dir_both;
1273 return can_replace_by (i1, i2);
1276 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1277 flow_find_head_matching_sequence, ensure the notes match. */
1279 static void
1280 merge_notes (rtx_insn *i1, rtx_insn *i2)
1282 /* If the merged insns have different REG_EQUAL notes, then
1283 remove them. */
1284 rtx equiv1 = find_reg_equal_equiv_note (i1);
1285 rtx equiv2 = find_reg_equal_equiv_note (i2);
1287 if (equiv1 && !equiv2)
1288 remove_note (i1, equiv1);
1289 else if (!equiv1 && equiv2)
1290 remove_note (i2, equiv2);
1291 else if (equiv1 && equiv2
1292 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1294 remove_note (i1, equiv1);
1295 remove_note (i2, equiv2);
1299 /* Walks from I1 in BB1 backward until the next non-debug insn, and returns the
1300 resulting insn in I1, and the corresponding bb in BB1. At the head of a
1301 bb, if there is a predecessor bb that reaches this bb via fallthru, and
1302 FOLLOW_FALLTHRU, walks further in the predecessor bb and registers this in
1303 DID_FALLTHRU. Otherwise, stops at the head of the bb. */
1305 static void
1306 walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru,
1307 bool *did_fallthru)
1309 edge fallthru;
1311 *did_fallthru = false;
1313 /* Ignore notes. */
1314 while (!NONDEBUG_INSN_P (*i1))
1316 if (*i1 != BB_HEAD (*bb1))
1318 *i1 = PREV_INSN (*i1);
1319 continue;
1322 if (!follow_fallthru)
1323 return;
1325 fallthru = find_fallthru_edge ((*bb1)->preds);
1326 if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1327 || !single_succ_p (fallthru->src))
1328 return;
1330 *bb1 = fallthru->src;
1331 *i1 = BB_END (*bb1);
1332 *did_fallthru = true;
1336 /* Look through the insns at the end of BB1 and BB2 and find the longest
1337 sequence of insns that are either equivalent, or allow forward or backward
1338 replacement. Store the first insns for that sequence in *F1 and *F2 and
1339 return the sequence length.
1341 DIR_P indicates the allowed replacement direction on function entry, and
1342 the actual replacement direction on function exit. If NULL, only equivalent
1343 sequences are allowed.
1345 To simplify callers of this function, if the blocks match exactly,
1346 store the head of the blocks in *F1 and *F2. */
1348 int
1349 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1,
1350 rtx_insn **f2, enum replace_direction *dir_p)
1352 rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2;
1353 int ninsns = 0;
1354 enum replace_direction dir, last_dir, afterlast_dir;
1355 bool follow_fallthru, did_fallthru;
1357 if (dir_p)
1358 dir = *dir_p;
1359 else
1360 dir = dir_both;
1361 afterlast_dir = dir;
1362 last_dir = afterlast_dir;
1364 /* Skip simple jumps at the end of the blocks. Complex jumps still
1365 need to be compared for equivalence, which we'll do below. */
1367 i1 = BB_END (bb1);
1368 last1 = afterlast1 = last2 = afterlast2 = NULL;
1369 if (onlyjump_p (i1)
1370 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1372 last1 = i1;
1373 i1 = PREV_INSN (i1);
1376 i2 = BB_END (bb2);
1377 if (onlyjump_p (i2)
1378 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1380 last2 = i2;
1381 /* Count everything except for unconditional jump as insn.
1382 Don't count any jumps if dir_p is NULL. */
1383 if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p)
1384 ninsns++;
1385 i2 = PREV_INSN (i2);
1388 while (true)
1390 /* In the following example, we can replace all jumps to C by jumps to A.
1392 This removes 4 duplicate insns.
1393 [bb A] insn1 [bb C] insn1
1394 insn2 insn2
1395 [bb B] insn3 insn3
1396 insn4 insn4
1397 jump_insn jump_insn
1399 We could also replace all jumps to A by jumps to C, but that leaves B
1400 alive, and removes only 2 duplicate insns. In a subsequent crossjump
1401 step, all jumps to B would be replaced with jumps to the middle of C,
1402 achieving the same result with more effort.
1403 So we allow only the first possibility, which means that we don't allow
1404 fallthru in the block that's being replaced. */
1406 follow_fallthru = dir_p && dir != dir_forward;
1407 walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru);
1408 if (did_fallthru)
1409 dir = dir_backward;
1411 follow_fallthru = dir_p && dir != dir_backward;
1412 walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru);
1413 if (did_fallthru)
1414 dir = dir_forward;
1416 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1417 break;
1419 dir = merge_dir (dir, old_insns_match_p (0, i1, i2));
1420 if (dir == dir_none || (!dir_p && dir != dir_both))
1421 break;
1423 merge_memattrs (i1, i2);
1425 /* Don't begin a cross-jump with a NOTE insn. */
1426 if (INSN_P (i1))
1428 merge_notes (i1, i2);
1430 afterlast1 = last1, afterlast2 = last2;
1431 last1 = i1, last2 = i2;
1432 afterlast_dir = last_dir;
1433 last_dir = dir;
1434 if (active_insn_p (i1))
1435 ninsns++;
1438 i1 = PREV_INSN (i1);
1439 i2 = PREV_INSN (i2);
1442 /* Don't allow the insn after a compare to be shared by
1443 cross-jumping unless the compare is also shared. */
1444 if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1445 && ! sets_cc0_p (last1))
1446 last1 = afterlast1, last2 = afterlast2, last_dir = afterlast_dir, ninsns--;
1448 /* Include preceding notes and labels in the cross-jump. One,
1449 this may bring us to the head of the blocks as requested above.
1450 Two, it keeps line number notes as matched as may be. */
1451 if (ninsns)
1453 bb1 = BLOCK_FOR_INSN (last1);
1454 while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1455 last1 = PREV_INSN (last1);
1457 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1458 last1 = PREV_INSN (last1);
1460 bb2 = BLOCK_FOR_INSN (last2);
1461 while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1462 last2 = PREV_INSN (last2);
1464 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1465 last2 = PREV_INSN (last2);
1467 *f1 = last1;
1468 *f2 = last2;
1471 if (dir_p)
1472 *dir_p = last_dir;
1473 return ninsns;
1476 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1477 the head of the two blocks. Do not include jumps at the end.
1478 If STOP_AFTER is nonzero, stop after finding that many matching
1479 instructions. If STOP_AFTER is zero, count all INSN_P insns; if it is
1480 non-zero, only count active insns. */
1482 int
1483 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1,
1484 rtx_insn **f2, int stop_after)
1486 rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2;
1487 int ninsns = 0;
1488 edge e;
1489 edge_iterator ei;
1490 int nehedges1 = 0, nehedges2 = 0;
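1491 /* Count the EH edges leaving each block; the EH consistency check inside the loop below relies on these counts. */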
1492 FOR_EACH_EDGE (e, ei, bb1->succs)
1493 if (e->flags & EDGE_EH)
1494 nehedges1++;
1495 FOR_EACH_EDGE (e, ei, bb2->succs)
1496 if (e->flags & EDGE_EH)
1497 nehedges2++;
1499 i1 = BB_HEAD (bb1);
1500 i2 = BB_HEAD (bb2);
1501 last1 = beforelast1 = last2 = beforelast2 = NULL;
1503 while (true)
1505 /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */
1506 while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1508 if (NOTE_P (i1) && NOTE_KIND (i1) == NOTE_INSN_EPILOGUE_BEG)
1509 break;
1510 i1 = NEXT_INSN (i1);
1513 while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1515 if (NOTE_P (i2) && NOTE_KIND (i2) == NOTE_INSN_EPILOGUE_BEG)
1516 break;
1517 i2 = NEXT_INSN (i2);
1520 if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1521 || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1522 break;
1524 if (NOTE_P (i1) || NOTE_P (i2)
1525 || JUMP_P (i1) || JUMP_P (i2))
1526 break;
1528 /* A sanity check to make sure we're not merging insns with different
1529 effects on EH. If only one of them ends a basic block, it shouldn't
1530 have an EH edge; if both end a basic block, there should be the same
1531 number of EH edges. */
1532 if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1533 && nehedges1 > 0)
1534 || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1535 && nehedges2 > 0)
1536 || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1537 && nehedges1 != nehedges2))
1538 break;
1540 if (old_insns_match_p (0, i1, i2) != dir_both)
1541 break;
1543 merge_memattrs (i1, i2);
1545 /* Don't begin a cross-jump with a NOTE insn. */
1546 if (INSN_P (i1))
1548 merge_notes (i1, i2);
1550 beforelast1 = last1, beforelast2 = last2;
1551 last1 = i1, last2 = i2;
1552 if (!stop_after || active_insn_p (i1))
1553 ninsns++;
1556 if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1557 || (stop_after > 0 && ninsns == stop_after))
1558 break;
1560 i1 = NEXT_INSN (i1);
1561 i2 = NEXT_INSN (i2);
1564 /* Don't allow a compare to be shared by cross-jumping unless the insn
1565 after the compare is also shared. */
1566 if (HAVE_cc0 && ninsns && reg_mentioned_p (cc0_rtx, last1)
1567 && sets_cc0_p (last1))
1568 last1 = beforelast1, last2 = beforelast2, ninsns--;
1570 if (ninsns)
1572 *f1 = last1;
1573 *f2 = last2;
1576 return ninsns;
1579 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1580 the branch instruction. This means that if we commonize the control
1581 flow before the end of the basic block, the semantics remain unchanged.
1583 We may assume that there exists one edge with a common destination. */
1585 static bool
1586 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1588 int nehedges1 = 0, nehedges2 = 0;
1589 edge fallthru1 = 0, fallthru2 = 0;
1590 edge e1, e2;
1591 edge_iterator ei;
1593 /* If we performed shrink-wrapping, edges to the exit block can
1594 only be distinguished for JUMP_INSNs. The two paths may differ in
1595 whether they went through the prologue. Sibcalls are fine, we know
1596 that we either didn't need or inserted an epilogue before them. */
1597 if (crtl->shrink_wrapped
1598 && single_succ_p (bb1)
1599 && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)
1600 && !JUMP_P (BB_END (bb1))
1601 && !(CALL_P (BB_END (bb1)) && SIBLING_CALL_P (BB_END (bb1))))
1602 return false;
1604 /* If BB1 has only one successor, we may be looking at either an
1605 unconditional jump, or a fake edge to exit. */
1606 if (single_succ_p (bb1)
1607 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1608 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1609 return (single_succ_p (bb2)
1610 && (single_succ_edge (bb2)->flags
1611 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1612 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1614 /* Match conditional jumps - this may get tricky when fallthru and branch
1615 edges are crossed. */
1616 if (EDGE_COUNT (bb1->succs) == 2
1617 && any_condjump_p (BB_END (bb1))
1618 && onlyjump_p (BB_END (bb1)))
1620 edge b1, f1, b2, f2;
1621 bool reverse, match;
1622 rtx set1, set2, cond1, cond2;
1623 enum rtx_code code1, code2;
1625 if (EDGE_COUNT (bb2->succs) != 2
1626 || !any_condjump_p (BB_END (bb2))
1627 || !onlyjump_p (BB_END (bb2)))
1628 return false;
1630 b1 = BRANCH_EDGE (bb1);
1631 b2 = BRANCH_EDGE (bb2);
1632 f1 = FALLTHRU_EDGE (bb1);
1633 f2 = FALLTHRU_EDGE (bb2);
1635 /* Get around possible forwarders on fallthru edges. Other cases
1636 should be optimized out already. */
1637 if (FORWARDER_BLOCK_P (f1->dest))
1638 f1 = single_succ_edge (f1->dest);
1640 if (FORWARDER_BLOCK_P (f2->dest))
1641 f2 = single_succ_edge (f2->dest);
1643 /* To simplify use of this function, return false if there are
1644 unneeded forwarder blocks. These will get eliminated later
1645 during cleanup_cfg. */
1646 if (FORWARDER_BLOCK_P (f1->dest)
1647 || FORWARDER_BLOCK_P (f2->dest)
1648 || FORWARDER_BLOCK_P (b1->dest)
1649 || FORWARDER_BLOCK_P (b2->dest))
1650 return false;
1652 if (f1->dest == f2->dest && b1->dest == b2->dest)
1653 reverse = false;
1654 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1655 reverse = true;
1656 else
1657 return false;
1659 set1 = pc_set (BB_END (bb1));
1660 set2 = pc_set (BB_END (bb2));
1661 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1662 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1663 reverse = !reverse;
1665 cond1 = XEXP (SET_SRC (set1), 0);
1666 cond2 = XEXP (SET_SRC (set2), 0);
1667 code1 = GET_CODE (cond1);
1668 if (reverse)
1669 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1670 else
1671 code2 = GET_CODE (cond2);
1673 if (code2 == UNKNOWN)
1674 return false;
1676 /* Verify codes and operands match. */
1677 match = ((code1 == code2
1678 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1679 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1680 || (code1 == swap_condition (code2)
1681 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1682 XEXP (cond2, 0))
1683 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1684 XEXP (cond2, 1))));
1686 /* If we return true, we will join the blocks. Which means that
1687 we will only have one branch prediction bit to work with. Thus
1688 we require the existing branches to have probabilities that are
1689 roughly similar. */
1690 if (match
1691 && optimize_bb_for_speed_p (bb1)
1692 && optimize_bb_for_speed_p (bb2))
1694 int prob2;
1696 if (b1->dest == b2->dest)
1697 prob2 = b2->probability;
1698 else
1699 /* Do not use f2 probability as f2 may be forwarded. */
1700 prob2 = REG_BR_PROB_BASE - b2->probability;
1702 /* Fail if the difference in probabilities is greater than 50%.
1703 This rules out two well-predicted branches with opposite
1704 outcomes. */
1705 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1707 if (dump_file)
1708 fprintf (dump_file,
1709 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1710 bb1->index, bb2->index, b1->probability, prob2);
1712 return false;
1716 if (dump_file && match)
1717 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1718 bb1->index, bb2->index);
1720 return match;
1723 /* Generic case - we are seeing a computed jump, table jump or trapping
1724 instruction. */
1726 /* Check whether there are tablejumps in the end of BB1 and BB2.
1727 Return true if they are identical. */
1729 rtx label1, label2;
1730 rtx_jump_table_data *table1, *table2;
1732 if (tablejump_p (BB_END (bb1), &label1, &table1)
1733 && tablejump_p (BB_END (bb2), &label2, &table2)
1734 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1736 /* The labels should never be the same rtx. If they really are the same,
1737 the jump tables are the same too. So disable crossjumping of blocks BB1
1738 and BB2 because when deleting the common insns at the end of BB1
1739 by delete_basic_block () the jump table would be deleted too. */
1740 /* If LABEL2 is referenced in BB1->END do not do anything
1741 because we would lose information when replacing
1742 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1743 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1745 /* Set IDENTICAL to true when the tables are identical. */
1746 bool identical = false;
1747 rtx p1, p2;
1749 p1 = PATTERN (table1);
1750 p2 = PATTERN (table2);
1751 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1753 identical = true;
1755 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1756 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1757 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1758 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1760 int i;
1762 identical = true;
1763 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1764 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1765 identical = false;
1768 if (identical)
1770 bool match;
1772 /* Temporarily replace references to LABEL1 with LABEL2
1773 in BB1->END so that we could compare the instructions. */
1774 replace_label_in_insn (BB_END (bb1), label1, label2, false);
1776 match = (old_insns_match_p (mode, BB_END (bb1), BB_END (bb2))
1777 == dir_both);
1778 if (dump_file && match)
1779 fprintf (dump_file,
1780 "Tablejumps in bb %i and %i match.\n",
1781 bb1->index, bb2->index);
1783 /* Set the original label in BB1->END because when deleting
1784 a block whose end is a tablejump, the tablejump referenced
1785 from the instruction is deleted too. */
1786 replace_label_in_insn (BB_END (bb1), label2, label1, false);
1788 return match;
1791 return false;
1795 /* Find the last non-debug non-note instruction in each bb, except
1796 stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p
1797 handles that case specially. old_insns_match_p does not handle
1798 other types of instruction notes. */
1799 rtx_insn *last1 = BB_END (bb1);
1800 rtx_insn *last2 = BB_END (bb2);
1801 while (!NOTE_INSN_BASIC_BLOCK_P (last1) &&
1802 (DEBUG_INSN_P (last1) || NOTE_P (last1)))
1803 last1 = PREV_INSN (last1);
1804 while (!NOTE_INSN_BASIC_BLOCK_P (last2) &&
1805 (DEBUG_INSN_P (last2) || NOTE_P (last2)))
1806 last2 = PREV_INSN (last2);
1807 gcc_assert (last1 && last2);
1809 /* First ensure that the instructions match. There may be many outgoing
1810 edges so this test is generally cheaper. */
1811 if (old_insns_match_p (mode, last1, last2) != dir_both)
1812 return false;
1814 /* Search the outgoing edges, ensure that the counts do match, find possible
1815 fallthru and exception handling edges since these need more
1816 validation. */
1817 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1818 return false;
1820 bool nonfakeedges = false;
1821 FOR_EACH_EDGE (e1, ei, bb1->succs)
1823 e2 = EDGE_SUCC (bb2, ei.index);
1825 if ((e1->flags & EDGE_FAKE) == 0)
1826 nonfakeedges = true;
1828 if (e1->flags & EDGE_EH)
1829 nehedges1++;
1831 if (e2->flags & EDGE_EH)
1832 nehedges2++;
1834 if (e1->flags & EDGE_FALLTHRU)
1835 fallthru1 = e1;
1836 if (e2->flags & EDGE_FALLTHRU)
1837 fallthru2 = e2;
1840 /* If number of edges of various types does not match, fail. */
1841 if (nehedges1 != nehedges2
1842 || (fallthru1 != 0) != (fallthru2 != 0))
1843 return false;
1845 /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors
1846 and the last real insn doesn't have REG_ARGS_SIZE note, don't
1847 attempt to optimize, as the two basic blocks might have different
1848 REG_ARGS_SIZE depths. For noreturn calls and unconditional
1849 traps there should be REG_ARGS_SIZE notes; they could be missing
1850 for __builtin_unreachable () uses though. */
1851 if (!nonfakeedges
1852 && !ACCUMULATE_OUTGOING_ARGS
1853 && (!INSN_P (last1)
1854 || !find_reg_note (last1, REG_ARGS_SIZE, NULL)))
1855 return false;
1857 /* fallthru edges must be forwarded to the same destination. */
1858 if (fallthru1)
1860 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1861 ? single_succ (fallthru1->dest): fallthru1->dest);
1862 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1863 ? single_succ (fallthru2->dest): fallthru2->dest);
1865 if (d1 != d2)
1866 return false;
1869 /* Ensure the same EH region. */
1871 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1872 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1874 if (!n1 && n2)
1875 return false;
1877 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1878 return false;
1881 /* The same checks as in try_crossjump_to_edge. It is required for the RTL
1882 version of sequence abstraction. */
1883 FOR_EACH_EDGE (e1, ei, bb2->succs)
1885 edge e2;
1886 edge_iterator ei;
1887 basic_block d1 = e1->dest;
1889 if (FORWARDER_BLOCK_P (d1))
1890 d1 = EDGE_SUCC (d1, 0)->dest;
1892 FOR_EACH_EDGE (e2, ei, bb1->succs)
1894 basic_block d2 = e2->dest;
1895 if (FORWARDER_BLOCK_P (d2))
1896 d2 = EDGE_SUCC (d2, 0)->dest;
1897 if (d1 == d2)
1898 break;
1901 if (!e2)
1902 return false;
1905 return true;
1908 /* Return true if basic block BB has a preserve label. */
1910 static bool
1911 block_has_preserve_label (basic_block bb)
1913 return (bb
1914 && block_label (bb)
1915 && LABEL_PRESERVE_P (block_label (bb)));
1918 /* E1 and E2 are edges with the same destination block. Search their
1919 predecessors for common code. If found, redirect control flow from
1920 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward),
1921 or the other way around (dir_backward). DIR specifies the allowed
1922 replacement direction. */
1924 static bool
1925 try_crossjump_to_edge (int mode, edge e1, edge e2,
1926 enum replace_direction dir)
1928 int nmatch;
1929 basic_block src1 = e1->src, src2 = e2->src;
1930 basic_block redirect_to, redirect_from, to_remove;
1931 basic_block osrc1, osrc2, redirect_edges_to, tmp;
1932 rtx_insn *newpos1, *newpos2;
1933 edge s;
1934 edge_iterator ei;
1936 newpos1 = newpos2 = NULL;
1938 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1939 to try this optimization.
1941 Basic block partitioning may result in some jumps that appear to
1942 be optimizable (or blocks that appear to be mergeable), but which really
1943 must be left untouched (they are required to make it safely across
1944 partition boundaries). See the comments at the top of
1945 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1947 if (crtl->has_bb_partition && reload_completed)
1948 return false;
1950 /* Search backward through forwarder blocks. We don't need to worry
1951 about multiple entry or chained forwarders, as they will be optimized
1952 away. We do this to look past the unconditional jump following a
1953 conditional jump that is required due to the current CFG shape. */
1954 if (single_pred_p (src1)
1955 && FORWARDER_BLOCK_P (src1))
1956 e1 = single_pred_edge (src1), src1 = e1->src;
1958 if (single_pred_p (src2)
1959 && FORWARDER_BLOCK_P (src2))
1960 e2 = single_pred_edge (src2), src2 = e2->src;
1962 /* Nothing to do if we reach ENTRY, or a common source block. */
1963 if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun) || src2
1964 == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1965 return false;
1966 if (src1 == src2)
1967 return false;
1969 /* Seeing more than one forwarder block would confuse us later... */
1970 if (FORWARDER_BLOCK_P (e1->dest)
1971 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1972 return false;
1974 if (FORWARDER_BLOCK_P (e2->dest)
1975 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1976 return false;
1978 /* Likewise with dead code (possibly newly created by the other optimizations
1979 of cfg_cleanup). */
1980 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1981 return false;
1983 /* Look for the common insn sequence, part the first ... */
1984 if (!outgoing_edges_match (mode, src1, src2))
1985 return false;
1987 /* ... and part the second. */
1988 nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir);
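/* Remember the blocks that originally end in the compared jumps (possibly
   tablejumps); SRC1 and SRC2 are narrowed below to the blocks that contain
   the start of the matched sequence. */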
1990 osrc1 = src1;
1991 osrc2 = src2;
1992 if (newpos1 != NULL_RTX)
1993 src1 = BLOCK_FOR_INSN (newpos1);
1994 if (newpos2 != NULL_RTX)
1995 src2 = BLOCK_FOR_INSN (newpos2);
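/* If the matched sequence should be replaced in the backward direction,
   swap the two sides so that the code below always redirects SRC1 into
   (a split of) SRC2. */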
1997 if (dir == dir_backward)
1999 #define SWAP(T, X, Y) do { T tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
2000 SWAP (basic_block, osrc1, osrc2);
2001 SWAP (basic_block, src1, src2);
2002 SWAP (edge, e1, e2);
2003 SWAP (rtx_insn *, newpos1, newpos2);
2004 #undef SWAP
2007 /* Don't proceed with the crossjump unless we found a sufficient number
2008 of matching instructions or the 'from' block was totally matched
2009 (such that its predecessors will hopefully be redirected and the
2010 block removed). */
2011 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
2012 && (newpos1 != BB_HEAD (src1)))
2013 return false;
2015 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
2016 if (block_has_preserve_label (e1->dest)
2017 && (e1->flags & EDGE_ABNORMAL))
2018 return false;
2020 /* Here we know that the insns at the end of SRC1 which are common with SRC2
2021 will be deleted.
2022 If we have tablejumps at the end of SRC1 and SRC2,
2023 they have already been compared for equivalence in outgoing_edges_match (),
2024 so replace the references to TABLE1 by references to TABLE2. */
2026 rtx label1, label2;
2027 rtx_jump_table_data *table1, *table2;
2029 if (tablejump_p (BB_END (osrc1), &label1, &table1)
2030 && tablejump_p (BB_END (osrc2), &label2, &table2)
2031 && label1 != label2)
2033 rtx_insn *insn;
2035 /* Replace references to LABEL1 with LABEL2. */
2036 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2038 /* Do not replace the label in SRC1->END because when deleting
2039 a block whose end is a tablejump, the tablejump referenced
2040 from the instruction is deleted too. */
2041 if (insn != BB_END (osrc1))
2042 replace_label_in_insn (insn, label1, label2, true);
2047 /* Avoid splitting if possible. We must always split when SRC2 has
2048 EH predecessor edges, or we may end up with basic blocks with both
2049 normal and EH predecessor edges. */
2050 if (newpos2 == BB_HEAD (src2)
2051 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
2052 redirect_to = src2;
2053 else
2055 if (newpos2 == BB_HEAD (src2))
2057 /* Skip possible basic block header. */
2058 if (LABEL_P (newpos2))
2059 newpos2 = NEXT_INSN (newpos2);
2060 while (DEBUG_INSN_P (newpos2))
2061 newpos2 = NEXT_INSN (newpos2);
2062 if (NOTE_P (newpos2))
2063 newpos2 = NEXT_INSN (newpos2);
2064 while (DEBUG_INSN_P (newpos2))
2065 newpos2 = NEXT_INSN (newpos2);
2068 if (dump_file)
2069 fprintf (dump_file, "Splitting bb %i before %i insns\n",
2070 src2->index, nmatch);
2071 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
2074 if (dump_file)
2075 fprintf (dump_file,
2076 "Cross jumping from bb %i to bb %i; %i common insns\n",
2077 src1->index, src2->index, nmatch);
2079 /* We may have some registers visible through the block. */
2080 df_set_bb_dirty (redirect_to);
2082 if (osrc2 == src2)
2083 redirect_edges_to = redirect_to;
2084 else
2085 redirect_edges_to = osrc2;
2087 /* Recompute the frequencies and counts of outgoing edges. */
2088 FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)
2090 edge s2;
2091 edge_iterator ei;
2092 basic_block d = s->dest;
2094 if (FORWARDER_BLOCK_P (d))
2095 d = single_succ (d);
2097 FOR_EACH_EDGE (s2, ei, src1->succs)
2099 basic_block d2 = s2->dest;
2100 if (FORWARDER_BLOCK_P (d2))
2101 d2 = single_succ (d2);
2102 if (d == d2)
2103 break;
2106 s->count += s2->count;
2108 /* Take care to update possible forwarder blocks. We verified
2109 that there is no more than one in the chain, so we can't run
2110 into an infinite loop. */
2111 if (FORWARDER_BLOCK_P (s->dest))
2113 single_succ_edge (s->dest)->count += s2->count;
2114 s->dest->count += s2->count;
2115 s->dest->frequency += EDGE_FREQUENCY (s);
2118 if (FORWARDER_BLOCK_P (s2->dest))
2120 single_succ_edge (s2->dest)->count -= s2->count;
2121 if (single_succ_edge (s2->dest)->count < 0)
2122 single_succ_edge (s2->dest)->count = 0;
2123 s2->dest->count -= s2->count;
2124 s2->dest->frequency -= EDGE_FREQUENCY (s);
2125 if (s2->dest->frequency < 0)
2126 s2->dest->frequency = 0;
2127 if (s2->dest->count < 0)
2128 s2->dest->count = 0;
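/* Merge the probabilities of the corresponding outgoing edges: use a plain
   average when both blocks have zero frequency, otherwise a frequency-weighted
   average. */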
2131 if (!redirect_edges_to->frequency && !src1->frequency)
2132 s->probability = (s->probability + s2->probability) / 2;
2133 else
2134 s->probability
2135 = ((s->probability * redirect_edges_to->frequency +
2136 s2->probability * src1->frequency)
2137 / (redirect_edges_to->frequency + src1->frequency));
2140 /* Adjust count and frequency for the block. An earlier jump
2141 threading pass may have left the profile in an inconsistent
2142 state (see update_bb_profile_for_threading) so we must be
2143 prepared for overflows. */
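/* Walk from REDIRECT_TO along fallthru edges up to REDIRECT_EDGES_TO,
   adding SRC1's count and frequency to each block on the way. */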
2144 tmp = redirect_to;
2147 tmp->count += src1->count;
2148 tmp->frequency += src1->frequency;
2149 if (tmp->frequency > BB_FREQ_MAX)
2150 tmp->frequency = BB_FREQ_MAX;
2151 if (tmp == redirect_edges_to)
2152 break;
2153 tmp = find_fallthru_edge (tmp->succs)->dest;
2155 while (true);
2156 update_br_prob_note (redirect_edges_to);
2158 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
2160 /* Skip possible basic block header. */
2161 if (LABEL_P (newpos1))
2162 newpos1 = NEXT_INSN (newpos1);
2164 while (DEBUG_INSN_P (newpos1))
2165 newpos1 = NEXT_INSN (newpos1);
2167 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
2168 newpos1 = NEXT_INSN (newpos1);
2170 while (DEBUG_INSN_P (newpos1))
2171 newpos1 = NEXT_INSN (newpos1);
2173 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
2174 to_remove = single_succ (redirect_from);
2176 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
2177 delete_basic_block (to_remove);
2179 update_forwarder_flag (redirect_from);
2180 if (redirect_to != src2)
2181 update_forwarder_flag (src2);
2183 return true;
2186 /* Search the predecessors of BB for common insn sequences. When found,
2187 share code between them by redirecting control flow. Return true if
2188 any changes were made. */
2190 static bool
2191 try_crossjump_bb (int mode, basic_block bb)
2193 edge e, e2, fallthru;
2194 bool changed;
2195 unsigned max, ix, ix2;
2197 /* Nothing to do if there are not at least two incoming edges. */
2198 if (EDGE_COUNT (bb->preds) < 2)
2199 return false;
2201 /* Don't crossjump if this block ends in a computed jump,
2202 unless we are optimizing for size. */
2203 if (optimize_bb_for_size_p (bb)
2204 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2205 && computed_jump_p (BB_END (bb)))
2206 return false;
2208 /* If we are partitioning hot/cold basic blocks, we don't want to
2209 mess up unconditional or indirect jumps that cross between hot
2210 and cold sections.
2212 Basic block partitioning may result in some jumps that appear to
2213 be optimizable (or blocks that appear to be mergeable), but which really
2214 must be left untouched (they are required to make it safely across
2215 partition boundaries). See the comments at the top of
2216 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
2218 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
2219 BB_PARTITION (EDGE_PRED (bb, 1)->src)
2220 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
2221 return false;
2223 /* It is always cheapest to redirect a block that ends in a branch to
2224 a block that falls through into BB, as that adds no branches to the
2225 program. We'll try that combination first. */
2226 fallthru = NULL;
2227 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
2229 if (EDGE_COUNT (bb->preds) > max)
2230 return false;
2232 fallthru = find_fallthru_edge (bb->preds);
2234 changed = false;
2235 for (ix = 0; ix < EDGE_COUNT (bb->preds);)
2237 e = EDGE_PRED (bb, ix);
2238 ix++;
2240 /* As noted above, first try with the fallthru predecessor (or, in
2241 cfglayout mode, a fallthru predecessor). */
2242 if (fallthru)
2244 /* Don't combine the fallthru edge into anything else.
2245 If there is a match, we'll do it the other way around. */
2246 if (e == fallthru)
2247 continue;
2248 /* If nothing changed since the last attempt, there is nothing
2249 we can do. */
2250 if (!first_pass
2251 && !((e->src->flags & BB_MODIFIED)
2252 || (fallthru->src->flags & BB_MODIFIED)))
2253 continue;
2255 if (try_crossjump_to_edge (mode, e, fallthru, dir_forward))
2257 changed = true;
2258 ix = 0;
2259 continue;
2263 /* Non-obvious work limiting check: Recognize that we're going
2264 to call try_crossjump_bb on every basic block. So if we have
2265 two blocks with lots of outgoing edges (a switch) and they
2266 share lots of common destinations, then we would do the
2267 cross-jump check once for each common destination.
2269 Now, if the blocks actually are cross-jump candidates, then
2270 all of their destinations will be shared. Which means that
2271 we only need to check them for cross-jump candidacy once. We
2272 can eliminate redundant checks of crossjump(A,B) by arbitrarily
2273 choosing to do the check only when the edge in question is the
2274 first successor edge of its source block A. */
2275 if (EDGE_SUCC (e->src, 0) != e)
2276 continue;
2278 for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds); ix2++)
2280 e2 = EDGE_PRED (bb, ix2);
2282 if (e2 == e)
2283 continue;
2285 /* We've already checked the fallthru edge above. */
2286 if (e2 == fallthru)
2287 continue;
2289 /* The "first successor" check above only prevents multiple
2290 checks of crossjump(A,B). In order to prevent redundant
2291 checks of crossjump(B,A), require that A be the block
2292 with the lowest index. */
2293 if (e->src->index > e2->src->index)
2294 continue;
2296 /* If nothing changed since the last attempt, there is nothing
2297 we can do. */
2298 if (!first_pass
2299 && !((e->src->flags & BB_MODIFIED)
2300 || (e2->src->flags & BB_MODIFIED)))
2301 continue;
2303 /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2304 direction. */
2305 if (try_crossjump_to_edge (mode, e, e2, dir_both))
2307 changed = true;
2308 ix = 0;
2309 break;
2314 if (changed)
2315 crossjumps_occured = true;
2317 return changed;
2320 /* Search the successors of BB for common insn sequences. When found,
2321 share code between them by moving it across the basic block
2322 boundary. Return true if any changes were made. */
2324 static bool
2325 try_head_merge_bb (basic_block bb)
2327 basic_block final_dest_bb = NULL;
2328 int max_match = INT_MAX;
2329 edge e0;
2330 rtx_insn **headptr, **currptr, **nextptr;
2331 bool changed, moveall;
2332 unsigned ix;
2333 rtx_insn *e0_last_head;
2334 rtx cond;
2335 rtx_insn *move_before;
2336 unsigned nedges = EDGE_COUNT (bb->succs);
2337 rtx_insn *jump = BB_END (bb);
2338 regset live, live_union;
2340 /* Nothing to do if there are not at least two outgoing edges. */
2341 if (nedges < 2)
2342 return false;
2344 /* Don't crossjump if this block ends in a computed jump,
2345 unless we are optimizing for size. */
2346 if (optimize_bb_for_size_p (bb)
2347 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
2348 && computed_jump_p (BB_END (bb)))
2349 return false;
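/* Find the condition of the final jump; MOVE_BEFORE is the earliest insn
   involved in computing it, and merged insns are inserted before that point.
   If there is no usable condition, fall back to the insn before a cc0 user,
   or to the jump itself. */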
2351 cond = get_condition (jump, &move_before, true, false);
2352 if (cond == NULL_RTX)
2354 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2355 move_before = prev_nonnote_nondebug_insn (jump);
2356 else
2357 move_before = jump;
2360 for (ix = 0; ix < nedges; ix++)
2361 if (EDGE_SUCC (bb, ix)->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
2362 return false;
2364 for (ix = 0; ix < nedges; ix++)
2366 edge e = EDGE_SUCC (bb, ix);
2367 basic_block other_bb = e->dest;
2369 if (df_get_bb_dirty (other_bb))
2371 block_was_dirty = true;
2372 return false;
2375 if (e->flags & EDGE_ABNORMAL)
2376 return false;
2378 /* Normally, all destination blocks must only be reachable from this
2379 block, i.e. they must have one incoming edge.
2381 There is one special case we can handle, that of multiple consecutive
2382 jumps where the first jumps to one of the targets of the second jump.
2383 This happens frequently in switch statements for default labels.
2384 The structure is as follows:
2385 FINAL_DEST_BB
2386 ....
2387 if (cond) jump A;
2388 fall through
2389 BB
2390 jump with targets A, B, C, D...
2391 A
2392 has two incoming edges, from FINAL_DEST_BB and BB
2394 In this case, we can try to move the insns through BB and into
2395 FINAL_DEST_BB. */
2396 if (EDGE_COUNT (other_bb->preds) != 1)
2398 edge incoming_edge, incoming_bb_other_edge;
2399 edge_iterator ei;
2401 if (final_dest_bb != NULL
2402 || EDGE_COUNT (other_bb->preds) != 2)
2403 return false;
2405 /* We must be able to move the insns across the whole block. */
2406 move_before = BB_HEAD (bb);
2407 while (!NONDEBUG_INSN_P (move_before))
2408 move_before = NEXT_INSN (move_before);
2410 if (EDGE_COUNT (bb->preds) != 1)
2411 return false;
2412 incoming_edge = EDGE_PRED (bb, 0);
2413 final_dest_bb = incoming_edge->src;
2414 if (EDGE_COUNT (final_dest_bb->succs) != 2)
2415 return false;
2416 FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)
2417 if (incoming_bb_other_edge != incoming_edge)
2418 break;
2419 if (incoming_bb_other_edge->dest != other_bb)
2420 return false;
2424 e0 = EDGE_SUCC (bb, 0);
2425 e0_last_head = NULL;
2426 changed = false;
2428 for (ix = 1; ix < nedges; ix++)
2430 edge e = EDGE_SUCC (bb, ix);
2431 rtx_insn *e0_last, *e_last;
2432 int nmatch;
2434 nmatch = flow_find_head_matching_sequence (e0->dest, e->dest,
2435 &e0_last, &e_last, 0);
2436 if (nmatch == 0)
2437 return false;
2439 if (nmatch < max_match)
2441 max_match = nmatch;
2442 e0_last_head = e0_last;
2446 /* If we matched an entire block, we probably have to avoid moving the
2447 last insn. */
2448 if (max_match > 0
2449 && e0_last_head == BB_END (e0->dest)
2450 && (find_reg_note (e0_last_head, REG_EH_REGION, 0)
2451 || control_flow_insn_p (e0_last_head)))
2453 max_match--;
2454 if (max_match == 0)
2455 return false;
2457 e0_last_head = prev_real_insn (e0_last_head);
2458 while (DEBUG_INSN_P (e0_last_head));
2461 if (max_match == 0)
2462 return false;
2464 /* We must find a union of the live registers at each of the end points. */
2465 live = BITMAP_ALLOC (NULL);
2466 live_union = BITMAP_ALLOC (NULL);
2468 currptr = XNEWVEC (rtx_insn *, nedges);
2469 headptr = XNEWVEC (rtx_insn *, nedges);
2470 nextptr = XNEWVEC (rtx_insn *, nedges);
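/* For each successor block: HEADPTR is its first non-debug insn, CURRPTR the
   last insn of the range merged so far, and NEXTPTR the insn to restart from
   when only part of the matched sequence could be moved. */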
2472 for (ix = 0; ix < nedges; ix++)
2474 int j;
2475 basic_block merge_bb = EDGE_SUCC (bb, ix)->dest;
2476 rtx_insn *head = BB_HEAD (merge_bb);
2478 while (!NONDEBUG_INSN_P (head))
2479 head = NEXT_INSN (head);
2480 headptr[ix] = head;
2481 currptr[ix] = head;
2483 /* Compute the end point of the matched range and the live information there. */
2484 for (j = 1; j < max_match; j++)
2486 head = NEXT_INSN (head);
2487 while (!NONDEBUG_INSN_P (head));
2488 simulate_backwards_to_point (merge_bb, live, head);
2489 IOR_REG_SET (live_union, live);
2492 /* If we're moving across two blocks, verify the validity of the
2493 first move, then adjust the target and let the loop below deal
2494 with the final move. */
2495 if (final_dest_bb != NULL)
2497 rtx_insn *move_upto;
2499 moveall = can_move_insns_across (currptr[0], e0_last_head, move_before,
2500 jump, e0->dest, live_union,
2501 NULL, &move_upto);
2502 if (!moveall)
2504 if (move_upto == NULL_RTX)
2505 goto out;
2507 while (e0_last_head != move_upto)
2509 df_simulate_one_insn_backwards (e0->dest, e0_last_head,
2510 live_union);
2511 e0_last_head = PREV_INSN (e0_last_head);
2514 if (e0_last_head == NULL_RTX)
2515 goto out;
2517 jump = BB_END (final_dest_bb);
2518 cond = get_condition (jump, &move_before, true, false);
2519 if (cond == NULL_RTX)
2521 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2522 move_before = prev_nonnote_nondebug_insn (jump);
2523 else
2524 move_before = jump;
2530 rtx_insn *move_upto;
2531 moveall = can_move_insns_across (currptr[0], e0_last_head,
2532 move_before, jump, e0->dest, live_union,
2533 NULL, &move_upto);
2534 if (!moveall && move_upto == NULL_RTX)
2536 if (jump == move_before)
2537 break;
2539 /* Try again, using a different insertion point. */
2540 move_before = jump;
2542 /* Don't try moving before a cc0 user, as that may invalidate
2543 the cc0. */
2544 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2545 break;
2547 continue;
2550 if (final_dest_bb && !moveall)
2551 /* We haven't checked whether a partial move would be OK for the first
2552 move, so we have to fail this case. */
2553 break;
2555 changed = true;
2556 for (;;)
2558 if (currptr[0] == move_upto)
2559 break;
2560 for (ix = 0; ix < nedges; ix++)
2562 rtx_insn *curr = currptr[ix];
2564 curr = NEXT_INSN (curr);
2565 while (!NONDEBUG_INSN_P (curr));
2566 currptr[ix] = curr;
2570 /* If we can't currently move all of the identical insns, remember
2571 each insn after the range that we'll merge. */
2572 if (!moveall)
2573 for (ix = 0; ix < nedges; ix++)
2575 rtx_insn *curr = currptr[ix];
2577 curr = NEXT_INSN (curr);
2578 while (!NONDEBUG_INSN_P (curr));
2579 nextptr[ix] = curr;
2582 reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before));
2583 df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest);
2584 if (final_dest_bb != NULL)
2585 df_set_bb_dirty (final_dest_bb);
2586 df_set_bb_dirty (bb);
2587 for (ix = 1; ix < nedges; ix++)
2589 df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest);
2590 delete_insn_chain (headptr[ix], currptr[ix], false);
2592 if (!moveall)
2594 if (jump == move_before)
2595 break;
2597 /* For the unmerged insns, try a different insertion point. */
2598 move_before = jump;
2600 /* Don't try moving before a cc0 user, as that may invalidate
2601 the cc0. */
2602 if (HAVE_cc0 && reg_mentioned_p (cc0_rtx, jump))
2603 break;
2605 for (ix = 0; ix < nedges; ix++)
2606 currptr[ix] = headptr[ix] = nextptr[ix];
2609 while (!moveall);
2611 out:
2612 free (currptr);
2613 free (headptr);
2614 free (nextptr);
2616 crossjumps_occured |= changed;
2618 return changed;
2621 /* Return true if BB contains just a bb note, or a bb note followed
2622 only by DEBUG_INSNs. */
2624 static bool
2625 trivially_empty_bb_p (basic_block bb)
2627 rtx_insn *insn = BB_END (bb);
2629 while (1)
2631 if (insn == BB_HEAD (bb))
2632 return true;
2633 if (!DEBUG_INSN_P (insn))
2634 return false;
2635 insn = PREV_INSN (insn);
2639 /* Do simple CFG optimizations - basic block merging, simplification of jump
2640 instructions, etc. Return nonzero if changes were made. */
2642 static bool
2643 try_optimize_cfg (int mode)
2645 bool changed_overall = false;
2646 bool changed;
2647 int iterations = 0;
2648 basic_block bb, b, next;
2650 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2651 clear_bb_flags ();
2653 crossjumps_occured = false;
2655 FOR_EACH_BB_FN (bb, cfun)
2656 update_forwarder_flag (bb);
2658 if (! targetm.cannot_modify_jumps_p ())
2660 first_pass = true;
2661 /* Attempt to merge blocks as made possible by edge removal. If
2662 a block has only one successor, and the successor has only
2663 one predecessor, they may be combined. */
2666 block_was_dirty = false;
2667 changed = false;
2668 iterations++;
2670 if (dump_file)
2671 fprintf (dump_file,
2672 "\n\ntry_optimize_cfg iteration %i\n\n",
2673 iterations);
2675 for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
2676 != EXIT_BLOCK_PTR_FOR_FN (cfun);)
2678 basic_block c;
2679 edge s;
2680 bool changed_here = false;
2682 /* Delete trivially dead basic blocks. These are either
2683 blocks with no predecessors, or empty blocks with no
2684 successors. However, if an empty block with no
2685 successors is the successor of the ENTRY_BLOCK, it is
2686 kept. This ensures that the ENTRY_BLOCK will have a
2687 successor which is a precondition for many RTL
2688 passes. Empty blocks may result from expanding
2689 __builtin_unreachable (). */
2690 if (EDGE_COUNT (b->preds) == 0
2691 || (EDGE_COUNT (b->succs) == 0
2692 && trivially_empty_bb_p (b)
2693 && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest
2694 != b))
2696 c = b->prev_bb;
2697 if (EDGE_COUNT (b->preds) > 0)
2699 edge e;
2700 edge_iterator ei;
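/* If the block being deleted is followed by a barrier, make sure its
   fallthru predecessors keep one too: in cfglayout mode move (or emit) a
   barrier into their footer; otherwise emit one after their last insn. */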
2702 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2704 if (BB_FOOTER (b)
2705 && BARRIER_P (BB_FOOTER (b)))
2706 FOR_EACH_EDGE (e, ei, b->preds)
2707 if ((e->flags & EDGE_FALLTHRU)
2708 && BB_FOOTER (e->src) == NULL)
2710 if (BB_FOOTER (b))
2712 BB_FOOTER (e->src) = BB_FOOTER (b);
2713 BB_FOOTER (b) = NULL;
2715 else
2717 start_sequence ();
2718 BB_FOOTER (e->src) = emit_barrier ();
2719 end_sequence ();
2723 else
2725 rtx_insn *last = get_last_bb_insn (b);
2726 if (last && BARRIER_P (last))
2727 FOR_EACH_EDGE (e, ei, b->preds)
2728 if ((e->flags & EDGE_FALLTHRU))
2729 emit_barrier_after (BB_END (e->src));
2732 delete_basic_block (b);
2733 changed = true;
2734 /* Avoid trying to remove the exit block. */
2735 b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c);
2736 continue;
2739 /* Remove code labels no longer used. */
2740 if (single_pred_p (b)
2741 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2742 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2743 && LABEL_P (BB_HEAD (b))
2744 && !LABEL_PRESERVE_P (BB_HEAD (b))
2745 /* If the previous block ends with a branch to this
2746 block, we can't delete the label. Normally this
2747 is a condjump that is yet to be simplified, but
2748 if CASE_DROPS_THRU, this can be a tablejump with
2749 some element going to the same place as the
2750 default (fallthru). */
2751 && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun)
2752 || !JUMP_P (BB_END (single_pred (b)))
2753 || ! label_is_jump_target_p (BB_HEAD (b),
2754 BB_END (single_pred (b)))))
2756 delete_insn (BB_HEAD (b));
2757 if (dump_file)
2758 fprintf (dump_file, "Deleted label in block %i.\n",
2759 b->index);
2762 /* If we fall through an empty block, we can remove it. */
2763 if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL))
2764 && single_pred_p (b)
2765 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2766 && !LABEL_P (BB_HEAD (b))
2767 && FORWARDER_BLOCK_P (b)
2768 /* Note that forwarder_block_p true ensures that
2769 there is a successor for this block. */
2770 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2771 && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1)
2773 if (dump_file)
2774 fprintf (dump_file,
2775 "Deleting fallthru block %i.\n",
2776 b->index);
2778 c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2779 ? b->next_bb : b->prev_bb);
2780 redirect_edge_succ_nodup (single_pred_edge (b),
2781 single_succ (b));
2782 delete_basic_block (b);
2783 changed = true;
2784 b = c;
2785 continue;
2788 /* Merge B with its single successor, if any. */
2789 if (single_succ_p (b)
2790 && (s = single_succ_edge (b))
2791 && !(s->flags & EDGE_COMPLEX)
2792 && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2793 && single_pred_p (c)
2794 && b != c)
2796 /* When not in cfg_layout mode use code aware of reordering
2797 INSNs. This code possibly creates new basic blocks, so it
2798 does not fit the merge_blocks interface and is kept here in
2799 the hope that it will become useless once more of the compiler
2800 is transformed to use cfg_layout mode. */
2802 if ((mode & CLEANUP_CFGLAYOUT)
2803 && can_merge_blocks_p (b, c))
2805 merge_blocks (b, c);
2806 update_forwarder_flag (b);
2807 changed_here = true;
2809 else if (!(mode & CLEANUP_CFGLAYOUT)
2810 /* If the jump insn has side effects,
2811 we can't kill the edge. */
2812 && (!JUMP_P (BB_END (b))
2813 || (reload_completed
2814 ? simplejump_p (BB_END (b))
2815 : (onlyjump_p (BB_END (b))
2816 && !tablejump_p (BB_END (b),
2817 NULL, NULL))))
2818 && (next = merge_blocks_move (s, b, c, mode)))
2820 b = next;
2821 changed_here = true;
2825 /* Simplify branch over branch. */
2826 if ((mode & CLEANUP_EXPENSIVE)
2827 && !(mode & CLEANUP_CFGLAYOUT)
2828 && try_simplify_condjump (b))
2829 changed_here = true;
2831 /* If B has a single outgoing edge, but uses a
2832 non-trivial jump instruction without side-effects, we
2833 can either delete the jump entirely, or replace it
2834 with a simple unconditional jump. */
2835 if (single_succ_p (b)
2836 && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun)
2837 && onlyjump_p (BB_END (b))
2838 && !CROSSING_JUMP_P (BB_END (b))
2839 && try_redirect_by_replacing_jump (single_succ_edge (b),
2840 single_succ (b),
2841 (mode & CLEANUP_CFGLAYOUT) != 0))
2843 update_forwarder_flag (b);
2844 changed_here = true;
2847 /* Simplify branch to branch. */
2848 if (try_forward_edges (mode, b))
2850 update_forwarder_flag (b);
2851 changed_here = true;
2854 /* Look for shared code between blocks. */
2855 if ((mode & CLEANUP_CROSSJUMP)
2856 && try_crossjump_bb (mode, b))
2857 changed_here = true;
2859 if ((mode & CLEANUP_CROSSJUMP)
2860 /* This can lengthen register lifetimes. Do it only after
2861 reload. */
2862 && reload_completed
2863 && try_head_merge_bb (b))
2864 changed_here = true;
2866 /* Don't get confused by the index shift caused by
2867 deleting blocks. */
2868 if (!changed_here)
2869 b = b->next_bb;
2870 else
2871 changed = true;
2874 if ((mode & CLEANUP_CROSSJUMP)
2875 && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun)))
2876 changed = true;
2878 if (block_was_dirty)
2880 /* This should only be set by head-merging. */
2881 gcc_assert (mode & CLEANUP_CROSSJUMP);
2882 df_analyze ();
2885 if (changed)
2887 /* Edge forwarding in particular can cause hot blocks previously
2888 reached by both hot and cold blocks to become dominated only
2889 by cold blocks. This will cause the verification below to fail,
2890 and lead to now-cold code in the hot section. This is not easy
2891 to detect and fix during edge forwarding, and in some cases
2892 is only visible after newly unreachable blocks are deleted,
2893 which will be done in fixup_partitions. */
2894 fixup_partitions ();
2896 #ifdef ENABLE_CHECKING
2897 verify_flow_info ();
2898 #endif
2901 changed_overall |= changed;
2902 first_pass = false;
2904 while (changed);
2907 FOR_ALL_BB_FN (b, cfun)
2908 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2910 return changed_overall;
2913 /* Delete all unreachable basic blocks. */
2915 bool
2916 delete_unreachable_blocks (void)
2918 bool changed = false;
2919 basic_block b, prev_bb;
2921 find_unreachable_blocks ();
2923 /* When we're in GIMPLE mode and there may be debug insns, we should
2924 delete blocks in reverse dominator order, so as to get a chance
2925 to substitute all released DEFs into debug stmts. If we don't
2926 have dominators information, walking blocks backward gets us a
2927 better chance of retaining most debug information than
2928 otherwise. */
2929 if (MAY_HAVE_DEBUG_INSNS && current_ir_type () == IR_GIMPLE
2930 && dom_info_available_p (CDI_DOMINATORS))
2932 for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2933 b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
2935 prev_bb = b->prev_bb;
2937 if (!(b->flags & BB_REACHABLE))
2939 /* Speed up the removal of blocks that don't dominate
2940 others. Walking backwards, this should be the common
2941 case. */
2942 if (!first_dom_son (CDI_DOMINATORS, b))
2943 delete_basic_block (b);
2944 else
2946 vec<basic_block> h
2947 = get_all_dominated_blocks (CDI_DOMINATORS, b);
2949 while (h.length ())
2951 b = h.pop ();
2953 prev_bb = b->prev_bb;
2955 gcc_assert (!(b->flags & BB_REACHABLE));
2957 delete_basic_block (b);
2960 h.release ();
2963 changed = true;
2967 else
2969 for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
2970 b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb)
2972 prev_bb = b->prev_bb;
2974 if (!(b->flags & BB_REACHABLE))
2976 delete_basic_block (b);
2977 changed = true;
2982 if (changed)
2983 tidy_fallthru_edges ();
2984 return changed;
2987 /* Delete any jump tables never referenced. We can't delete them at the
2988 time of removing the tablejump insn, as they are referenced by the preceding
2989 insns computing the destination, so we delay deleting them and garbage-collect
2990 them once life information is computed. */
2991 void
2992 delete_dead_jumptables (void)
2994 basic_block bb;
2996 /* A dead jump table does not belong to any basic block. Scan insns
2997 between two adjacent basic blocks. */
2998 FOR_EACH_BB_FN (bb, cfun)
3000 rtx_insn *insn, *next;
3002 for (insn = NEXT_INSN (BB_END (bb));
3003 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
3004 insn = next)
3006 next = NEXT_INSN (insn);
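/* A label whose remaining uses are accounted for solely by LABEL_PRESERVE_P,
   immediately followed by jump table data, identifies a dead jump table. */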
3007 if (LABEL_P (insn)
3008 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
3009 && JUMP_TABLE_DATA_P (next))
3011 rtx_insn *label = insn, *jump = next;
3013 if (dump_file)
3014 fprintf (dump_file, "Dead jumptable %i removed\n",
3015 INSN_UID (insn));
3017 next = NEXT_INSN (next);
3018 delete_insn (jump);
3019 delete_insn (label);
3026 /* Tidy the CFG by deleting unreachable code and whatnot. */
3028 bool
3029 cleanup_cfg (int mode)
3031 bool changed = false;
3033 /* Set the cfglayout mode flag here. We could update all the callers
3034 but that is just inconvenient, especially given that we eventually
3035 want to have cfglayout mode as the default. */
3036 if (current_ir_type () == IR_RTL_CFGLAYOUT)
3037 mode |= CLEANUP_CFGLAYOUT;
3039 timevar_push (TV_CLEANUP_CFG);
3040 if (delete_unreachable_blocks ())
3042 changed = true;
3043 /* We've possibly created trivially dead code. Clean it up right
3044 now to introduce more opportunities for try_optimize_cfg. */
3045 if (!(mode & (CLEANUP_NO_INSN_DEL))
3046 && !reload_completed)
3047 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3050 compact_blocks ();
3052 /* To tail-merge blocks ending in the same noreturn function (e.g.
3053 a call to abort) we have to insert fake edges to exit. Do this
3054 here once. The fake edges do not interfere with any other CFG
3055 cleanups. */
3056 if (mode & CLEANUP_CROSSJUMP)
3057 add_noreturn_fake_exit_edges ();
3059 if (!dbg_cnt (cfg_cleanup))
3060 return changed;
3062 while (try_optimize_cfg (mode))
3064 delete_unreachable_blocks (), changed = true;
3065 if (!(mode & CLEANUP_NO_INSN_DEL))
3067 /* Try to remove some trivially dead insns when doing an expensive
3068 cleanup. But delete_trivially_dead_insns doesn't work after
3069 reload (it only handles pseudos) and run_fast_dce is too costly
3070 to run in every iteration.
3072 For effective cross jumping, we really want to run a fast DCE to
3073 clean up any dead conditions, or they get in the way of performing
3074 useful tail merges.
3076 Other transformations in cleanup_cfg are not so sensitive to dead
3077 code, so delete_trivially_dead_insns or even doing nothing at all
3078 is good enough. */
3079 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
3080 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3081 break;
3082 if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occured)
3083 run_fast_dce ();
3085 else
3086 break;
3089 if (mode & CLEANUP_CROSSJUMP)
3090 remove_fake_exit_edges ();
3092 /* Don't call delete_dead_jumptables in cfglayout mode, because
3093 that function assumes that jump tables are in the insns stream.
3094 But we also don't _have_ to delete dead jumptables in cfglayout
3095 mode because we shouldn't even be looking at things that are
3096 not in a basic block. Dead jumptables are cleaned up when
3097 going out of cfglayout mode. */
3098 if (!(mode & CLEANUP_CFGLAYOUT))
3099 delete_dead_jumptables ();
3101 /* ??? We probably do this way too often. */
3102 if (current_loops
3103 && (changed
3104 || (mode & CLEANUP_CFG_CHANGED)))
3106 timevar_push (TV_REPAIR_LOOPS);
3107 /* The above doesn't preserve dominance info if available. */
3108 gcc_assert (!dom_info_available_p (CDI_DOMINATORS));
3109 calculate_dominance_info (CDI_DOMINATORS);
3110 fix_loop_structure (NULL);
3111 free_dominance_info (CDI_DOMINATORS);
3112 timevar_pop (TV_REPAIR_LOOPS);
3115 timevar_pop (TV_CLEANUP_CFG);
3117 return changed;
3120 namespace {
3122 const pass_data pass_data_jump =
3124 RTL_PASS, /* type */
3125 "jump", /* name */
3126 OPTGROUP_NONE, /* optinfo_flags */
3127 TV_JUMP, /* tv_id */
3128 0, /* properties_required */
3129 0, /* properties_provided */
3130 0, /* properties_destroyed */
3131 0, /* todo_flags_start */
3132 0, /* todo_flags_finish */
3135 class pass_jump : public rtl_opt_pass
3137 public:
3138 pass_jump (gcc::context *ctxt)
3139 : rtl_opt_pass (pass_data_jump, ctxt)
3142 /* opt_pass methods: */
3143 virtual unsigned int execute (function *);
3145 }; // class pass_jump
3147 unsigned int
3148 pass_jump::execute (function *)
3150 delete_trivially_dead_insns (get_insns (), max_reg_num ());
3151 if (dump_file)
3152 dump_flow_info (dump_file, dump_flags);
3153 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
3154 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
3155 return 0;
3158 } // anon namespace
3160 rtl_opt_pass *
3161 make_pass_jump (gcc::context *ctxt)
3163 return new pass_jump (ctxt);
3166 namespace {
3168 const pass_data pass_data_jump2 =
3170 RTL_PASS, /* type */
3171 "jump2", /* name */
3172 OPTGROUP_NONE, /* optinfo_flags */
3173 TV_JUMP, /* tv_id */
3174 0, /* properties_required */
3175 0, /* properties_provided */
3176 0, /* properties_destroyed */
3177 0, /* todo_flags_start */
3178 0, /* todo_flags_finish */
3181 class pass_jump2 : public rtl_opt_pass
3183 public:
3184 pass_jump2 (gcc::context *ctxt)
3185 : rtl_opt_pass (pass_data_jump2, ctxt)
3188 /* opt_pass methods: */
3189 virtual unsigned int execute (function *)
3191 cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0);
3192 return 0;
3195 }; // class pass_jump2
3197 } // anon namespace
3199 rtl_opt_pass *
3200 make_pass_jump2 (gcc::context *ctxt)
3202 return new pass_jump2 (ctxt);