[official-gcc.git] / gcc / cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by the
28 underlying infrastructure, so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
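/* As a small illustration of edge forwarding, a chain like

     if (cond) goto L1;            becomes       if (cond) goto L2;
     ...                                         ...
   L1: goto L2;                                L1: goto L2;

   where the forwarder block at L1 is bypassed (and, if nothing else
   reaches it, later removed as unreachable).  Cross jumping (tail
   merging) instead unifies identical insn sequences at the ends of
   blocks that share a destination, so the common tail is kept only
   once.  */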
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "diagnostic-core.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
57 #include "dce.h"
58 #include "dbgcnt.h"
60 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
62 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
63 static bool first_pass;
65 /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */
66 static bool crossjumps_occured;
68 static bool try_crossjump_to_edge (int, edge, edge);
69 static bool try_crossjump_bb (int, basic_block);
70 static bool outgoing_edges_match (int, basic_block, basic_block);
71 static bool old_insns_match_p (int, rtx, rtx);
73 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
74 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
75 static bool try_optimize_cfg (int);
76 static bool try_simplify_condjump (basic_block);
77 static bool try_forward_edges (int, basic_block);
78 static edge thread_jump (edge, basic_block);
79 static bool mark_effect (rtx, bitmap);
80 static void notice_new_block (basic_block);
81 static void update_forwarder_flag (basic_block);
82 static int mentions_nonequal_regs (rtx *, void *);
83 static void merge_memattrs (rtx, rtx);
85 /* Set flags for newly created block. */
87 static void
88 notice_new_block (basic_block bb)
90 if (!bb)
91 return;
93 if (forwarder_block_p (bb))
94 bb->flags |= BB_FORWARDER_BLOCK;
97 /* Recompute forwarder flag after block has been modified. */
99 static void
100 update_forwarder_flag (basic_block bb)
102 if (forwarder_block_p (bb))
103 bb->flags |= BB_FORWARDER_BLOCK;
104 else
105 bb->flags &= ~BB_FORWARDER_BLOCK;
108 /* Simplify a conditional jump around an unconditional jump.
109 Return true if something changed. */
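/* For example, the sequence

     if (cond) goto L1;
     goto L2;
   L1:
     ...

   is rewritten as

     if (!cond) goto L2;
   L1:
     ...

   and the block that held only the unconditional jump is deleted.  */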
111 static bool
112 try_simplify_condjump (basic_block cbranch_block)
114 basic_block jump_block, jump_dest_block, cbranch_dest_block;
115 edge cbranch_jump_edge, cbranch_fallthru_edge;
116 rtx cbranch_insn;
118 /* Verify that there are exactly two successors. */
119 if (EDGE_COUNT (cbranch_block->succs) != 2)
120 return false;
122 /* Verify that we've got a normal conditional branch at the end
123 of the block. */
124 cbranch_insn = BB_END (cbranch_block);
125 if (!any_condjump_p (cbranch_insn))
126 return false;
128 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
129 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
131 /* The next block must not have multiple predecessors, must not
132 be the last block in the function, and must contain just the
133 unconditional jump. */
134 jump_block = cbranch_fallthru_edge->dest;
135 if (!single_pred_p (jump_block)
136 || jump_block->next_bb == EXIT_BLOCK_PTR
137 || !FORWARDER_BLOCK_P (jump_block))
138 return false;
139 jump_dest_block = single_succ (jump_block);
141 /* If we are partitioning hot/cold basic blocks, we don't want to
142 mess up unconditional or indirect jumps that cross between hot
143 and cold sections.
145 Basic block partitioning may result in some jumps that appear to
146 be optimizable (or blocks that appear to be mergeable), but which really
147 must be left untouched (they are required to make it safely across
148 partition boundaries). See the comments at the top of
149 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
151 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
152 || (cbranch_jump_edge->flags & EDGE_CROSSING))
153 return false;
155 /* The conditional branch must target the block after the
156 unconditional branch. */
157 cbranch_dest_block = cbranch_jump_edge->dest;
159 if (cbranch_dest_block == EXIT_BLOCK_PTR
160 || !can_fallthru (jump_block, cbranch_dest_block))
161 return false;
163 /* Invert the conditional branch. */
164 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
165 return false;
167 if (dump_file)
168 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
169 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
171 /* Success. Update the CFG to match. Note that after this point
172 the edge variable names appear backwards; the redirection is done
173 this way to preserve edge profile data. */
174 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
175 cbranch_dest_block);
176 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
177 jump_dest_block);
178 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
179 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
180 update_br_prob_note (cbranch_block);
182 /* Delete the block with the unconditional jump, and clean up the mess. */
183 delete_basic_block (jump_block);
184 tidy_fallthru_edge (cbranch_jump_edge);
185 update_forwarder_flag (cbranch_block);
187 return true;
190 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
191 effect on the register. Used by jump threading. */
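/* Roughly: a SET whose source and destination are already equal according
   to cselib is a no-op and leaves NONEQUAL untouched; a SET of a register
   to a different value marks that register (and every hard register it
   overlaps) as nonequal; a CLOBBER clears the register from NONEQUAL,
   since the old value is dead; a SET whose destination is neither the PC
   nor a register makes the caller give up.  */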
193 static bool
194 mark_effect (rtx exp, regset nonequal)
196 int regno;
197 rtx dest;
198 switch (GET_CODE (exp))
200 /* In case we do clobber the register, mark it as equal, as we know the
201 value is dead so it doesn't have to match. */
202 case CLOBBER:
203 if (REG_P (XEXP (exp, 0)))
205 dest = XEXP (exp, 0);
206 regno = REGNO (dest);
207 CLEAR_REGNO_REG_SET (nonequal, regno);
208 if (regno < FIRST_PSEUDO_REGISTER)
210 int n = hard_regno_nregs[regno][GET_MODE (dest)];
211 while (--n > 0)
212 CLEAR_REGNO_REG_SET (nonequal, regno + n);
215 return false;
217 case SET:
218 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
219 return false;
220 dest = SET_DEST (exp);
221 if (dest == pc_rtx)
222 return false;
223 if (!REG_P (dest))
224 return true;
225 regno = REGNO (dest);
226 SET_REGNO_REG_SET (nonequal, regno);
227 if (regno < FIRST_PSEUDO_REGISTER)
229 int n = hard_regno_nregs[regno][GET_MODE (dest)];
230 while (--n > 0)
231 SET_REGNO_REG_SET (nonequal, regno + n);
233 return false;
235 default:
236 return false;
240 /* Return nonzero if X is a register set in regset DATA.
241 Called via for_each_rtx. */
242 static int
243 mentions_nonequal_regs (rtx *x, void *data)
245 regset nonequal = (regset) data;
246 if (REG_P (*x))
248 int regno;
250 regno = REGNO (*x);
251 if (REGNO_REG_SET_P (nonequal, regno))
252 return 1;
253 if (regno < FIRST_PSEUDO_REGISTER)
255 int n = hard_regno_nregs[regno][GET_MODE (*x)];
256 while (--n > 0)
257 if (REGNO_REG_SET_P (nonequal, regno + n))
258 return 1;
261 return 0;
263 /* Attempt to prove that the basic block B will have no side effects and
264 always continues along the same edge if reached via E. Return that edge
265 if it exists, NULL otherwise. */
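/* For example, if edge E is taken only when x == 0, block B contains no
   side effects, and B ends with "if (x != 0) goto L;", then B's branch is
   known not to be taken when B is entered via E, so E can be redirected
   past B straight to B's fallthru successor.  */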
267 static edge
268 thread_jump (edge e, basic_block b)
270 rtx set1, set2, cond1, cond2, insn;
271 enum rtx_code code1, code2, reversed_code2;
272 bool reverse1 = false;
273 unsigned i;
274 regset nonequal;
275 bool failed = false;
276 reg_set_iterator rsi;
278 if (b->flags & BB_NONTHREADABLE_BLOCK)
279 return NULL;
281 /* At the moment we handle only conditional jumps, but later we may
282 want to extend this code to tablejumps and others. */
283 if (EDGE_COUNT (e->src->succs) != 2)
284 return NULL;
285 if (EDGE_COUNT (b->succs) != 2)
287 b->flags |= BB_NONTHREADABLE_BLOCK;
288 return NULL;
291 /* Second branch must end with onlyjump, as we will eliminate the jump. */
292 if (!any_condjump_p (BB_END (e->src)))
293 return NULL;
295 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
297 b->flags |= BB_NONTHREADABLE_BLOCK;
298 return NULL;
301 set1 = pc_set (BB_END (e->src));
302 set2 = pc_set (BB_END (b));
303 if (((e->flags & EDGE_FALLTHRU) != 0)
304 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
305 reverse1 = true;
307 cond1 = XEXP (SET_SRC (set1), 0);
308 cond2 = XEXP (SET_SRC (set2), 0);
309 if (reverse1)
310 code1 = reversed_comparison_code (cond1, BB_END (e->src));
311 else
312 code1 = GET_CODE (cond1);
314 code2 = GET_CODE (cond2);
315 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
317 if (!comparison_dominates_p (code1, code2)
318 && !comparison_dominates_p (code1, reversed_code2))
319 return NULL;
321 /* Ensure that the comparison operators are equivalent.
322 ??? This is far too pessimistic. We should allow swapped operands,
323 different CCmodes, or, for example, interval comparisons that
324 dominate even when the operands are not equivalent. */
325 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
326 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
327 return NULL;
329 /* Short circuit cases where block B contains some side effects, as we can't
330 safely bypass it. */
331 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
332 insn = NEXT_INSN (insn))
333 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
335 b->flags |= BB_NONTHREADABLE_BLOCK;
336 return NULL;
339 cselib_init (0);
341 /* First process all values computed in the source basic block. */
342 for (insn = NEXT_INSN (BB_HEAD (e->src));
343 insn != NEXT_INSN (BB_END (e->src));
344 insn = NEXT_INSN (insn))
345 if (INSN_P (insn))
346 cselib_process_insn (insn);
348 nonequal = BITMAP_ALLOC (NULL);
349 CLEAR_REG_SET (nonequal);
351 /* Now assume that we've continued along the edge E to B and continue
352 processing as if it were the same basic block.
353 Our goal is to prove that the whole block is a NOOP. */
355 for (insn = NEXT_INSN (BB_HEAD (b));
356 insn != NEXT_INSN (BB_END (b)) && !failed;
357 insn = NEXT_INSN (insn))
359 if (INSN_P (insn))
361 rtx pat = PATTERN (insn);
363 if (GET_CODE (pat) == PARALLEL)
365 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
366 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
368 else
369 failed |= mark_effect (pat, nonequal);
372 cselib_process_insn (insn);
375 /* Later we should clear the dead registers from NONEQUAL. So far we don't
376 have liveness information in cfg_cleanup. */
377 if (failed)
379 b->flags |= BB_NONTHREADABLE_BLOCK;
380 goto failed_exit;
383 /* cond2 must not mention any register whose value is not known to be
384 equal to its value in the former block. */
385 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
386 goto failed_exit;
388 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
389 goto failed_exit;
391 BITMAP_FREE (nonequal);
392 cselib_finish ();
393 if ((comparison_dominates_p (code1, code2) != 0)
394 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
395 return BRANCH_EDGE (b);
396 else
397 return FALLTHRU_EDGE (b);
399 failed_exit:
400 BITMAP_FREE (nonequal);
401 cselib_finish ();
402 return NULL;
405 /* Attempt to forward edges leaving basic block B.
406 Return true if successful. */
408 static bool
409 try_forward_edges (int mode, basic_block b)
411 bool changed = false;
412 edge_iterator ei;
413 edge e, *threaded_edges = NULL;
415 /* If we are partitioning hot/cold basic blocks, we don't want to
416 mess up unconditional or indirect jumps that cross between hot
417 and cold sections.
419 Basic block partitioning may result in some jumps that appear to
420 be optimizable (or blocks that appear to be mergeable), but which really
421 must be left untouched (they are required to make it safely across
422 partition boundaries). See the comments at the top of
423 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
425 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
426 return false;
428 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
430 basic_block target, first;
431 int counter, goto_locus;
432 bool threaded = false;
433 int nthreaded_edges = 0;
434 bool may_thread = first_pass | df_get_bb_dirty (b);
436 /* Skip complex edges because we don't know how to update them.
438 Still handle fallthru edges, as we may succeed in forwarding the fallthru
439 edge to the same place as the branch edge of a conditional branch
440 and turn the conditional branch into an unconditional branch. */
441 if (e->flags & EDGE_COMPLEX)
443 ei_next (&ei);
444 continue;
447 target = first = e->dest;
448 counter = NUM_FIXED_BLOCKS;
449 goto_locus = e->goto_locus;
451 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
452 up jumps that cross between hot/cold sections.
454 Basic block partitioning may result in some jumps that appear
455 to be optimizable (or blocks that appear to be mergeable), but which
456 really must be left untouched (they are required to make it safely
457 across partition boundaries). See the comments at the top of
458 bb-reorder.c:partition_hot_cold_basic_blocks for complete
459 details. */
461 if (first != EXIT_BLOCK_PTR
462 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
463 return false;
465 while (counter < n_basic_blocks)
467 basic_block new_target = NULL;
468 bool new_target_threaded = false;
469 may_thread |= df_get_bb_dirty (target);
471 if (FORWARDER_BLOCK_P (target)
472 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
473 && single_succ (target) != EXIT_BLOCK_PTR)
475 /* Bypass trivial infinite loops. */
476 new_target = single_succ (target);
477 if (target == new_target)
478 counter = n_basic_blocks;
479 else if (!optimize)
481 /* When not optimizing, ensure that edges or forwarder
482 blocks with different locus are not optimized out. */
483 int new_locus = single_succ_edge (target)->goto_locus;
484 int locus = goto_locus;
486 if (new_locus && locus && !locator_eq (new_locus, locus))
487 new_target = NULL;
488 else
490 if (new_locus)
491 locus = new_locus;
493 new_locus = INSN_P (BB_END (target))
494 ? INSN_LOCATOR (BB_END (target)) : 0;
496 if (new_locus && locus && !locator_eq (new_locus, locus))
497 new_target = NULL;
498 else
500 if (new_locus)
501 locus = new_locus;
503 goto_locus = locus;
509 /* Allow threading only over one edge at a time to simplify updating
510 of probabilities. */
511 else if ((mode & CLEANUP_THREADING) && may_thread)
513 edge t = thread_jump (e, target);
514 if (t)
516 if (!threaded_edges)
517 threaded_edges = XNEWVEC (edge, n_basic_blocks);
518 else
520 int i;
522 /* Detect an infinite loop across blocks not
523 including the start block. */
524 for (i = 0; i < nthreaded_edges; ++i)
525 if (threaded_edges[i] == t)
526 break;
527 if (i < nthreaded_edges)
529 counter = n_basic_blocks;
530 break;
534 /* Detect an infinite loop across the start block. */
535 if (t->dest == b)
536 break;
538 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
539 threaded_edges[nthreaded_edges++] = t;
541 new_target = t->dest;
542 new_target_threaded = true;
546 if (!new_target)
547 break;
549 counter++;
550 target = new_target;
551 threaded |= new_target_threaded;
554 if (counter >= n_basic_blocks)
556 if (dump_file)
557 fprintf (dump_file, "Infinite loop in BB %i.\n",
558 target->index);
560 else if (target == first)
561 ; /* We didn't do anything. */
562 else
564 /* Save the values now, as the edge may get removed. */
565 gcov_type edge_count = e->count;
566 int edge_probability = e->probability;
567 int edge_frequency;
568 int n = 0;
570 e->goto_locus = goto_locus;
572 /* Don't force if target is exit block. */
573 if (threaded && target != EXIT_BLOCK_PTR)
575 notice_new_block (redirect_edge_and_branch_force (e, target));
576 if (dump_file)
577 fprintf (dump_file, "Conditionals threaded.\n");
579 else if (!redirect_edge_and_branch (e, target))
581 if (dump_file)
582 fprintf (dump_file,
583 "Forwarding edge %i->%i to %i failed.\n",
584 b->index, e->dest->index, target->index);
585 ei_next (&ei);
586 continue;
589 /* We successfully forwarded the edge. Now update profile
590 data: for each edge we traversed in the chain, remove
591 the original edge's execution count. */
592 edge_frequency = ((edge_probability * b->frequency
593 + REG_BR_PROB_BASE / 2)
594 / REG_BR_PROB_BASE);
596 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
597 b->flags |= BB_FORWARDER_BLOCK;
601 edge t;
603 if (!single_succ_p (first))
605 gcc_assert (n < nthreaded_edges);
606 t = threaded_edges [n++];
607 gcc_assert (t->src == first);
608 update_bb_profile_for_threading (first, edge_frequency,
609 edge_count, t);
610 update_br_prob_note (first);
612 else
614 first->count -= edge_count;
615 if (first->count < 0)
616 first->count = 0;
617 first->frequency -= edge_frequency;
618 if (first->frequency < 0)
619 first->frequency = 0;
620 /* It is possible that as a result of
621 threading we've removed the edge, as it is
622 threaded to the fallthru edge. Avoid
623 getting out of sync. */
624 if (n < nthreaded_edges
625 && first == threaded_edges [n]->src)
626 n++;
627 t = single_succ_edge (first);
630 t->count -= edge_count;
631 if (t->count < 0)
632 t->count = 0;
633 first = t->dest;
635 while (first != target);
637 changed = true;
638 continue;
640 ei_next (&ei);
643 if (threaded_edges)
644 free (threaded_edges);
645 return changed;
649 /* Blocks A and B are to be merged into a single block. A has no incoming
650 fallthru edge, so it can be moved before B without adding or modifying
651 any jumps (aside from the jump from A to B). */
653 static void
654 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
656 rtx barrier;
658 /* If we are partitioning hot/cold basic blocks, we don't want to
659 mess up unconditional or indirect jumps that cross between hot
660 and cold sections.
662 Basic block partitioning may result in some jumps that appear to
663 be optimizable (or blocks that appear to be mergeable), but which really
664 must be left untouched (they are required to make it safely across
665 partition boundaries). See the comments at the top of
666 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
668 if (BB_PARTITION (a) != BB_PARTITION (b))
669 return;
671 barrier = next_nonnote_insn (BB_END (a));
672 gcc_assert (BARRIER_P (barrier));
673 delete_insn (barrier);
675 /* Scramble the insn chain. */
676 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
677 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
678 df_set_bb_dirty (a);
680 if (dump_file)
681 fprintf (dump_file, "Moved block %d before %d and merged.\n",
682 a->index, b->index);
684 /* Swap the records for the two blocks around. */
686 unlink_block (a);
687 link_block (a, b->prev_bb);
689 /* Now blocks A and B are contiguous. Merge them. */
690 merge_blocks (a, b);
693 /* Blocks A and B are to be merged into a single block. B has no outgoing
694 fallthru edge, so it can be moved after A without adding or modifying
695 any jumps (aside from the jump from A to B). */
697 static void
698 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
700 rtx barrier, real_b_end;
701 rtx label, table;
703 /* If we are partitioning hot/cold basic blocks, we don't want to
704 mess up unconditional or indirect jumps that cross between hot
705 and cold sections.
707 Basic block partitioning may result in some jumps that appear to
708 be optimizable (or blocks that appear to be mergeable), but which really
709 must be left untouched (they are required to make it safely across
710 partition boundaries). See the comments at the top of
711 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
713 if (BB_PARTITION (a) != BB_PARTITION (b))
714 return;
716 real_b_end = BB_END (b);
718 /* If there is a jump table following block B temporarily add the jump table
719 to block B so that it will also be moved to the correct location. */
720 if (tablejump_p (BB_END (b), &label, &table)
721 && prev_active_insn (label) == BB_END (b))
723 BB_END (b) = table;
726 /* There had better have been a barrier there. Delete it. */
727 barrier = NEXT_INSN (BB_END (b));
728 if (barrier && BARRIER_P (barrier))
729 delete_insn (barrier);
732 /* Scramble the insn chain. */
733 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
735 /* Restore the real end of b. */
736 BB_END (b) = real_b_end;
738 if (dump_file)
739 fprintf (dump_file, "Moved block %d after %d and merged.\n",
740 b->index, a->index);
742 /* Now blocks A and B are contiguous. Merge them. */
743 merge_blocks (a, b);
746 /* Attempt to merge basic blocks that are potentially non-adjacent.
747 Return NULL iff the attempt failed, otherwise return the basic block
748 where cleanup_cfg should continue. Because the merging commonly
749 moves the basic block away or introduces another optimization
750 possibility, return the basic block just before B so cleanup_cfg doesn't
751 need to iterate.
753 It may be a good idea to return the basic block before C in the case
754 that C has been moved after B and originally appeared earlier in the
755 insn sequence, but we have no information available about the
756 relative ordering of these two. Hopefully it is not too common. */
758 static basic_block
759 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
761 basic_block next;
763 /* If we are partitioning hot/cold basic blocks, we don't want to
764 mess up unconditional or indirect jumps that cross between hot
765 and cold sections.
767 Basic block partitioning may result in some jumps that appear to
768 be optimizable (or blocks that appear to be mergeable), but which really
769 must be left untouched (they are required to make it safely across
770 partition boundaries). See the comments at the top of
771 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
773 if (BB_PARTITION (b) != BB_PARTITION (c))
774 return NULL;
776 /* If B has a fallthru edge to C, no need to move anything. */
777 if (e->flags & EDGE_FALLTHRU)
779 int b_index = b->index, c_index = c->index;
780 merge_blocks (b, c);
781 update_forwarder_flag (b);
783 if (dump_file)
784 fprintf (dump_file, "Merged %d and %d without moving.\n",
785 b_index, c_index);
787 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
790 /* Otherwise we will need to move code around. Do that only if expensive
791 transformations are allowed. */
792 else if (mode & CLEANUP_EXPENSIVE)
794 edge tmp_edge, b_fallthru_edge;
795 bool c_has_outgoing_fallthru;
796 bool b_has_incoming_fallthru;
798 /* Avoid overactive code motion, as the forwarder blocks should be
799 eliminated by edge redirection instead. One exception might have
800 been if B is a forwarder block and C has no fallthru edge, but
801 that should be cleaned up by bb-reorder instead. */
802 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
803 return NULL;
805 /* We must make sure to not munge nesting of lexical blocks,
806 and loop notes. This is done by squeezing out all the notes
807 and leaving them there to lie. Not ideal, but functional. */
809 tmp_edge = find_fallthru_edge (c->succs);
810 c_has_outgoing_fallthru = (tmp_edge != NULL);
812 tmp_edge = find_fallthru_edge (b->preds);
813 b_has_incoming_fallthru = (tmp_edge != NULL);
814 b_fallthru_edge = tmp_edge;
815 next = b->prev_bb;
816 if (next == c)
817 next = next->prev_bb;
819 /* Otherwise, we're going to try to move C after B. If C does
820 not have an outgoing fallthru, then it can be moved
821 immediately after B without introducing or modifying jumps. */
822 if (! c_has_outgoing_fallthru)
824 merge_blocks_move_successor_nojumps (b, c);
825 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
828 /* If B does not have an incoming fallthru, then it can be moved
829 immediately before C without introducing or modifying jumps.
830 C cannot be the first block, so we do not have to worry about
831 accessing a non-existent block. */
833 if (b_has_incoming_fallthru)
835 basic_block bb;
837 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
838 return NULL;
839 bb = force_nonfallthru (b_fallthru_edge);
840 if (bb)
841 notice_new_block (bb);
844 merge_blocks_move_predecessor_nojumps (b, c);
845 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
848 return NULL;
852 /* Walk X and Y in parallel and remove (or conservatively merge) the memory
853 attributes of corresponding MEMs when they are not equal. */
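/* For example, when two matched insns reference MEMs whose alias sets
   differ, both alias sets are reset to 0; differing MEM_EXPR or MEM_OFFSET
   information is dropped, the recorded size becomes the larger of the two
   (or is dropped if either is missing), and the alignment becomes the
   smaller, so the surviving attributes are valid for both originals.  */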
855 void
856 merge_memattrs (rtx x, rtx y)
858 int i;
859 int j;
860 enum rtx_code code;
861 const char *fmt;
863 if (x == y)
864 return;
865 if (x == 0 || y == 0)
866 return;
868 code = GET_CODE (x);
870 if (code != GET_CODE (y))
871 return;
873 if (GET_MODE (x) != GET_MODE (y))
874 return;
876 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
878 if (! MEM_ATTRS (x))
879 MEM_ATTRS (y) = 0;
880 else if (! MEM_ATTRS (y))
881 MEM_ATTRS (x) = 0;
882 else
884 rtx mem_size;
886 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
888 set_mem_alias_set (x, 0);
889 set_mem_alias_set (y, 0);
892 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
894 set_mem_expr (x, 0);
895 set_mem_expr (y, 0);
896 set_mem_offset (x, 0);
897 set_mem_offset (y, 0);
899 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
901 set_mem_offset (x, 0);
902 set_mem_offset (y, 0);
905 if (!MEM_SIZE (x))
906 mem_size = NULL_RTX;
907 else if (!MEM_SIZE (y))
908 mem_size = NULL_RTX;
909 else
910 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
911 INTVAL (MEM_SIZE (y))));
912 set_mem_size (x, mem_size);
913 set_mem_size (y, mem_size);
915 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
916 set_mem_align (y, MEM_ALIGN (x));
920 fmt = GET_RTX_FORMAT (code);
921 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
923 switch (fmt[i])
925 case 'E':
926 /* Two vectors must have the same length. */
927 if (XVECLEN (x, i) != XVECLEN (y, i))
928 return;
930 for (j = 0; j < XVECLEN (x, i); j++)
931 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
933 break;
935 case 'e':
936 merge_memattrs (XEXP (x, i), XEXP (y, i));
939 return;
943 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
945 static bool
946 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
948 rtx p1, p2;
950 /* Verify that I1 and I2 are equivalent. */
951 if (GET_CODE (i1) != GET_CODE (i2))
952 return false;
954 /* __builtin_unreachable() may lead to empty blocks (ending with
955 NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */
956 if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
957 return true;
959 p1 = PATTERN (i1);
960 p2 = PATTERN (i2);
962 if (GET_CODE (p1) != GET_CODE (p2))
963 return false;
965 /* If this is a CALL_INSN, compare register usage information.
966 If we don't check this on stack register machines, the two
967 CALL_INSNs might be merged leaving reg-stack.c with mismatching
968 numbers of stack registers in the same basic block.
969 If we don't check this on machines with delay slots, a delay slot may
970 be filled that clobbers a parameter expected by the subroutine.
972 ??? We take the simple route for now and assume that if they're
973 equal, they were constructed identically.
975 Also check for identical exception regions. */
977 if (CALL_P (i1))
979 /* Ensure the same EH region. */
980 rtx n1 = find_reg_note (i1, REG_EH_REGION, 0);
981 rtx n2 = find_reg_note (i2, REG_EH_REGION, 0);
983 if (!n1 && n2)
984 return false;
986 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
987 return false;
989 if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
990 CALL_INSN_FUNCTION_USAGE (i2))
991 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2))
992 return false;
995 #ifdef STACK_REGS
996 /* If cross_jump_death_matters is not 0, the insn's mode
997 indicates whether or not the insn contains any stack-like
998 regs. */
1000 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1002 /* If register stack conversion has already been done, then
1003 death notes must also be compared before it is certain that
1004 the two instruction streams match. */
1006 rtx note;
1007 HARD_REG_SET i1_regset, i2_regset;
1009 CLEAR_HARD_REG_SET (i1_regset);
1010 CLEAR_HARD_REG_SET (i2_regset);
1012 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1013 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1014 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1016 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1017 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1018 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1020 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
1021 return false;
1023 #endif
1025 if (reload_completed
1026 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1027 return true;
1029 return false;
1032 /* When comparing insns I1 and I2 in flow_find_cross_jump or
1033 flow_find_head_matching_sequence, ensure the notes match. */
1035 static void
1036 merge_notes (rtx i1, rtx i2)
1038 /* If the merged insns have different REG_EQUAL notes, then
1039 remove them. */
1040 rtx equiv1 = find_reg_equal_equiv_note (i1);
1041 rtx equiv2 = find_reg_equal_equiv_note (i2);
1043 if (equiv1 && !equiv2)
1044 remove_note (i1, equiv1);
1045 else if (!equiv1 && equiv2)
1046 remove_note (i2, equiv2);
1047 else if (equiv1 && equiv2
1048 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1050 remove_note (i1, equiv1);
1051 remove_note (i2, equiv2);
1055 /* Look through the insns at the end of BB1 and BB2 and find the longest
1056 sequence of equivalent insns. Store the first insns of that sequence
1057 in *F1 and *F2 and return the sequence length.
1059 To simplify callers of this function, if the blocks match exactly,
1060 store the head of the blocks in *F1 and *F2. */
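/* For example, if BB1 ends with

     insn A; insn B; insn C; jump L

   and BB2 ends with

     insn X; insn B; insn C; jump L

   then (ignoring notes) the matched sequence is "insn B; insn C", *F1 and
   *F2 point at the two copies of insn B, and 2 is returned; the simple
   jumps at the ends are skipped and not counted.  */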
1062 int
1063 flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx *f1, rtx *f2)
1065 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1066 int ninsns = 0;
1068 /* Skip simple jumps at the end of the blocks. Complex jumps still
1069 need to be compared for equivalence, which we'll do below. */
1071 i1 = BB_END (bb1);
1072 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1073 if (onlyjump_p (i1)
1074 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1076 last1 = i1;
1077 i1 = PREV_INSN (i1);
1080 i2 = BB_END (bb2);
1081 if (onlyjump_p (i2)
1082 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1084 last2 = i2;
1085 /* Count everything except for the unconditional jump as an insn. */
1086 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1087 ninsns++;
1088 i2 = PREV_INSN (i2);
1091 while (true)
1093 /* Ignore notes. */
1094 while (!NONDEBUG_INSN_P (i1) && i1 != BB_HEAD (bb1))
1095 i1 = PREV_INSN (i1);
1097 while (!NONDEBUG_INSN_P (i2) && i2 != BB_HEAD (bb2))
1098 i2 = PREV_INSN (i2);
1100 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1101 break;
1103 if (!old_insns_match_p (0, i1, i2))
1104 break;
1106 merge_memattrs (i1, i2);
1108 /* Don't begin a cross-jump with a NOTE insn. */
1109 if (INSN_P (i1))
1111 merge_notes (i1, i2);
1113 afterlast1 = last1, afterlast2 = last2;
1114 last1 = i1, last2 = i2;
1115 ninsns++;
1118 i1 = PREV_INSN (i1);
1119 i2 = PREV_INSN (i2);
1122 #ifdef HAVE_cc0
1123 /* Don't allow the insn after a compare to be shared by
1124 cross-jumping unless the compare is also shared. */
1125 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1126 last1 = afterlast1, last2 = afterlast2, ninsns--;
1127 #endif
1129 /* Include preceding notes and labels in the cross-jump. One,
1130 this may bring us to the head of the blocks as requested above.
1131 Two, it keeps line number notes as matched as possible. */
1132 if (ninsns)
1134 while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
1135 last1 = PREV_INSN (last1);
1137 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1138 last1 = PREV_INSN (last1);
1140 while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
1141 last2 = PREV_INSN (last2);
1143 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1144 last2 = PREV_INSN (last2);
1146 *f1 = last1;
1147 *f2 = last2;
1150 return ninsns;
1153 /* Like flow_find_cross_jump, except start looking for a matching sequence from
1154 the head of the two blocks. Do not include jumps at the end.
1155 If STOP_AFTER is nonzero, stop after finding that many matching
1156 instructions. */
1158 int
1159 flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx *f1,
1160 rtx *f2, int stop_after)
1162 rtx i1, i2, last1, last2, beforelast1, beforelast2;
1163 int ninsns = 0;
1164 edge e;
1165 edge_iterator ei;
1166 int nehedges1 = 0, nehedges2 = 0;
1168 FOR_EACH_EDGE (e, ei, bb1->succs)
1169 if (e->flags & EDGE_EH)
1170 nehedges1++;
1171 FOR_EACH_EDGE (e, ei, bb2->succs)
1172 if (e->flags & EDGE_EH)
1173 nehedges2++;
1175 i1 = BB_HEAD (bb1);
1176 i2 = BB_HEAD (bb2);
1177 last1 = beforelast1 = last2 = beforelast2 = NULL_RTX;
1179 while (true)
1181 /* Ignore notes. */
1182 while (!NONDEBUG_INSN_P (i1) && i1 != BB_END (bb1))
1183 i1 = NEXT_INSN (i1);
1185 while (!NONDEBUG_INSN_P (i2) && i2 != BB_END (bb2))
1186 i2 = NEXT_INSN (i2);
1188 if ((i1 == BB_END (bb1) && !NONDEBUG_INSN_P (i1))
1189 || (i2 == BB_END (bb2) && !NONDEBUG_INSN_P (i2)))
1190 break;
1192 if (NOTE_P (i1) || NOTE_P (i2)
1193 || JUMP_P (i1) || JUMP_P (i2))
1194 break;
1196 /* A sanity check to make sure we're not merging insns with different
1197 effects on EH. If only one of them ends a basic block, it shouldn't
1198 have an EH edge; if both end a basic block, there should be the same
1199 number of EH edges. */
1200 if ((i1 == BB_END (bb1) && i2 != BB_END (bb2)
1201 && nehedges1 > 0)
1202 || (i2 == BB_END (bb2) && i1 != BB_END (bb1)
1203 && nehedges2 > 0)
1204 || (i1 == BB_END (bb1) && i2 == BB_END (bb2)
1205 && nehedges1 != nehedges2))
1206 break;
1208 if (!old_insns_match_p (0, i1, i2))
1209 break;
1211 merge_memattrs (i1, i2);
1213 /* Don't begin a cross-jump with a NOTE insn. */
1214 if (INSN_P (i1))
1216 merge_notes (i1, i2);
1218 beforelast1 = last1, beforelast2 = last2;
1219 last1 = i1, last2 = i2;
1220 ninsns++;
1223 if (i1 == BB_END (bb1) || i2 == BB_END (bb2)
1224 || (stop_after > 0 && ninsns == stop_after))
1225 break;
1227 i1 = NEXT_INSN (i1);
1228 i2 = NEXT_INSN (i2);
1231 #ifdef HAVE_cc0
1232 /* Don't allow a compare to be shared by cross-jumping unless the insn
1233 after the compare is also shared. */
1234 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && sets_cc0_p (last1))
1235 last1 = beforelast1, last2 = beforelast2, ninsns--;
1236 #endif
1238 if (ninsns)
1240 *f1 = last1;
1241 *f2 = last2;
1244 return ninsns;
1247 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1248 the branch instruction. This means that if we commonize the control
1249 flow before the end of the basic block, the semantics remain unchanged.
1251 We may assume that there exists one edge with a common destination. */
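/* For example, a block ending with "if (a < b) goto L1;" and falling
   through to L2 matches a block ending with "if (a >= b) goto L2;" and
   falling through to L1: the condition is reversed and the branch and
   fallthru edges are swapped, but control still reaches the same
   destinations.  */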
1253 static bool
1254 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1256 int nehedges1 = 0, nehedges2 = 0;
1257 edge fallthru1 = 0, fallthru2 = 0;
1258 edge e1, e2;
1259 edge_iterator ei;
1261 /* If BB1 has only one successor, we may be looking at either an
1262 unconditional jump, or a fake edge to exit. */
1263 if (single_succ_p (bb1)
1264 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1265 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1266 return (single_succ_p (bb2)
1267 && (single_succ_edge (bb2)->flags
1268 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1269 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1271 /* Match conditional jumps - this may get tricky when fallthru and branch
1272 edges are crossed. */
1273 if (EDGE_COUNT (bb1->succs) == 2
1274 && any_condjump_p (BB_END (bb1))
1275 && onlyjump_p (BB_END (bb1)))
1277 edge b1, f1, b2, f2;
1278 bool reverse, match;
1279 rtx set1, set2, cond1, cond2;
1280 enum rtx_code code1, code2;
1282 if (EDGE_COUNT (bb2->succs) != 2
1283 || !any_condjump_p (BB_END (bb2))
1284 || !onlyjump_p (BB_END (bb2)))
1285 return false;
1287 b1 = BRANCH_EDGE (bb1);
1288 b2 = BRANCH_EDGE (bb2);
1289 f1 = FALLTHRU_EDGE (bb1);
1290 f2 = FALLTHRU_EDGE (bb2);
1292 /* Get around possible forwarders on fallthru edges. Other cases
1293 should be optimized out already. */
1294 if (FORWARDER_BLOCK_P (f1->dest))
1295 f1 = single_succ_edge (f1->dest);
1297 if (FORWARDER_BLOCK_P (f2->dest))
1298 f2 = single_succ_edge (f2->dest);
1300 /* To simplify use of this function, return false if there are
1301 unneeded forwarder blocks. These will get eliminated later
1302 during cleanup_cfg. */
1303 if (FORWARDER_BLOCK_P (f1->dest)
1304 || FORWARDER_BLOCK_P (f2->dest)
1305 || FORWARDER_BLOCK_P (b1->dest)
1306 || FORWARDER_BLOCK_P (b2->dest))
1307 return false;
1309 if (f1->dest == f2->dest && b1->dest == b2->dest)
1310 reverse = false;
1311 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1312 reverse = true;
1313 else
1314 return false;
1316 set1 = pc_set (BB_END (bb1));
1317 set2 = pc_set (BB_END (bb2));
1318 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1319 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1320 reverse = !reverse;
1322 cond1 = XEXP (SET_SRC (set1), 0);
1323 cond2 = XEXP (SET_SRC (set2), 0);
1324 code1 = GET_CODE (cond1);
1325 if (reverse)
1326 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1327 else
1328 code2 = GET_CODE (cond2);
1330 if (code2 == UNKNOWN)
1331 return false;
1333 /* Verify codes and operands match. */
1334 match = ((code1 == code2
1335 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1336 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1337 || (code1 == swap_condition (code2)
1338 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1339 XEXP (cond2, 0))
1340 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1341 XEXP (cond2, 1))));
1343 /* If we return true, we will join the blocks, which means that
1344 we will only have one branch prediction bit to work with. Thus
1345 we require the existing branches to have probabilities that are
1346 roughly similar. */
1347 if (match
1348 && optimize_bb_for_speed_p (bb1)
1349 && optimize_bb_for_speed_p (bb2))
1351 int prob2;
1353 if (b1->dest == b2->dest)
1354 prob2 = b2->probability;
1355 else
1356 /* Do not use f2 probability as f2 may be forwarded. */
1357 prob2 = REG_BR_PROB_BASE - b2->probability;
1359 /* Fail if the difference in probabilities is greater than 50%.
1360 This rules out two well-predicted branches with opposite
1361 outcomes. */
1362 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1364 if (dump_file)
1365 fprintf (dump_file,
1366 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1367 bb1->index, bb2->index, b1->probability, prob2);
1369 return false;
1373 if (dump_file && match)
1374 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1375 bb1->index, bb2->index);
1377 return match;
1380 /* Generic case - we are seeing a computed jump, table jump or trapping
1381 instruction. */
1383 /* Check whether there are tablejumps in the end of BB1 and BB2.
1384 Return true if they are identical. */
1386 rtx label1, label2;
1387 rtx table1, table2;
1389 if (tablejump_p (BB_END (bb1), &label1, &table1)
1390 && tablejump_p (BB_END (bb2), &label2, &table2)
1391 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1393 /* The labels should never be the same rtx. If they really are the same,
1394 the jump tables are the same too. So disable crossjumping of blocks BB1
1395 and BB2 because when deleting the common insns at the end of BB1
1396 by delete_basic_block () the jump table would be deleted too. */
1397 /* If LABEL2 is referenced in BB1->END do not do anything
1398 because we would lose information when replacing
1399 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1400 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1402 /* Set IDENTICAL to true when the tables are identical. */
1403 bool identical = false;
1404 rtx p1, p2;
1406 p1 = PATTERN (table1);
1407 p2 = PATTERN (table2);
1408 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1410 identical = true;
1412 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1413 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1414 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1415 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1417 int i;
1419 identical = true;
1420 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1421 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1422 identical = false;
1425 if (identical)
1427 replace_label_data rr;
1428 bool match;
1430 /* Temporarily replace references to LABEL1 with LABEL2
1431 in BB1->END so that we could compare the instructions. */
1432 rr.r1 = label1;
1433 rr.r2 = label2;
1434 rr.update_label_nuses = false;
1435 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1437 match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1438 if (dump_file && match)
1439 fprintf (dump_file,
1440 "Tablejumps in bb %i and %i match.\n",
1441 bb1->index, bb2->index);
1443 /* Set the original label in BB1->END because when deleting
1444 a block whose end is a tablejump, the tablejump referenced
1445 from the instruction is deleted too. */
1446 rr.r1 = label2;
1447 rr.r2 = label1;
1448 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1450 return match;
1453 return false;
1457 /* First ensure that the instructions match. There may be many outgoing
1458 edges so this test is generally cheaper. */
1459 if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1460 return false;
1462 /* Search the outgoing edges, ensure that the counts do match, find possible
1463 fallthru and exception handling edges since these need more
1464 validation. */
1465 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1466 return false;
1468 FOR_EACH_EDGE (e1, ei, bb1->succs)
1470 e2 = EDGE_SUCC (bb2, ei.index);
1472 if (e1->flags & EDGE_EH)
1473 nehedges1++;
1475 if (e2->flags & EDGE_EH)
1476 nehedges2++;
1478 if (e1->flags & EDGE_FALLTHRU)
1479 fallthru1 = e1;
1480 if (e2->flags & EDGE_FALLTHRU)
1481 fallthru2 = e2;
1484 /* If number of edges of various types does not match, fail. */
1485 if (nehedges1 != nehedges2
1486 || (fallthru1 != 0) != (fallthru2 != 0))
1487 return false;
1489 /* fallthru edges must be forwarded to the same destination. */
1490 if (fallthru1)
1492 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1493 ? single_succ (fallthru1->dest): fallthru1->dest);
1494 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1495 ? single_succ (fallthru2->dest): fallthru2->dest);
1497 if (d1 != d2)
1498 return false;
1501 /* Ensure the same EH region. */
1503 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1504 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1506 if (!n1 && n2)
1507 return false;
1509 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1510 return false;
1513 /* The same checks as in try_crossjump_to_edge. They are required for the
1514 RTL version of sequence abstraction. */
1515 FOR_EACH_EDGE (e1, ei, bb2->succs)
1517 edge e2;
1518 edge_iterator ei;
1519 basic_block d1 = e1->dest;
1521 if (FORWARDER_BLOCK_P (d1))
1522 d1 = EDGE_SUCC (d1, 0)->dest;
1524 FOR_EACH_EDGE (e2, ei, bb1->succs)
1526 basic_block d2 = e2->dest;
1527 if (FORWARDER_BLOCK_P (d2))
1528 d2 = EDGE_SUCC (d2, 0)->dest;
1529 if (d1 == d2)
1530 break;
1533 if (!e2)
1534 return false;
1537 return true;
1540 /* Return true if basic block BB has a preserve label. */
1542 static bool
1543 block_has_preserve_label (basic_block bb)
1545 return (bb
1546 && block_label (bb)
1547 && LABEL_PRESERVE_P (block_label (bb)));
1550 /* E1 and E2 are edges with the same destination block. Search their
1551 predecessors for common code. If found, redirect control flow from
1552 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
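/* For example, if E1->SRC and E2->SRC both end with the same stores
   followed by equivalent conditional branches to the common destination,
   E2->SRC is split just before the shared tail, the matching tail of
   E1->SRC is cut off, and control from E1->SRC is redirected to the split
   point, so the shared insns are kept in only one place.  */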
1554 static bool
1555 try_crossjump_to_edge (int mode, edge e1, edge e2)
1557 int nmatch;
1558 basic_block src1 = e1->src, src2 = e2->src;
1559 basic_block redirect_to, redirect_from, to_remove;
1560 rtx newpos1, newpos2;
1561 edge s;
1562 edge_iterator ei;
1564 newpos1 = newpos2 = NULL_RTX;
1566 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1567 to try this optimization.
1569 Basic block partitioning may result in some jumps that appear to
1570 be optimizable (or blocks that appear to be mergeable), but which really
1571 must be left untouched (they are required to make it safely across
1572 partition boundaries). See the comments at the top of
1573 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1575 if (flag_reorder_blocks_and_partition && reload_completed)
1576 return false;
1578 /* Search backward through forwarder blocks. We don't need to worry
1579 about multiple entry or chained forwarders, as they will be optimized
1580 away. We do this to look past the unconditional jump following a
1581 conditional jump that is required due to the current CFG shape. */
1582 if (single_pred_p (src1)
1583 && FORWARDER_BLOCK_P (src1))
1584 e1 = single_pred_edge (src1), src1 = e1->src;
1586 if (single_pred_p (src2)
1587 && FORWARDER_BLOCK_P (src2))
1588 e2 = single_pred_edge (src2), src2 = e2->src;
1590 /* Nothing to do if we reach ENTRY, or a common source block. */
1591 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1592 return false;
1593 if (src1 == src2)
1594 return false;
1596 /* Seeing more than one forwarder block would confuse us later... */
1597 if (FORWARDER_BLOCK_P (e1->dest)
1598 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1599 return false;
1601 if (FORWARDER_BLOCK_P (e2->dest)
1602 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1603 return false;
1605 /* Likewise with dead code (possibly newly created by the other optimizations
1606 of cfg_cleanup). */
1607 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1608 return false;
1610 /* Look for the common insn sequence, part the first ... */
1611 if (!outgoing_edges_match (mode, src1, src2))
1612 return false;
1614 /* ... and part the second. */
1615 nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2);
1617 /* Don't proceed with the crossjump unless we found a sufficient number
1618 of matching instructions or the 'from' block was totally matched
1619 (such that its predecessors will hopefully be redirected and the
1620 block removed). */
1621 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1622 && (newpos1 != BB_HEAD (src1)))
1623 return false;
1625 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1626 if (block_has_preserve_label (e1->dest)
1627 && (e1->flags & EDGE_ABNORMAL))
1628 return false;
1630 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1631 will be deleted.
1632 If we have tablejumps at the end of SRC1 and SRC2,
1633 they have already been compared for equivalence in outgoing_edges_match (),
1634 so replace the references to TABLE1 by references to TABLE2. */
1636 rtx label1, label2;
1637 rtx table1, table2;
1639 if (tablejump_p (BB_END (src1), &label1, &table1)
1640 && tablejump_p (BB_END (src2), &label2, &table2)
1641 && label1 != label2)
1643 replace_label_data rr;
1644 rtx insn;
1646 /* Replace references to LABEL1 with LABEL2. */
1647 rr.r1 = label1;
1648 rr.r2 = label2;
1649 rr.update_label_nuses = true;
1650 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1652 /* Do not replace the label in SRC1->END because when deleting
1653 a block whose end is a tablejump, the tablejump referenced
1654 from the instruction is deleted too. */
1655 if (insn != BB_END (src1))
1656 for_each_rtx (&insn, replace_label, &rr);
1661 /* Avoid splitting if possible. We must always split when SRC2 has
1662 EH predecessor edges, or we may end up with basic blocks with both
1663 normal and EH predecessor edges. */
1664 if (newpos2 == BB_HEAD (src2)
1665 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1666 redirect_to = src2;
1667 else
1669 if (newpos2 == BB_HEAD (src2))
1671 /* Skip possible basic block header. */
1672 if (LABEL_P (newpos2))
1673 newpos2 = NEXT_INSN (newpos2);
1674 while (DEBUG_INSN_P (newpos2))
1675 newpos2 = NEXT_INSN (newpos2);
1676 if (NOTE_P (newpos2))
1677 newpos2 = NEXT_INSN (newpos2);
1678 while (DEBUG_INSN_P (newpos2))
1679 newpos2 = NEXT_INSN (newpos2);
1682 if (dump_file)
1683 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1684 src2->index, nmatch);
1685 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1688 if (dump_file)
1689 fprintf (dump_file,
1690 "Cross jumping from bb %i to bb %i; %i common insns\n",
1691 src1->index, src2->index, nmatch);
1693 /* We may have some registers visible through the block. */
1694 df_set_bb_dirty (redirect_to);
1696 /* Recompute the frequencies and counts of outgoing edges. */
1697 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1699 edge s2;
1700 edge_iterator ei;
1701 basic_block d = s->dest;
1703 if (FORWARDER_BLOCK_P (d))
1704 d = single_succ (d);
1706 FOR_EACH_EDGE (s2, ei, src1->succs)
1708 basic_block d2 = s2->dest;
1709 if (FORWARDER_BLOCK_P (d2))
1710 d2 = single_succ (d2);
1711 if (d == d2)
1712 break;
1715 s->count += s2->count;
1717 /* Take care to update possible forwarder blocks. We verified
1718 that there is no more than one in the chain, so we can't run
1719 into an infinite loop. */
1720 if (FORWARDER_BLOCK_P (s->dest))
1722 single_succ_edge (s->dest)->count += s2->count;
1723 s->dest->count += s2->count;
1724 s->dest->frequency += EDGE_FREQUENCY (s);
1727 if (FORWARDER_BLOCK_P (s2->dest))
1729 single_succ_edge (s2->dest)->count -= s2->count;
1730 if (single_succ_edge (s2->dest)->count < 0)
1731 single_succ_edge (s2->dest)->count = 0;
1732 s2->dest->count -= s2->count;
1733 s2->dest->frequency -= EDGE_FREQUENCY (s);
1734 if (s2->dest->frequency < 0)
1735 s2->dest->frequency = 0;
1736 if (s2->dest->count < 0)
1737 s2->dest->count = 0;
1740 if (!redirect_to->frequency && !src1->frequency)
1741 s->probability = (s->probability + s2->probability) / 2;
1742 else
1743 s->probability
1744 = ((s->probability * redirect_to->frequency +
1745 s2->probability * src1->frequency)
1746 / (redirect_to->frequency + src1->frequency));
1749 /* Adjust count and frequency for the block. An earlier jump
1750 threading pass may have left the profile in an inconsistent
1751 state (see update_bb_profile_for_threading) so we must be
1752 prepared for overflows. */
1753 redirect_to->count += src1->count;
1754 redirect_to->frequency += src1->frequency;
1755 if (redirect_to->frequency > BB_FREQ_MAX)
1756 redirect_to->frequency = BB_FREQ_MAX;
1757 update_br_prob_note (redirect_to);
1759 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1761 /* Skip possible basic block header. */
1762 if (LABEL_P (newpos1))
1763 newpos1 = NEXT_INSN (newpos1);
1765 while (DEBUG_INSN_P (newpos1))
1766 newpos1 = NEXT_INSN (newpos1);
1768 if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
1769 newpos1 = NEXT_INSN (newpos1);
1771 while (DEBUG_INSN_P (newpos1))
1772 newpos1 = NEXT_INSN (newpos1);
1774 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1775 to_remove = single_succ (redirect_from);
1777 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1778 delete_basic_block (to_remove);
1780 update_forwarder_flag (redirect_from);
1781 if (redirect_to != src2)
1782 update_forwarder_flag (src2);
1784 return true;
1787 /* Search the predecessors of BB for common insn sequences. When found,
1788 share code between them by redirecting control flow. Return true if
1789 any changes were made. */
1791 static bool
1792 try_crossjump_bb (int mode, basic_block bb)
1794 edge e, e2, fallthru;
1795 bool changed;
1796 unsigned max, ix, ix2;
1797 basic_block ev, ev2;
1799 /* Nothing to do if there are not at least two incoming edges. */
1800 if (EDGE_COUNT (bb->preds) < 2)
1801 return false;
1803 /* Don't crossjump if this block ends in a computed jump,
1804 unless we are optimizing for size. */
1805 if (optimize_bb_for_size_p (bb)
1806 && bb != EXIT_BLOCK_PTR
1807 && computed_jump_p (BB_END (bb)))
1808 return false;
1810 /* If we are partitioning hot/cold basic blocks, we don't want to
1811 mess up unconditional or indirect jumps that cross between hot
1812 and cold sections.
1814 Basic block partitioning may result in some jumps that appear to
1815 be optimizable (or blocks that appear to be mergeable), but which really
1816 must be left untouched (they are required to make it safely across
1817 partition boundaries). See the comments at the top of
1818 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1820 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1821 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1822 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1823 return false;
1825 /* It is always cheapest to redirect a block that ends in a branch to
1826 a block that falls through into BB, as that adds no branches to the
1827 program. We'll try that combination first. */
1828 fallthru = NULL;
1829 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1831 if (EDGE_COUNT (bb->preds) > max)
1832 return false;
1834 fallthru = find_fallthru_edge (bb->preds);
1836 changed = false;
1837 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1839 e = EDGE_PRED (ev, ix);
1840 ix++;
1842 /* As noted above, first try with the fallthru predecessor (or, a
1843 fallthru predecessor if we are in cfglayout mode). */
1844 if (fallthru)
1846 /* Don't combine the fallthru edge into anything else.
1847 If there is a match, we'll do it the other way around. */
1848 if (e == fallthru)
1849 continue;
1850 /* If nothing changed since the last attempt, there is nothing
1851 we can do. */
1852 if (!first_pass
1853 && (!(df_get_bb_dirty (e->src))
1854 && !(df_get_bb_dirty (fallthru->src))))
1855 continue;
1857 if (try_crossjump_to_edge (mode, e, fallthru))
1859 changed = true;
1860 ix = 0;
1861 ev = bb;
1862 continue;
1866 /* Non-obvious work limiting check: Recognize that we're going
1867 to call try_crossjump_bb on every basic block. So if we have
1868 two blocks with lots of outgoing edges (a switch) and they
1869 share lots of common destinations, then we would do the
1870 cross-jump check once for each common destination.
1872 Now, if the blocks actually are cross-jump candidates, then
1873 all of their destinations will be shared, which means that
1874 we only need to check them for cross-jump candidacy once. We
1875 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1876 choosing to do the check from the block for which the edge
1877 in question is the first successor of A. */
1878 if (EDGE_SUCC (e->src, 0) != e)
1879 continue;
1881 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1883 e2 = EDGE_PRED (ev2, ix2);
1884 ix2++;
1886 if (e2 == e)
1887 continue;
1889 /* We've already checked the fallthru edge above. */
1890 if (e2 == fallthru)
1891 continue;
1893 /* The "first successor" check above only prevents multiple
1894 checks of crossjump(A,B). In order to prevent redundant
1895 checks of crossjump(B,A), require that A be the block
1896 with the lowest index. */
1897 if (e->src->index > e2->src->index)
1898 continue;
1900 /* If nothing changed since the last attempt, there is nothing
1901 we can do. */
1902 if (!first_pass
1903 && (!(df_get_bb_dirty (e->src))
1904 && !(df_get_bb_dirty (e2->src))))
1905 continue;
1907 if (try_crossjump_to_edge (mode, e, e2))
1909 changed = true;
1910 ev2 = bb;
1911 ix = 0;
1912 break;
1917 if (changed)
1918 crossjumps_occured = true;
1920 return changed;
1923 /* Return true if BB contains just a bb note, or a bb note followed
1924 only by DEBUG_INSNs. */
1926 static bool
1927 trivially_empty_bb_p (basic_block bb)
1929 rtx insn = BB_END (bb);
1931 while (1)
1933 if (insn == BB_HEAD (bb))
1934 return true;
1935 if (!DEBUG_INSN_P (insn))
1936 return false;
1937 insn = PREV_INSN (insn);
1941 /* Do simple CFG optimizations - basic block merging, simplification of jump
1942 instructions, etc. Return nonzero if changes were made. */
1944 static bool
1945 try_optimize_cfg (int mode)
1947 bool changed_overall = false;
1948 bool changed;
1949 int iterations = 0;
1950 basic_block bb, b, next;
1952 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1953 clear_bb_flags ();
1955 crossjumps_occured = false;
1957 FOR_EACH_BB (bb)
1958 update_forwarder_flag (bb);
1960 if (! targetm.cannot_modify_jumps_p ())
1962 first_pass = true;
1963 /* Attempt to merge blocks as made possible by edge removal. If
1964 a block has only one successor, and the successor has only
1965 one predecessor, they may be combined. */
1968 changed = false;
1969 iterations++;
1971 if (dump_file)
1972 fprintf (dump_file,
1973 "\n\ntry_optimize_cfg iteration %i\n\n",
1974 iterations);
1976 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1978 basic_block c;
1979 edge s;
1980 bool changed_here = false;
1982 /* Delete trivially dead basic blocks. These are either
1983 blocks with no predecessors, or empty blocks with no
1984 successors. However, if an empty block with no
1985 successors is the successor of the ENTRY_BLOCK, it is
1986 kept. This ensures that the ENTRY_BLOCK will have a
1987 successor, which is a precondition for many RTL
1988 passes. Empty blocks may result from expanding
1989 __builtin_unreachable (). */
1990 if (EDGE_COUNT (b->preds) == 0
1991 || (EDGE_COUNT (b->succs) == 0
1992 && trivially_empty_bb_p (b)
1993 && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
1995 c = b->prev_bb;
1996 if (EDGE_COUNT (b->preds) > 0)
1998 edge e;
1999 edge_iterator ei;
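/* If B ends in a barrier, each fall-through predecessor must keep a
   barrier of its own once B is gone: in cfglayout mode the barrier
   lives in the block's footer and is moved (or a fresh one emitted)
   into the predecessor's footer, otherwise a barrier is emitted
   directly after the predecessor's last insn.  */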
2001 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2003 if (b->il.rtl->footer
2004 && BARRIER_P (b->il.rtl->footer))
2005 FOR_EACH_EDGE (e, ei, b->preds)
2006 if ((e->flags & EDGE_FALLTHRU)
2007 && e->src->il.rtl->footer == NULL)
2009 if (b->il.rtl->footer)
2011 e->src->il.rtl->footer = b->il.rtl->footer;
2012 b->il.rtl->footer = NULL;
2014 else
2016 start_sequence ();
2017 e->src->il.rtl->footer = emit_barrier ();
2018 end_sequence ();
2022 else
2024 rtx last = get_last_bb_insn (b);
2025 if (last && BARRIER_P (last))
2026 FOR_EACH_EDGE (e, ei, b->preds)
2027 if ((e->flags & EDGE_FALLTHRU))
2028 emit_barrier_after (BB_END (e->src));
2031 delete_basic_block (b);
2032 if (!(mode & CLEANUP_CFGLAYOUT))
2033 changed = true;
2034 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
2035 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
2036 continue;
2039 /* Remove code labels no longer used. */
2040 if (single_pred_p (b)
2041 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2042 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2043 && LABEL_P (BB_HEAD (b))
2044 /* If the previous block ends with a branch to this
2045 block, we can't delete the label. Normally this
2046 is a condjump that is yet to be simplified, but
2047 if CASE_DROPS_THRU, this can be a tablejump with
2048 some element going to the same place as the
2049 default (fallthru). */
2050 && (single_pred (b) == ENTRY_BLOCK_PTR
2051 || !JUMP_P (BB_END (single_pred (b)))
2052 || ! label_is_jump_target_p (BB_HEAD (b),
2053 BB_END (single_pred (b)))))
2055 rtx label = BB_HEAD (b);
2057 delete_insn_chain (label, label, false);
2058 /* If the case label is undeletable, move it after the
2059 BASIC_BLOCK note. */
2060 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2062 rtx bb_note = NEXT_INSN (BB_HEAD (b));
2064 reorder_insns_nobb (label, label, bb_note);
2065 BB_HEAD (b) = bb_note;
2066 if (BB_END (b) == bb_note)
2067 BB_END (b) = label;
2069 if (dump_file)
2070 fprintf (dump_file, "Deleted label in block %i.\n",
2071 b->index);
2074 /* If we fall through an empty block, we can remove it. */
2075 if (!(mode & CLEANUP_CFGLAYOUT)
2076 && single_pred_p (b)
2077 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2078 && !LABEL_P (BB_HEAD (b))
2079 && FORWARDER_BLOCK_P (b)
2080 /* Note that forwarder_block_p being true ensures
2081 that this block has a successor. */
2082 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2083 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2085 if (dump_file)
2086 fprintf (dump_file,
2087 "Deleting fallthru block %i.\n",
2088 b->index);
2090 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2091 redirect_edge_succ_nodup (single_pred_edge (b),
2092 single_succ (b));
2093 delete_basic_block (b);
2094 changed = true;
2095 b = c;
2096 continue;
2099 /* Merge B with its single successor, if any. */
2100 if (single_succ_p (b)
2101 && (s = single_succ_edge (b))
2102 && !(s->flags & EDGE_COMPLEX)
2103 && (c = s->dest) != EXIT_BLOCK_PTR
2104 && single_pred_p (c)
2105 && b != c)
2107 /* When not in cfg_layout mode, use code aware of reordering
2108 INSNs. This code may create new basic blocks, so it
2109 does not fit the merge_blocks interface and is kept here in
2110 the hope that it will become useless once more of the compiler
2111 is transformed to use cfg_layout mode. */
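/* Two paths follow: in cfglayout mode merge_blocks is used directly
   when can_merge_blocks_p allows it; otherwise merge_blocks_move is
   tried, but only when any jump ending B can safely be discarded
   (before reload: a side-effect-free jump that is not a tablejump;
   after reload: only an unconditional simplejump).  */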
2113 if ((mode & CLEANUP_CFGLAYOUT)
2114 && can_merge_blocks_p (b, c))
2116 merge_blocks (b, c);
2117 update_forwarder_flag (b);
2118 changed_here = true;
2120 else if (!(mode & CLEANUP_CFGLAYOUT)
2121 /* If the jump insn has side effects,
2122 we can't kill the edge. */
2123 && (!JUMP_P (BB_END (b))
2124 || (reload_completed
2125 ? simplejump_p (BB_END (b))
2126 : (onlyjump_p (BB_END (b))
2127 && !tablejump_p (BB_END (b),
2128 NULL, NULL))))
2129 && (next = merge_blocks_move (s, b, c, mode)))
2131 b = next;
2132 changed_here = true;
2136 /* Simplify branch over branch. */
2137 if ((mode & CLEANUP_EXPENSIVE)
2138 && !(mode & CLEANUP_CFGLAYOUT)
2139 && try_simplify_condjump (b))
2140 changed_here = true;
2142 /* If B has a single outgoing edge, but uses a
2143 non-trivial jump instruction without side-effects, we
2144 can either delete the jump entirely, or replace it
2145 with a simple unconditional jump. */
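/* For instance, a conditional jump whose two arms lead to the same
   block leaves B with a single successor;
   try_redirect_by_replacing_jump below then either removes the jump
   or replaces it with a simplejump.  */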
2146 if (single_succ_p (b)
2147 && single_succ (b) != EXIT_BLOCK_PTR
2148 && onlyjump_p (BB_END (b))
2149 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2150 && try_redirect_by_replacing_jump (single_succ_edge (b),
2151 single_succ (b),
2152 (mode & CLEANUP_CFGLAYOUT) != 0))
2154 update_forwarder_flag (b);
2155 changed_here = true;
2158 /* Simplify branch to branch. */
2159 if (try_forward_edges (mode, b))
2160 changed_here = true;
2162 /* Look for shared code between blocks. */
2163 if ((mode & CLEANUP_CROSSJUMP)
2164 && try_crossjump_bb (mode, b))
2165 changed_here = true;
2167 /* Don't get confused by the index shift caused by
2168 deleting blocks. */
2169 if (!changed_here)
2170 b = b->next_bb;
2171 else
2172 changed = true;
2175 if ((mode & CLEANUP_CROSSJUMP)
2176 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2177 changed = true;
2179 #ifdef ENABLE_CHECKING
2180 if (changed)
2181 verify_flow_info ();
2182 #endif
2184 changed_overall |= changed;
2185 first_pass = false;
2187 while (changed);
2190 FOR_ALL_BB (b)
2191 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2193 return changed_overall;
2196 /* Delete all unreachable basic blocks. */
2198 bool
2199 delete_unreachable_blocks (void)
2201 bool changed = false;
2202 basic_block b, prev_bb;
2204 find_unreachable_blocks ();
2206 /* When we're in GIMPLE mode and there may be debug insns, we should
2207 delete blocks in reverse dominator order, so as to get a chance
2208 to substitute all released DEFs into debug stmts. If we don't
2209 have dominators information, walking blocks backward gets us a
2210 better chance of retaining most debug information than
2211 otherwise. */
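/* An unreachable block that still dominates other blocks is removed
   together with every block it dominates; those blocks are necessarily
   unreachable as well (the gcc_assert below checks this).  */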
2212 if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
2213 && dom_info_available_p (CDI_DOMINATORS))
2215 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2217 prev_bb = b->prev_bb;
2219 if (!(b->flags & BB_REACHABLE))
2221 /* Speed up the removal of blocks that don't dominate
2222 others. Walking backwards, this should be the common
2223 case. */
2224 if (!first_dom_son (CDI_DOMINATORS, b))
2225 delete_basic_block (b);
2226 else
2228 VEC (basic_block, heap) *h
2229 = get_all_dominated_blocks (CDI_DOMINATORS, b);
2231 while (VEC_length (basic_block, h))
2233 b = VEC_pop (basic_block, h);
2235 prev_bb = b->prev_bb;
2237 gcc_assert (!(b->flags & BB_REACHABLE));
2239 delete_basic_block (b);
2242 VEC_free (basic_block, heap, h);
2245 changed = true;
2249 else
2251 for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
2253 prev_bb = b->prev_bb;
2255 if (!(b->flags & BB_REACHABLE))
2257 delete_basic_block (b);
2258 changed = true;
2263 if (changed)
2264 tidy_fallthru_edges ();
2265 return changed;
2268 /* Delete any jump tables that are never referenced. We can't delete them
2269 at the time of removing the tablejump insn, as they are referenced by the
2270 preceding insns computing the destination, so we delay deletion and
2271 garbage-collect them once life information is computed. */
2272 void
2273 delete_dead_jumptables (void)
2275 basic_block bb;
2277 /* A dead jump table does not belong to any basic block. Scan insns
2278 between two adjacent basic blocks. */
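/* When a label immediately followed by JUMP_TABLE_DATA has no uses
   beyond what LABEL_PRESERVE_P accounts for, both the table and the
   label are deleted and the scan continues with the insn following
   the table.  */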
2279 FOR_EACH_BB (bb)
2281 rtx insn, next;
2283 for (insn = NEXT_INSN (BB_END (bb));
2284 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2285 insn = next)
2287 next = NEXT_INSN (insn);
2288 if (LABEL_P (insn)
2289 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2290 && JUMP_TABLE_DATA_P (next))
2292 rtx label = insn, jump = next;
2294 if (dump_file)
2295 fprintf (dump_file, "Dead jumptable %i removed\n",
2296 INSN_UID (insn));
2298 next = NEXT_INSN (next);
2299 delete_insn (jump);
2300 delete_insn (label);
2307 /* Tidy the CFG by deleting unreachable code and whatnot. */
2309 bool
2310 cleanup_cfg (int mode)
2312 bool changed = false;
2314 /* Set the cfglayout mode flag here. We could update all the callers
2315 but that is just inconvenient, especially given that we eventually
2316 want to have cfglayout mode as the default. */
2317 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2318 mode |= CLEANUP_CFGLAYOUT;
2320 timevar_push (TV_CLEANUP_CFG);
2321 if (delete_unreachable_blocks ())
2323 changed = true;
2324 /* We've possibly created trivially dead code. Clean it up right
2325 now to introduce more opportunities for try_optimize_cfg. */
2326 if (!(mode & (CLEANUP_NO_INSN_DEL))
2327 && !reload_completed)
2328 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2331 compact_blocks ();
2333 /* To tail-merge blocks ending in the same noreturn function (e.g.
2334 a call to abort) we have to insert fake edges to exit. Do this
2335 here once. The fake edges do not interfere with any other CFG
2336 cleanups. */
2337 if (mode & CLEANUP_CROSSJUMP)
2338 add_noreturn_fake_exit_edges ();
2340 if (!dbg_cnt (cfg_cleanup))
2341 return changed;
2343 while (try_optimize_cfg (mode))
2345 delete_unreachable_blocks (), changed = true;
2346 if (!(mode & CLEANUP_NO_INSN_DEL))
2348 /* Try to remove some trivially dead insns when doing an expensive
2349 cleanup. But delete_trivially_dead_insns doesn't work after
2350 reload (it only handles pseudos) and run_fast_dce is too costly
2351 to run in every iteration.
2353 For effective cross jumping, we really want to run a fast DCE to
2354 clean up any dead conditions, or they get in the way of performing
2355 useful tail merges.
2357 Other transformations in cleanup_cfg are not so sensitive to dead
2358 code, so delete_trivially_dead_insns or even doing nothing at all
2359 is good enough. */
2360 if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
2361 && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2362 break;
2363 else if ((mode & CLEANUP_CROSSJUMP)
2364 && crossjumps_occured)
2365 run_fast_dce ();
2367 else
2368 break;
2371 if (mode & CLEANUP_CROSSJUMP)
2372 remove_fake_exit_edges ();
2374 /* Don't call delete_dead_jumptables in cfglayout mode, because
2375 that function assumes that jump tables are in the insns stream.
2376 But we also don't _have_ to delete dead jumptables in cfglayout
2377 mode because we shouldn't even be looking at things that are
2378 not in a basic block. Dead jumptables are cleaned up when
2379 going out of cfglayout mode. */
2380 if (!(mode & CLEANUP_CFGLAYOUT))
2381 delete_dead_jumptables ();
2383 timevar_pop (TV_CLEANUP_CFG);
2385 return changed;
2388 static unsigned int
2389 rest_of_handle_jump (void)
2391 if (crtl->tail_call_emit)
2392 fixup_tail_calls ();
2393 return 0;
2396 struct rtl_opt_pass pass_jump =
2399 RTL_PASS,
2400 "sibling", /* name */
2401 NULL, /* gate */
2402 rest_of_handle_jump, /* execute */
2403 NULL, /* sub */
2404 NULL, /* next */
2405 0, /* static_pass_number */
2406 TV_JUMP, /* tv_id */
2407 0, /* properties_required */
2408 0, /* properties_provided */
2409 0, /* properties_destroyed */
2410 TODO_ggc_collect, /* todo_flags_start */
2411 TODO_verify_flow, /* todo_flags_finish */
2416 static unsigned int
2417 rest_of_handle_jump2 (void)
2419 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2420 if (dump_file)
2421 dump_flow_info (dump_file, dump_flags);
2422 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2423 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2424 return 0;
2428 struct rtl_opt_pass pass_jump2 =
2431 RTL_PASS,
2432 "jump", /* name */
2433 NULL, /* gate */
2434 rest_of_handle_jump2, /* execute */
2435 NULL, /* sub */
2436 NULL, /* next */
2437 0, /* static_pass_number */
2438 TV_JUMP, /* tv_id */
2439 0, /* properties_required */
2440 0, /* properties_provided */
2441 0, /* properties_destroyed */
2442 TODO_ggc_collect, /* todo_flags_start */
2443 TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */