gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 /* This file contains the control flow optimizer.  The main entry point is
23 cleanup_cfg.  The following optimizations are performed:
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor.  Simplification of the branch instruction is performed by the
28 underlying infrastructure so the branch can be converted to a simplejump or
29 eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
58 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
60 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
61 static bool first_pass;
62 static bool try_crossjump_to_edge (int, edge, edge);
63 static bool try_crossjump_bb (int, basic_block);
64 static bool outgoing_edges_match (int, basic_block, basic_block);
65 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
66 static bool old_insns_match_p (int, rtx, rtx);
68 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
69 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
70 static bool try_optimize_cfg (int);
71 static bool try_simplify_condjump (basic_block);
72 static bool try_forward_edges (int, basic_block);
73 static edge thread_jump (edge, basic_block);
74 static bool mark_effect (rtx, bitmap);
75 static void notice_new_block (basic_block);
76 static void update_forwarder_flag (basic_block);
77 static int mentions_nonequal_regs (rtx *, void *);
78 static void merge_memattrs (rtx, rtx);
80 /* Set flags for a newly created block.  */
82 static void
83 notice_new_block (basic_block bb)
85 if (!bb)
86 return;
88 if (forwarder_block_p (bb))
89 bb->flags |= BB_FORWARDER_BLOCK;
92 /* Recompute the forwarder flag after the block has been modified.  */
94 static void
95 update_forwarder_flag (basic_block bb)
97 if (forwarder_block_p (bb))
98 bb->flags |= BB_FORWARDER_BLOCK;
99 else
100 bb->flags &= ~BB_FORWARDER_BLOCK;
103 /* Simplify a conditional jump around an unconditional jump.
104 Return true if something changed. */
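/* For example, the sequence

     if (test) goto A;
     goto B;
   A: ...

   becomes

     if (!test) goto B;
   A: ...

   after the condition is inverted and the forwarder block holding the
   unconditional jump is deleted.  */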
106 static bool
107 try_simplify_condjump (basic_block cbranch_block)
109 basic_block jump_block, jump_dest_block, cbranch_dest_block;
110 edge cbranch_jump_edge, cbranch_fallthru_edge;
111 rtx cbranch_insn;
113 /* Verify that there are exactly two successors. */
114 if (EDGE_COUNT (cbranch_block->succs) != 2)
115 return false;
117 /* Verify that we've got a normal conditional branch at the end
118 of the block. */
119 cbranch_insn = BB_END (cbranch_block);
120 if (!any_condjump_p (cbranch_insn))
121 return false;
123 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
124 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
126 /* The next block must not have multiple predecessors, must not
127 be the last block in the function, and must contain just the
128 unconditional jump. */
129 jump_block = cbranch_fallthru_edge->dest;
130 if (!single_pred_p (jump_block)
131 || jump_block->next_bb == EXIT_BLOCK_PTR
132 || !FORWARDER_BLOCK_P (jump_block))
133 return false;
134 jump_dest_block = single_succ (jump_block);
136 /* If we are partitioning hot/cold basic blocks, we don't want to
137 mess up unconditional or indirect jumps that cross between hot
138 and cold sections.
140 Basic block partitioning may result in some jumps that appear to
141 be optimizable (or blocks that appear to be mergeable), but which really
142 must be left untouched (they are required to make it safely across
143 partition boundaries). See the comments at the top of
144 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
146 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
147 || (cbranch_jump_edge->flags & EDGE_CROSSING))
148 return false;
150 /* The conditional branch must target the block after the
151 unconditional branch. */
152 cbranch_dest_block = cbranch_jump_edge->dest;
154 if (cbranch_dest_block == EXIT_BLOCK_PTR
155 || !can_fallthru (jump_block, cbranch_dest_block))
156 return false;
158 /* Invert the conditional branch. */
159 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
160 return false;
162 if (dump_file)
163 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
164 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
166 /* Success. Update the CFG to match. Note that after this point
167 the edge variable names appear backwards; the redirection is done
168 this way to preserve edge profile data. */
169 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
170 cbranch_dest_block);
171 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
172 jump_dest_block);
173 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
174 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
175 update_br_prob_note (cbranch_block);
177 /* Delete the block with the unconditional jump, and clean up the mess. */
178 delete_basic_block (jump_block);
179 tidy_fallthru_edge (cbranch_jump_edge);
180 update_forwarder_flag (cbranch_block);
182 return true;
185 /* Attempt to prove that the operation is a NOOP using cselib, or mark its
186 effect on a register.  Used by jump threading.  */
188 static bool
189 mark_effect (rtx exp, regset nonequal)
191 int regno;
192 rtx dest;
193 switch (GET_CODE (exp))
195 /* In case we do clobber the register, mark it as equal, as we know the
196 value is dead so it doesn't have to match.  */
197 case CLOBBER:
198 if (REG_P (XEXP (exp, 0)))
200 dest = XEXP (exp, 0);
201 regno = REGNO (dest);
202 CLEAR_REGNO_REG_SET (nonequal, regno);
203 if (regno < FIRST_PSEUDO_REGISTER)
205 int n = hard_regno_nregs[regno][GET_MODE (dest)];
206 while (--n > 0)
207 CLEAR_REGNO_REG_SET (nonequal, regno + n);
210 return false;
212 case SET:
213 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
214 return false;
215 dest = SET_DEST (exp);
216 if (dest == pc_rtx)
217 return false;
218 if (!REG_P (dest))
219 return true;
220 regno = REGNO (dest);
221 SET_REGNO_REG_SET (nonequal, regno);
222 if (regno < FIRST_PSEUDO_REGISTER)
224 int n = hard_regno_nregs[regno][GET_MODE (dest)];
225 while (--n > 0)
226 SET_REGNO_REG_SET (nonequal, regno + n);
228 return false;
230 default:
231 return false;
235 /* Return nonzero if X is a register set in regset DATA.
236 Called via for_each_rtx. */
237 static int
238 mentions_nonequal_regs (rtx *x, void *data)
240 regset nonequal = (regset) data;
241 if (REG_P (*x))
243 int regno;
245 regno = REGNO (*x);
246 if (REGNO_REG_SET_P (nonequal, regno))
247 return 1;
248 if (regno < FIRST_PSEUDO_REGISTER)
250 int n = hard_regno_nregs[regno][GET_MODE (*x)];
251 while (--n > 0)
252 if (REGNO_REG_SET_P (nonequal, regno + n))
253 return 1;
256 return 0;
258 /* Attempt to prove that the basic block B will have no side effects and
259 always continues along the same edge if reached via E.  Return that edge
260 if it exists, NULL otherwise.  */
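/* For example, if E is the taken edge of "if (x > 0) goto B;" and block B
   contains nothing but "if (x > 0) goto C;", then reaching B via E guarantees
   that B's branch is taken as well, so the caller can redirect E straight
   to C.  */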
262 static edge
263 thread_jump (edge e, basic_block b)
265 rtx set1, set2, cond1, cond2, insn;
266 enum rtx_code code1, code2, reversed_code2;
267 bool reverse1 = false;
268 unsigned i;
269 regset nonequal;
270 bool failed = false;
271 reg_set_iterator rsi;
273 if (b->flags & BB_NONTHREADABLE_BLOCK)
274 return NULL;
276 /* At the moment we handle only conditional jumps, but later we may
277 want to extend this code to tablejumps and others.  */
278 if (EDGE_COUNT (e->src->succs) != 2)
279 return NULL;
280 if (EDGE_COUNT (b->succs) != 2)
282 b->flags |= BB_NONTHREADABLE_BLOCK;
283 return NULL;
286 /* The second branch must end with an onlyjump, as we will eliminate the jump.  */
287 if (!any_condjump_p (BB_END (e->src)))
288 return NULL;
290 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
292 b->flags |= BB_NONTHREADABLE_BLOCK;
293 return NULL;
296 set1 = pc_set (BB_END (e->src));
297 set2 = pc_set (BB_END (b));
298 if (((e->flags & EDGE_FALLTHRU) != 0)
299 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
300 reverse1 = true;
302 cond1 = XEXP (SET_SRC (set1), 0);
303 cond2 = XEXP (SET_SRC (set2), 0);
304 if (reverse1)
305 code1 = reversed_comparison_code (cond1, BB_END (e->src));
306 else
307 code1 = GET_CODE (cond1);
309 code2 = GET_CODE (cond2);
310 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
312 if (!comparison_dominates_p (code1, code2)
313 && !comparison_dominates_p (code1, reversed_code2))
314 return NULL;
316 /* Ensure that the comparison operators are equivalent.
317 ??? This is far too pessimistic.  We should allow swapped operands,
318 different CCmodes, or, for example, interval comparisons that
319 dominate even when the operands are not equivalent.  */
320 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
321 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
322 return NULL;
324 /* Short circuit cases where block B contains some side effects, as we can't
325 safely bypass it. */
326 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
327 insn = NEXT_INSN (insn))
328 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
330 b->flags |= BB_NONTHREADABLE_BLOCK;
331 return NULL;
334 cselib_init (false);
336 /* First process all values computed in the source basic block. */
337 for (insn = NEXT_INSN (BB_HEAD (e->src));
338 insn != NEXT_INSN (BB_END (e->src));
339 insn = NEXT_INSN (insn))
340 if (INSN_P (insn))
341 cselib_process_insn (insn);
343 nonequal = BITMAP_ALLOC (NULL);
344 CLEAR_REG_SET (nonequal);
346 /* Now assume that we've continued via the edge E to B and continue
347 processing as if it were the same basic block.
348 Our goal is to prove that the whole block is a NOOP.  */
350 for (insn = NEXT_INSN (BB_HEAD (b));
351 insn != NEXT_INSN (BB_END (b)) && !failed;
352 insn = NEXT_INSN (insn))
354 if (INSN_P (insn))
356 rtx pat = PATTERN (insn);
358 if (GET_CODE (pat) == PARALLEL)
360 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
361 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
363 else
364 failed |= mark_effect (pat, nonequal);
367 cselib_process_insn (insn);
370 /* Later we should clear the dead registers from NONEQUAL.  So far we don't
371 have liveness information in cfg_cleanup.  */
372 if (failed)
374 b->flags |= BB_NONTHREADABLE_BLOCK;
375 goto failed_exit;
378 /* cond2 must not mention any register whose value is not known to be
379 equal to its value in the former block.  */
380 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
381 goto failed_exit;
383 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
384 goto failed_exit;
386 BITMAP_FREE (nonequal);
387 cselib_finish ();
388 if ((comparison_dominates_p (code1, code2) != 0)
389 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
390 return BRANCH_EDGE (b);
391 else
392 return FALLTHRU_EDGE (b);
394 failed_exit:
395 BITMAP_FREE (nonequal);
396 cselib_finish ();
397 return NULL;
400 /* Attempt to forward edges leaving basic block B.
401 Return true if successful. */
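/* For example, if an edge leaving B reaches a forwarder block F whose only
   job is to jump to D, the edge is redirected to D directly.  With
   CLEANUP_THREADING the walk also follows edges proved by thread_jump.  */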
403 static bool
404 try_forward_edges (int mode, basic_block b)
406 bool changed = false;
407 edge_iterator ei;
408 edge e, *threaded_edges = NULL;
410 /* If we are partitioning hot/cold basic blocks, we don't want to
411 mess up unconditional or indirect jumps that cross between hot
412 and cold sections.
414 Basic block partitioning may result in some jumps that appear to
415 be optimizable (or blocks that appear to be mergeable), but which really
416 must be left untouched (they are required to make it safely across
417 partition boundaries). See the comments at the top of
418 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
420 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
421 return false;
423 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
425 basic_block target, first;
426 int counter;
427 bool threaded = false;
428 int nthreaded_edges = 0;
429 bool may_thread = first_pass | df_get_bb_dirty (b);
431 /* Skip complex edges because we don't know how to update them.
433 Still handle fallthru edges, as we may succeed in forwarding the fallthru
434 edge to the same place as the branch edge of a conditional branch
435 and turning the conditional branch into an unconditional branch.  */
436 if (e->flags & EDGE_COMPLEX)
438 ei_next (&ei);
439 continue;
442 target = first = e->dest;
443 counter = NUM_FIXED_BLOCKS;
445 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
446 up jumps that cross between hot/cold sections.
448 Basic block partitioning may result in some jumps that appear
449 to be optimizable (or blocks that appear to be mergeable), but which
450 really must be left untouched (they are required to make it safely
451 across partition boundaries). See the comments at the top of
452 bb-reorder.c:partition_hot_cold_basic_blocks for complete
453 details. */
455 if (first != EXIT_BLOCK_PTR
456 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
457 return false;
459 while (counter < n_basic_blocks)
461 basic_block new_target = NULL;
462 bool new_target_threaded = false;
463 may_thread |= df_get_bb_dirty (target);
465 if (FORWARDER_BLOCK_P (target)
466 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
467 && single_succ (target) != EXIT_BLOCK_PTR)
469 /* Bypass trivial infinite loops. */
470 new_target = single_succ (target);
471 if (target == new_target)
472 counter = n_basic_blocks;
475 /* Allow threading over at most one edge at a time to simplify updating
476 of probabilities.  */
477 else if ((mode & CLEANUP_THREADING) && may_thread)
479 edge t = thread_jump (e, target);
480 if (t)
482 if (!threaded_edges)
483 threaded_edges = XNEWVEC (edge, n_basic_blocks);
484 else
486 int i;
488 /* Detect an infinite loop across blocks not
489 including the start block. */
490 for (i = 0; i < nthreaded_edges; ++i)
491 if (threaded_edges[i] == t)
492 break;
493 if (i < nthreaded_edges)
495 counter = n_basic_blocks;
496 break;
500 /* Detect an infinite loop across the start block. */
501 if (t->dest == b)
502 break;
504 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
505 threaded_edges[nthreaded_edges++] = t;
507 new_target = t->dest;
508 new_target_threaded = true;
512 if (!new_target)
513 break;
515 counter++;
516 target = new_target;
517 threaded |= new_target_threaded;
520 if (counter >= n_basic_blocks)
522 if (dump_file)
523 fprintf (dump_file, "Infinite loop in BB %i.\n",
524 target->index);
526 else if (target == first)
527 ; /* We didn't do anything. */
528 else
530 /* Save the values now, as the edge may get removed. */
531 gcov_type edge_count = e->count;
532 int edge_probability = e->probability;
533 int edge_frequency;
534 int n = 0;
536 /* Don't force if target is exit block. */
537 if (threaded && target != EXIT_BLOCK_PTR)
539 notice_new_block (redirect_edge_and_branch_force (e, target));
540 if (dump_file)
541 fprintf (dump_file, "Conditionals threaded.\n");
543 else if (!redirect_edge_and_branch (e, target))
545 if (dump_file)
546 fprintf (dump_file,
547 "Forwarding edge %i->%i to %i failed.\n",
548 b->index, e->dest->index, target->index);
549 ei_next (&ei);
550 continue;
553 /* We successfully forwarded the edge. Now update profile
554 data: for each edge we traversed in the chain, remove
555 the original edge's execution count. */
556 edge_frequency = ((edge_probability * b->frequency
557 + REG_BR_PROB_BASE / 2)
558 / REG_BR_PROB_BASE);
560 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
561 b->flags |= BB_FORWARDER_BLOCK;
565 edge t;
567 if (!single_succ_p (first))
569 gcc_assert (n < nthreaded_edges);
570 t = threaded_edges [n++];
571 gcc_assert (t->src == first);
572 update_bb_profile_for_threading (first, edge_frequency,
573 edge_count, t);
574 update_br_prob_note (first);
576 else
578 first->count -= edge_count;
579 if (first->count < 0)
580 first->count = 0;
581 first->frequency -= edge_frequency;
582 if (first->frequency < 0)
583 first->frequency = 0;
584 /* It is possible that as a result of
585 threading we've removed the edge, as it is
586 threaded to the fallthru edge.  Avoid
587 getting out of sync.  */
588 if (n < nthreaded_edges
589 && first == threaded_edges [n]->src)
590 n++;
591 t = single_succ_edge (first);
594 t->count -= edge_count;
595 if (t->count < 0)
596 t->count = 0;
597 first = t->dest;
599 while (first != target);
601 changed = true;
602 continue;
604 ei_next (&ei);
607 if (threaded_edges)
608 free (threaded_edges);
609 return changed;
613 /* Blocks A and B are to be merged into a single block. A has no incoming
614 fallthru edge, so it can be moved before B without adding or modifying
615 any jumps (aside from the jump from A to B). */
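/* That is: delete the barrier after A, move A's insns so that they
   immediately precede B's head (if they are not already there), relink the
   block chain so A comes right before B, and let merge_blocks combine the
   two.  */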
617 static void
618 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
620 rtx barrier;
622 /* If we are partitioning hot/cold basic blocks, we don't want to
623 mess up unconditional or indirect jumps that cross between hot
624 and cold sections.
626 Basic block partitioning may result in some jumps that appear to
627 be optimizable (or blocks that appear to be mergeable), but which really
628 must be left untouched (they are required to make it safely across
629 partition boundaries). See the comments at the top of
630 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
632 if (BB_PARTITION (a) != BB_PARTITION (b))
633 return;
635 barrier = next_nonnote_insn (BB_END (a));
636 gcc_assert (BARRIER_P (barrier));
637 delete_insn (barrier);
639 /* Scramble the insn chain. */
640 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
641 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
642 df_set_bb_dirty (a);
644 if (dump_file)
645 fprintf (dump_file, "Moved block %d before %d and merged.\n",
646 a->index, b->index);
648 /* Swap the records for the two blocks around. */
650 unlink_block (a);
651 link_block (a, b->prev_bb);
653 /* Now blocks A and B are contiguous. Merge them. */
654 merge_blocks (a, b);
657 /* Blocks A and B are to be merged into a single block. B has no outgoing
658 fallthru edge, so it can be moved after A without adding or modifying
659 any jumps (aside from the jump from A to B). */
661 static void
662 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
664 rtx barrier, real_b_end;
665 rtx label, table;
667 /* If we are partitioning hot/cold basic blocks, we don't want to
668 mess up unconditional or indirect jumps that cross between hot
669 and cold sections.
671 Basic block partitioning may result in some jumps that appear to
672 be optimizable (or blocks that appear to be mergeable), but which really
673 must be left untouched (they are required to make it safely across
674 partition boundaries). See the comments at the top of
675 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
677 if (BB_PARTITION (a) != BB_PARTITION (b))
678 return;
680 real_b_end = BB_END (b);
682 /* If there is a jump table following block B, temporarily add the jump table
683 to block B so that it will also be moved to the correct location.  */
684 if (tablejump_p (BB_END (b), &label, &table)
685 && prev_active_insn (label) == BB_END (b))
687 BB_END (b) = table;
690 /* There had better have been a barrier there. Delete it. */
691 barrier = NEXT_INSN (BB_END (b));
692 if (barrier && BARRIER_P (barrier))
693 delete_insn (barrier);
696 /* Scramble the insn chain. */
697 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
699 /* Restore the real end of b. */
700 BB_END (b) = real_b_end;
702 if (dump_file)
703 fprintf (dump_file, "Moved block %d after %d and merged.\n",
704 b->index, a->index);
706 /* Now blocks A and B are contiguous. Merge them. */
707 merge_blocks (a, b);
710 /* Attempt to merge basic blocks that are potentially non-adjacent.
711 Return NULL iff the attempt failed, otherwise return the basic block
712 where cleanup_cfg should continue.  Because the merging commonly
713 moves a basic block away or introduces another optimization
714 possibility, return the basic block just before B so cleanup_cfg
715 doesn't need to iterate.
717 It may be a good idea to return the basic block before C in the case where
718 C has been moved after B and originally appeared earlier in the
719 insn sequence, but we have no information available about the
720 relative ordering of these two.  Hopefully it is not too common.  */
722 static basic_block
723 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
725 basic_block next;
727 /* If we are partitioning hot/cold basic blocks, we don't want to
728 mess up unconditional or indirect jumps that cross between hot
729 and cold sections.
731 Basic block partitioning may result in some jumps that appear to
732 be optimizable (or blocks that appear to be mergeable), but which really
733 must be left untouched (they are required to make it safely across
734 partition boundaries). See the comments at the top of
735 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
737 if (BB_PARTITION (b) != BB_PARTITION (c))
738 return NULL;
740 /* If B has a fallthru edge to C, no need to move anything. */
741 if (e->flags & EDGE_FALLTHRU)
743 int b_index = b->index, c_index = c->index;
744 merge_blocks (b, c);
745 update_forwarder_flag (b);
747 if (dump_file)
748 fprintf (dump_file, "Merged %d and %d without moving.\n",
749 b_index, c_index);
751 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
754 /* Otherwise we will need to move code around. Do that only if expensive
755 transformations are allowed. */
756 else if (mode & CLEANUP_EXPENSIVE)
758 edge tmp_edge, b_fallthru_edge;
759 bool c_has_outgoing_fallthru;
760 bool b_has_incoming_fallthru;
761 edge_iterator ei;
763 /* Avoid overactive code motion, as the forwarder blocks should be
764 eliminated by edge redirection instead. One exception might have
765 been if B is a forwarder block and C has no fallthru edge, but
766 that should be cleaned up by bb-reorder instead. */
767 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
768 return NULL;
770 /* We must make sure not to munge the nesting of lexical blocks
771 and loop notes.  This is done by squeezing out all the notes
772 and leaving them there to lie.  Not ideal, but functional.  */
774 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
775 if (tmp_edge->flags & EDGE_FALLTHRU)
776 break;
778 c_has_outgoing_fallthru = (tmp_edge != NULL);
780 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
781 if (tmp_edge->flags & EDGE_FALLTHRU)
782 break;
784 b_has_incoming_fallthru = (tmp_edge != NULL);
785 b_fallthru_edge = tmp_edge;
786 next = b->prev_bb;
787 if (next == c)
788 next = next->prev_bb;
790 /* Otherwise, we're going to try to move C after B. If C does
791 not have an outgoing fallthru, then it can be moved
792 immediately after B without introducing or modifying jumps. */
793 if (! c_has_outgoing_fallthru)
795 merge_blocks_move_successor_nojumps (b, c);
796 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
799 /* If B does not have an incoming fallthru, then it can be moved
800 immediately before C without introducing or modifying jumps.
801 C cannot be the first block, so we do not have to worry about
802 accessing a non-existent block. */
804 if (b_has_incoming_fallthru)
806 basic_block bb;
808 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
809 return NULL;
810 bb = force_nonfallthru (b_fallthru_edge);
811 if (bb)
812 notice_new_block (bb);
815 merge_blocks_move_predecessor_nojumps (b, c);
816 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
819 return NULL;
823 /* Clear those memory attributes of the MEM expressions found in X and Y
824 that are not equal.  */
826 void
827 merge_memattrs (rtx x, rtx y)
829 int i;
830 int j;
831 enum rtx_code code;
832 const char *fmt;
834 if (x == y)
835 return;
836 if (x == 0 || y == 0)
837 return;
839 code = GET_CODE (x);
841 if (code != GET_CODE (y))
842 return;
844 if (GET_MODE (x) != GET_MODE (y))
845 return;
847 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
849 if (! MEM_ATTRS (x))
850 MEM_ATTRS (y) = 0;
851 else if (! MEM_ATTRS (y))
852 MEM_ATTRS (x) = 0;
853 else
855 rtx mem_size;
857 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
859 set_mem_alias_set (x, 0);
860 set_mem_alias_set (y, 0);
863 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
865 set_mem_expr (x, 0);
866 set_mem_expr (y, 0);
867 set_mem_offset (x, 0);
868 set_mem_offset (y, 0);
870 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
872 set_mem_offset (x, 0);
873 set_mem_offset (y, 0);
876 if (!MEM_SIZE (x))
877 mem_size = NULL_RTX;
878 else if (!MEM_SIZE (y))
879 mem_size = NULL_RTX;
880 else
881 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
882 INTVAL (MEM_SIZE (y))));
883 set_mem_size (x, mem_size);
884 set_mem_size (y, mem_size);
886 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
887 set_mem_align (y, MEM_ALIGN (x));
891 fmt = GET_RTX_FORMAT (code);
892 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
894 switch (fmt[i])
896 case 'E':
897 /* Two vectors must have the same length. */
898 if (XVECLEN (x, i) != XVECLEN (y, i))
899 return;
901 for (j = 0; j < XVECLEN (x, i); j++)
902 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
904 break;
906 case 'e':
907 merge_memattrs (XEXP (x, i), XEXP (y, i));
910 return;
914 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
916 static bool
917 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
919 rtx p1, p2;
921 /* Verify that I1 and I2 are equivalent. */
922 if (GET_CODE (i1) != GET_CODE (i2))
923 return false;
925 p1 = PATTERN (i1);
926 p2 = PATTERN (i2);
928 if (GET_CODE (p1) != GET_CODE (p2))
929 return false;
931 /* If this is a CALL_INSN, compare register usage information.
932 If we don't check this on stack register machines, the two
933 CALL_INSNs might be merged leaving reg-stack.c with mismatching
934 numbers of stack registers in the same basic block.
935 If we don't check this on machines with delay slots, a delay slot may
936 be filled that clobbers a parameter expected by the subroutine.
938 ??? We take the simple route for now and assume that if they're
939 equal, they were constructed identically. */
941 if (CALL_P (i1)
942 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
943 CALL_INSN_FUNCTION_USAGE (i2))
944 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
945 return false;
947 #ifdef STACK_REGS
948 /* If cross_jump_death_matters is not 0, the insn's mode
949 indicates whether or not the insn contains any stack-like
950 regs. */
952 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
954 /* If register stack conversion has already been done, then
955 death notes must also be compared before it is certain that
956 the two instruction streams match. */
958 rtx note;
959 HARD_REG_SET i1_regset, i2_regset;
961 CLEAR_HARD_REG_SET (i1_regset);
962 CLEAR_HARD_REG_SET (i2_regset);
964 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
965 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
966 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
968 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
969 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
970 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
972 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
973 return false;
975 #endif
977 if (reload_completed
978 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
979 return true;
981 /* Do not do EQUIV substitution after reload. First, we're undoing the
982 work of reload_cse. Second, we may be undoing the work of the post-
983 reload splitting pass. */
984 /* ??? Possibly add a new phase switch variable that can be used by
985 targets to disallow the troublesome insns after splitting. */
986 if (!reload_completed)
988 /* The following code helps take care of G++ cleanups. */
989 rtx equiv1 = find_reg_equal_equiv_note (i1);
990 rtx equiv2 = find_reg_equal_equiv_note (i2);
992 if (equiv1 && equiv2
993 /* If the equivalences are not to a constant, they may
994 reference pseudos that no longer exist, so we can't
995 use them. */
996 && (! reload_completed
997 || (CONSTANT_P (XEXP (equiv1, 0))
998 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1000 rtx s1 = single_set (i1);
1001 rtx s2 = single_set (i2);
1002 if (s1 != 0 && s2 != 0
1003 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1005 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1006 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1007 if (! rtx_renumbered_equal_p (p1, p2))
1008 cancel_changes (0);
1009 else if (apply_change_group ())
1010 return true;
1015 return false;
1018 /* Look through the insns at the end of BB1 and BB2 and find the longest
1019 sequence of insns that are equivalent.  Store the first insns for that sequence
1020 in *F1 and *F2 and return the sequence length.
1022 To simplify callers of this function, if the blocks match exactly,
1023 store the head of the blocks in *F1 and *F2. */
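/* For example, if BB1 and BB2 both end with the same three insns followed by
   a simple jump, the function returns 3 and *F1 and *F2 point at the start of
   the matched sequence in each block.  */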
1025 static int
1026 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1027 basic_block bb2, rtx *f1, rtx *f2)
1029 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1030 int ninsns = 0;
1032 /* Skip simple jumps at the end of the blocks. Complex jumps still
1033 need to be compared for equivalence, which we'll do below. */
1035 i1 = BB_END (bb1);
1036 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1037 if (onlyjump_p (i1)
1038 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1040 last1 = i1;
1041 i1 = PREV_INSN (i1);
1044 i2 = BB_END (bb2);
1045 if (onlyjump_p (i2)
1046 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1048 last2 = i2;
1049 /* Count everything except for an unconditional jump as an insn.  */
1050 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1051 ninsns++;
1052 i2 = PREV_INSN (i2);
1055 while (true)
1057 /* Ignore notes. */
1058 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1059 i1 = PREV_INSN (i1);
1061 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1062 i2 = PREV_INSN (i2);
1064 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1065 break;
1067 if (!old_insns_match_p (mode, i1, i2))
1068 break;
1070 merge_memattrs (i1, i2);
1072 /* Don't begin a cross-jump with a NOTE insn. */
1073 if (INSN_P (i1))
1075 /* If the merged insns have different REG_EQUAL notes, then
1076 remove them. */
1077 rtx equiv1 = find_reg_equal_equiv_note (i1);
1078 rtx equiv2 = find_reg_equal_equiv_note (i2);
1080 if (equiv1 && !equiv2)
1081 remove_note (i1, equiv1);
1082 else if (!equiv1 && equiv2)
1083 remove_note (i2, equiv2);
1084 else if (equiv1 && equiv2
1085 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1087 remove_note (i1, equiv1);
1088 remove_note (i2, equiv2);
1091 afterlast1 = last1, afterlast2 = last2;
1092 last1 = i1, last2 = i2;
1093 ninsns++;
1096 i1 = PREV_INSN (i1);
1097 i2 = PREV_INSN (i2);
1100 #ifdef HAVE_cc0
1101 /* Don't allow the insn after a compare to be shared by
1102 cross-jumping unless the compare is also shared. */
1103 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1104 last1 = afterlast1, last2 = afterlast2, ninsns--;
1105 #endif
1107 /* Include preceding notes and labels in the cross-jump.  One,
1108 this may bring us to the head of the blocks as requested above.
1109 Two, it keeps line number notes as well matched as possible.  */
1110 if (ninsns)
1112 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1113 last1 = PREV_INSN (last1);
1115 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1116 last1 = PREV_INSN (last1);
1118 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1119 last2 = PREV_INSN (last2);
1121 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1122 last2 = PREV_INSN (last2);
1124 *f1 = last1;
1125 *f2 = last2;
1128 return ninsns;
1131 /* Return true iff the condbranches at the end of BB1 and BB2 match. */
1132 bool
1133 condjump_equiv_p (struct equiv_info *info, bool call_init)
1135 basic_block bb1 = info->x_block;
1136 basic_block bb2 = info->y_block;
1137 edge b1 = BRANCH_EDGE (bb1);
1138 edge b2 = BRANCH_EDGE (bb2);
1139 edge f1 = FALLTHRU_EDGE (bb1);
1140 edge f2 = FALLTHRU_EDGE (bb2);
1141 bool reverse, match;
1142 rtx set1, set2, cond1, cond2;
1143 rtx src1, src2;
1144 enum rtx_code code1, code2;
1146 /* Get around possible forwarders on fallthru edges. Other cases
1147 should be optimized out already. */
1148 if (FORWARDER_BLOCK_P (f1->dest))
1149 f1 = single_succ_edge (f1->dest);
1151 if (FORWARDER_BLOCK_P (f2->dest))
1152 f2 = single_succ_edge (f2->dest);
1154 /* To simplify use of this function, return false if there are
1155 unneeded forwarder blocks. These will get eliminated later
1156 during cleanup_cfg. */
1157 if (FORWARDER_BLOCK_P (f1->dest)
1158 || FORWARDER_BLOCK_P (f2->dest)
1159 || FORWARDER_BLOCK_P (b1->dest)
1160 || FORWARDER_BLOCK_P (b2->dest))
1161 return false;
1163 if (f1->dest == f2->dest && b1->dest == b2->dest)
1164 reverse = false;
1165 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1166 reverse = true;
1167 else
1168 return false;
1170 set1 = pc_set (BB_END (bb1));
1171 set2 = pc_set (BB_END (bb2));
1172 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1173 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1174 reverse = !reverse;
1176 src1 = SET_SRC (set1);
1177 src2 = SET_SRC (set2);
1178 cond1 = XEXP (src1, 0);
1179 cond2 = XEXP (src2, 0);
1180 code1 = GET_CODE (cond1);
1181 if (reverse)
1182 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1183 else
1184 code2 = GET_CODE (cond2);
1186 if (code2 == UNKNOWN)
1187 return false;
1189 if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
1190 gcc_unreachable ();
1191 /* Make the sources of the pc sets unreadable so that when we call
1192 insns_match_p it won't process them.
1193 The death_notes_match_p from insns_match_p won't see the local registers
1194 used for the pc set, but that could only cause missed optimizations when
1195 there are actually condjumps that use stack registers. */
1196 SET_SRC (set1) = pc_rtx;
1197 SET_SRC (set2) = pc_rtx;
1198 /* Verify codes and operands match. */
1199 if (code1 == code2)
1201 match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
1202 && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
1203 && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
1206 else if (code1 == swap_condition (code2))
1208 match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
1209 && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
1210 && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
1213 else
1214 match = false;
1215 SET_SRC (set1) = src1;
1216 SET_SRC (set2) = src2;
1217 match &= verify_changes (0);
1219 /* If we return true, we will join the blocks, which means that
1220 we will only have one branch prediction bit to work with.  Thus
1221 we require the existing branches to have probabilities that are
1222 roughly similar.  */
1223 if (match
1224 && !optimize_size
1225 && maybe_hot_bb_p (bb1)
1226 && maybe_hot_bb_p (bb2))
1228 int prob2;
1230 if (b1->dest == b2->dest)
1231 prob2 = b2->probability;
1232 else
1233 /* Do not use f2 probability as f2 may be forwarded. */
1234 prob2 = REG_BR_PROB_BASE - b2->probability;
1236 /* Fail if the difference in probabilities is greater than 50%.
1237 This rules out two well-predicted branches with opposite
1238 outcomes. */
1239 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1241 if (dump_file)
1242 fprintf (dump_file,
1243 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1244 bb1->index, bb2->index, b1->probability, prob2);
1246 match = false;
1250 if (dump_file && match)
1251 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1252 bb1->index, bb2->index);
1254 if (!match)
1255 cancel_changes (0);
1256 return match;
1259 /* Return true iff outgoing edges of BB1 and BB2 match, together with
1260 the branch instruction. This means that if we commonize the control
1261 flow before the end of the basic block, the semantics remain unchanged.
1263 We may assume that there exists one edge with a common destination. */
1265 static bool
1266 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1268 int nehedges1 = 0, nehedges2 = 0;
1269 edge fallthru1 = 0, fallthru2 = 0;
1270 edge e1, e2;
1271 edge_iterator ei;
1273 /* If BB1 has only one successor, we may be looking at either an
1274 unconditional jump, or a fake edge to exit. */
1275 if (single_succ_p (bb1)
1276 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1277 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1278 return (single_succ_p (bb2)
1279 && (single_succ_edge (bb2)->flags
1280 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1281 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1283 /* Match conditional jumps - this may get tricky when fallthru and branch
1284 edges are crossed. */
1285 if (EDGE_COUNT (bb1->succs) == 2
1286 && any_condjump_p (BB_END (bb1))
1287 && onlyjump_p (BB_END (bb1)))
1289 edge b1, f1, b2, f2;
1290 bool reverse, match;
1291 rtx set1, set2, cond1, cond2;
1292 enum rtx_code code1, code2;
1294 if (EDGE_COUNT (bb2->succs) != 2
1295 || !any_condjump_p (BB_END (bb2))
1296 || !onlyjump_p (BB_END (bb2)))
1297 return false;
1299 b1 = BRANCH_EDGE (bb1);
1300 b2 = BRANCH_EDGE (bb2);
1301 f1 = FALLTHRU_EDGE (bb1);
1302 f2 = FALLTHRU_EDGE (bb2);
1304 /* Get around possible forwarders on fallthru edges. Other cases
1305 should be optimized out already. */
1306 if (FORWARDER_BLOCK_P (f1->dest))
1307 f1 = single_succ_edge (f1->dest);
1309 if (FORWARDER_BLOCK_P (f2->dest))
1310 f2 = single_succ_edge (f2->dest);
1312 /* To simplify use of this function, return false if there are
1313 unneeded forwarder blocks. These will get eliminated later
1314 during cleanup_cfg. */
1315 if (FORWARDER_BLOCK_P (f1->dest)
1316 || FORWARDER_BLOCK_P (f2->dest)
1317 || FORWARDER_BLOCK_P (b1->dest)
1318 || FORWARDER_BLOCK_P (b2->dest))
1319 return false;
1321 if (f1->dest == f2->dest && b1->dest == b2->dest)
1322 reverse = false;
1323 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1324 reverse = true;
1325 else
1326 return false;
1328 set1 = pc_set (BB_END (bb1));
1329 set2 = pc_set (BB_END (bb2));
1330 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1331 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1332 reverse = !reverse;
1334 cond1 = XEXP (SET_SRC (set1), 0);
1335 cond2 = XEXP (SET_SRC (set2), 0);
1336 code1 = GET_CODE (cond1);
1337 if (reverse)
1338 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1339 else
1340 code2 = GET_CODE (cond2);
1342 if (code2 == UNKNOWN)
1343 return false;
1345 /* Verify codes and operands match. */
1346 match = ((code1 == code2
1347 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1348 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1349 || (code1 == swap_condition (code2)
1350 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1351 XEXP (cond2, 0))
1352 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1353 XEXP (cond2, 1))));
1355 /* If we return true, we will join the blocks, which means that
1356 we will only have one branch prediction bit to work with.  Thus
1357 we require the existing branches to have probabilities that are
1358 roughly similar.  */
1359 if (match
1360 && !optimize_size
1361 && maybe_hot_bb_p (bb1)
1362 && maybe_hot_bb_p (bb2))
1364 int prob2;
1366 if (b1->dest == b2->dest)
1367 prob2 = b2->probability;
1368 else
1369 /* Do not use f2 probability as f2 may be forwarded. */
1370 prob2 = REG_BR_PROB_BASE - b2->probability;
1372 /* Fail if the difference in probabilities is greater than 50%.
1373 This rules out two well-predicted branches with opposite
1374 outcomes. */
1375 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1377 if (dump_file)
1378 fprintf (dump_file,
1379 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1380 bb1->index, bb2->index, b1->probability, prob2);
1382 return false;
1386 if (dump_file && match)
1387 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1388 bb1->index, bb2->index);
1390 return match;
1393 /* Generic case - we are seeing a computed jump, table jump or trapping
1394 instruction. */
1396 /* Check whether there are tablejumps in the end of BB1 and BB2.
1397 Return true if they are identical. */
1399 rtx label1, label2;
1400 rtx table1, table2;
1402 if (tablejump_p (BB_END (bb1), &label1, &table1)
1403 && tablejump_p (BB_END (bb2), &label2, &table2)
1404 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1406 /* The labels should never be the same rtx.  If they really are the same,
1407 the jump tables are the same too.  So disable crossjumping of blocks BB1
1408 and BB2 because when deleting the common insns at the end of BB1
1409 by delete_basic_block () the jump table would be deleted too.  */
1410 /* If LABEL2 is referenced in BB1->END, do not do anything
1411 because we would lose information when replacing
1412 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1413 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1415 /* Set IDENTICAL to true when the tables are identical. */
1416 bool identical = false;
1417 rtx p1, p2;
1419 p1 = PATTERN (table1);
1420 p2 = PATTERN (table2);
1421 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1423 identical = true;
1425 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1426 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1427 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1428 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1430 int i;
1432 identical = true;
1433 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1434 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1435 identical = false;
1438 if (identical)
1440 replace_label_data rr;
1441 bool match;
1443 /* Temporarily replace references to LABEL1 with LABEL2
1444 in BB1->END so that we could compare the instructions. */
1445 rr.r1 = label1;
1446 rr.r2 = label2;
1447 rr.update_label_nuses = false;
1448 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1450 match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1451 if (dump_file && match)
1452 fprintf (dump_file,
1453 "Tablejumps in bb %i and %i match.\n",
1454 bb1->index, bb2->index);
1456 /* Set the original label in BB1->END because when deleting
1457 a block whose end is a tablejump, the tablejump referenced
1458 from the instruction is deleted too. */
1459 rr.r1 = label2;
1460 rr.r2 = label1;
1461 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1463 return match;
1466 return false;
1470 /* First ensure that the instructions match. There may be many outgoing
1471 edges so this test is generally cheaper. */
1472 if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1473 return false;
1475 /* Search the outgoing edges, ensure that the counts do match, and find possible
1476 fallthru and exception handling edges since these need more
1477 validation.  */
1478 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1479 return false;
1481 FOR_EACH_EDGE (e1, ei, bb1->succs)
1483 e2 = EDGE_SUCC (bb2, ei.index);
1485 if (e1->flags & EDGE_EH)
1486 nehedges1++;
1488 if (e2->flags & EDGE_EH)
1489 nehedges2++;
1491 if (e1->flags & EDGE_FALLTHRU)
1492 fallthru1 = e1;
1493 if (e2->flags & EDGE_FALLTHRU)
1494 fallthru2 = e2;
1497 /* If number of edges of various types does not match, fail. */
1498 if (nehedges1 != nehedges2
1499 || (fallthru1 != 0) != (fallthru2 != 0))
1500 return false;
1502 /* fallthru edges must be forwarded to the same destination. */
1503 if (fallthru1)
1505 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1506 ? single_succ (fallthru1->dest): fallthru1->dest);
1507 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1508 ? single_succ (fallthru2->dest): fallthru2->dest);
1510 if (d1 != d2)
1511 return false;
1514 /* Ensure the same EH region. */
1516 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1517 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1519 if (!n1 && n2)
1520 return false;
1522 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1523 return false;
1526 /* The same checks as in try_crossjump_to_edge.  They are required for the RTL
1527 version of sequence abstraction.  */
1528 FOR_EACH_EDGE (e1, ei, bb2->succs)
1530 edge e2;
1531 edge_iterator ei;
1532 basic_block d1 = e1->dest;
1534 if (FORWARDER_BLOCK_P (d1))
1535 d1 = EDGE_SUCC (d1, 0)->dest;
1537 FOR_EACH_EDGE (e2, ei, bb1->succs)
1539 basic_block d2 = e2->dest;
1540 if (FORWARDER_BLOCK_P (d2))
1541 d2 = EDGE_SUCC (d2, 0)->dest;
1542 if (d1 == d2)
1543 break;
1546 if (!e2)
1547 return false;
1550 return true;
1553 /* Returns true if basic block BB has a preserve label.  */
1555 static bool
1556 block_has_preserve_label (basic_block bb)
1558 return (bb
1559 && block_label (bb)
1560 && LABEL_PRESERVE_P (block_label (bb)));
1563 /* E1 and E2 are edges with the same destination block. Search their
1564 predecessors for common code. If found, redirect control flow from
1565 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
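/* In other words, if the tail of E1->SRC matches the tail of E2->SRC, the
   matching insns are deleted from E1->SRC and its control flow is redirected
   into E2->SRC at the point where the match begins (splitting E2->SRC first
   when only part of it matches).  */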
1567 static bool
1568 try_crossjump_to_edge (int mode, edge e1, edge e2)
1570 int nmatch;
1571 basic_block src1 = e1->src, src2 = e2->src;
1572 basic_block redirect_to, redirect_from, to_remove;
1573 rtx newpos1, newpos2;
1574 edge s;
1575 edge_iterator ei;
1577 newpos1 = newpos2 = NULL_RTX;
1579 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1580 to try this optimization.
1582 Basic block partitioning may result in some jumps that appear to
1583 be optimizable (or blocks that appear to be mergeable), but which really
1584 must be left untouched (they are required to make it safely across
1585 partition boundaries). See the comments at the top of
1586 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1588 if (flag_reorder_blocks_and_partition && reload_completed)
1589 return false;
1591 /* Search backward through forwarder blocks. We don't need to worry
1592 about multiple entry or chained forwarders, as they will be optimized
1593 away. We do this to look past the unconditional jump following a
1594 conditional jump that is required due to the current CFG shape. */
1595 if (single_pred_p (src1)
1596 && FORWARDER_BLOCK_P (src1))
1597 e1 = single_pred_edge (src1), src1 = e1->src;
1599 if (single_pred_p (src2)
1600 && FORWARDER_BLOCK_P (src2))
1601 e2 = single_pred_edge (src2), src2 = e2->src;
1603 /* Nothing to do if we reach ENTRY, or a common source block. */
1604 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1605 return false;
1606 if (src1 == src2)
1607 return false;
1609 /* Seeing more than one forwarder block would confuse us later...  */
1610 if (FORWARDER_BLOCK_P (e1->dest)
1611 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1612 return false;
1614 if (FORWARDER_BLOCK_P (e2->dest)
1615 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1616 return false;
1618 /* Likewise with dead code (possibly newly created by the other optimizations
1619 of cfg_cleanup). */
1620 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1621 return false;
1623 /* Look for the common insn sequence, part the first ... */
1624 if (!outgoing_edges_match (mode, src1, src2))
1625 return false;
1627 /* ... and part the second. */
1628 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1630 /* Don't proceed with the crossjump unless we found a sufficient number
1631 of matching instructions or the 'from' block was totally matched
1632 (such that its predecessors will hopefully be redirected and the
1633 block removed). */
1634 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1635 && (newpos1 != BB_HEAD (src1)))
1636 return false;
1638 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1639 if (block_has_preserve_label (e1->dest)
1640 && (e1->flags & EDGE_ABNORMAL))
1641 return false;
1643 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1644 will be deleted.
1645 If we have tablejumps at the end of SRC1 and SRC2,
1646 they have already been compared for equivalence in outgoing_edges_match (),
1647 so replace the references to TABLE1 by references to TABLE2.  */
1649 rtx label1, label2;
1650 rtx table1, table2;
1652 if (tablejump_p (BB_END (src1), &label1, &table1)
1653 && tablejump_p (BB_END (src2), &label2, &table2)
1654 && label1 != label2)
1656 replace_label_data rr;
1657 rtx insn;
1659 /* Replace references to LABEL1 with LABEL2. */
1660 rr.r1 = label1;
1661 rr.r2 = label2;
1662 rr.update_label_nuses = true;
1663 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1665 /* Do not replace the label in SRC1->END because when deleting
1666 a block whose end is a tablejump, the tablejump referenced
1667 from the instruction is deleted too. */
1668 if (insn != BB_END (src1))
1669 for_each_rtx (&insn, replace_label, &rr);
1674 /* Avoid splitting if possible. We must always split when SRC2 has
1675 EH predecessor edges, or we may end up with basic blocks with both
1676 normal and EH predecessor edges. */
1677 if (newpos2 == BB_HEAD (src2)
1678 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1679 redirect_to = src2;
1680 else
1682 if (newpos2 == BB_HEAD (src2))
1684 /* Skip possible basic block header. */
1685 if (LABEL_P (newpos2))
1686 newpos2 = NEXT_INSN (newpos2);
1687 if (NOTE_P (newpos2))
1688 newpos2 = NEXT_INSN (newpos2);
1691 if (dump_file)
1692 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1693 src2->index, nmatch);
1694 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1697 if (dump_file)
1698 fprintf (dump_file,
1699 "Cross jumping from bb %i to bb %i; %i common insns\n",
1700 src1->index, src2->index, nmatch);
1702 /* We may have some registers visible through the block. */
1703 df_set_bb_dirty (redirect_to);
1705 /* Recompute the frequencies and counts of outgoing edges. */
1706 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1708 edge s2;
1709 edge_iterator ei;
1710 basic_block d = s->dest;
1712 if (FORWARDER_BLOCK_P (d))
1713 d = single_succ (d);
1715 FOR_EACH_EDGE (s2, ei, src1->succs)
1717 basic_block d2 = s2->dest;
1718 if (FORWARDER_BLOCK_P (d2))
1719 d2 = single_succ (d2);
1720 if (d == d2)
1721 break;
1724 s->count += s2->count;
1726 /* Take care to update possible forwarder blocks.  We verified
1727 that there is no more than one in the chain, so we can't run
1728 into an infinite loop.  */
1729 if (FORWARDER_BLOCK_P (s->dest))
1731 single_succ_edge (s->dest)->count += s2->count;
1732 s->dest->count += s2->count;
1733 s->dest->frequency += EDGE_FREQUENCY (s);
1736 if (FORWARDER_BLOCK_P (s2->dest))
1738 single_succ_edge (s2->dest)->count -= s2->count;
1739 if (single_succ_edge (s2->dest)->count < 0)
1740 single_succ_edge (s2->dest)->count = 0;
1741 s2->dest->count -= s2->count;
1742 s2->dest->frequency -= EDGE_FREQUENCY (s);
1743 if (s2->dest->frequency < 0)
1744 s2->dest->frequency = 0;
1745 if (s2->dest->count < 0)
1746 s2->dest->count = 0;
1749 if (!redirect_to->frequency && !src1->frequency)
1750 s->probability = (s->probability + s2->probability) / 2;
1751 else
1752 s->probability
1753 = ((s->probability * redirect_to->frequency +
1754 s2->probability * src1->frequency)
1755 / (redirect_to->frequency + src1->frequency));
1758 /* Adjust count and frequency for the block. An earlier jump
1759 threading pass may have left the profile in an inconsistent
1760 state (see update_bb_profile_for_threading) so we must be
1761 prepared for overflows. */
1762 redirect_to->count += src1->count;
1763 redirect_to->frequency += src1->frequency;
1764 if (redirect_to->frequency > BB_FREQ_MAX)
1765 redirect_to->frequency = BB_FREQ_MAX;
1766 update_br_prob_note (redirect_to);
1768 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1770 /* Skip possible basic block header. */
1771 if (LABEL_P (newpos1))
1772 newpos1 = NEXT_INSN (newpos1);
1774 if (NOTE_P (newpos1))
1775 newpos1 = NEXT_INSN (newpos1);
1777 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1778 to_remove = single_succ (redirect_from);
1780 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1781 delete_basic_block (to_remove);
1783 update_forwarder_flag (redirect_from);
1784 if (redirect_to != src2)
1785 update_forwarder_flag (src2);
1787 return true;
1790 /* Search the predecessors of BB for common insn sequences.  When found,
1791 share code between them by redirecting control flow.  Return true if
1792 any changes were made.  */
1794 static bool
1795 try_crossjump_bb (int mode, basic_block bb)
1797 edge e, e2, fallthru;
1798 bool changed;
1799 unsigned max, ix, ix2;
1800 basic_block ev, ev2;
1801 edge_iterator ei;
1803 /* Nothing to do if there are not at least two incoming edges.  */
1804 if (EDGE_COUNT (bb->preds) < 2)
1805 return false;
1807 /* Don't crossjump if this block ends in a computed jump,
1808 unless we are optimizing for size. */
1809 if (!optimize_size
1810 && bb != EXIT_BLOCK_PTR
1811 && computed_jump_p (BB_END (bb)))
1812 return false;
1814 /* If we are partitioning hot/cold basic blocks, we don't want to
1815 mess up unconditional or indirect jumps that cross between hot
1816 and cold sections.
1818 Basic block partitioning may result in some jumps that appear to
1819 be optimizable (or blocks that appear to be mergeable), but which really
1820 must be left untouched (they are required to make it safely across
1821 partition boundaries). See the comments at the top of
1822 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1824 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1825 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1826 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1827 return false;
1829 /* It is always cheapest to redirect a block that ends in a branch to
1830 a block that falls through into BB, as that adds no branches to the
1831 program. We'll try that combination first. */
1832 fallthru = NULL;
1833 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1835 if (EDGE_COUNT (bb->preds) > max)
1836 return false;
1838 FOR_EACH_EDGE (e, ei, bb->preds)
1840 if (e->flags & EDGE_FALLTHRU)
1841 fallthru = e;
1844 changed = false;
1845 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1847 e = EDGE_PRED (ev, ix);
1848 ix++;
1850 /* As noted above, first try with the fallthru predecessor. */
1851 if (fallthru)
1853 /* Don't combine the fallthru edge into anything else.
1854 If there is a match, we'll do it the other way around. */
1855 if (e == fallthru)
1856 continue;
1857 /* If nothing changed since the last attempt, there is nothing
1858 we can do. */
1859 if (!first_pass
1860 && (!(df_get_bb_dirty (e->src))
1861 && !(df_get_bb_dirty (fallthru->src))))
1862 continue;
1864 if (try_crossjump_to_edge (mode, e, fallthru))
1866 changed = true;
1867 ix = 0;
1868 ev = bb;
1869 continue;
1873 /* Non-obvious work limiting check: Recognize that we're going
1874 to call try_crossjump_bb on every basic block. So if we have
1875 two blocks with lots of outgoing edges (a switch) and they
1876 share lots of common destinations, then we would do the
1877 cross-jump check once for each common destination.
1879 Now, if the blocks actually are cross-jump candidates, then
1880 all of their destinations will be shared. Which means that
1881 we only need check them for cross-jump candidacy once. We
1882 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1883 choosing to do the check from the block for which the edge
1884 in question is the first successor of A. */
1885 if (EDGE_SUCC (e->src, 0) != e)
1886 continue;
1888 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1890 e2 = EDGE_PRED (ev2, ix2);
1891 ix2++;
1893 if (e2 == e)
1894 continue;
1896 /* We've already checked the fallthru edge above. */
1897 if (e2 == fallthru)
1898 continue;
1900 /* The "first successor" check above only prevents multiple
1901 checks of crossjump(A,B). In order to prevent redundant
1902 checks of crossjump(B,A), require that A be the block
1903 with the lowest index. */
1904 if (e->src->index > e2->src->index)
1905 continue;
1907 /* If nothing changed since the last attempt, there is nothing
1908 we can do. */
1909 if (!first_pass
1910 && (!(df_get_bb_dirty (e->src))
1911 && !(df_get_bb_dirty (e2->src))))
1912 continue;
1914 if (try_crossjump_to_edge (mode, e, e2))
1916 changed = true;
1917 ev2 = bb;
1918 ix = 0;
1919 break;
1924 return changed;
1927 /* Do simple CFG optimizations - basic block merging, simplification of
1928 jump instructions, etc. Return nonzero if changes were made. */
1930 static bool
1931 try_optimize_cfg (int mode)
1933 bool changed_overall = false;
1934 bool changed;
1935 int iterations = 0;
1936 basic_block bb, b, next;
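  /* Crossjumping also considers the predecessors of the exit block, so
     give blocks ending in a noreturn call a fake edge to the exit block;
     these fake edges are removed again once crossjumping is done. */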
1938 if (mode & CLEANUP_CROSSJUMP)
1939 add_noreturn_fake_exit_edges ();
1941 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1942 clear_bb_flags ();
1944 FOR_EACH_BB (bb)
1945 update_forwarder_flag (bb);
1947 if (! targetm.cannot_modify_jumps_p ())
1949 first_pass = true;
1950 /* Attempt to merge blocks as made possible by edge removal. If
1951 a block has only one successor, and the successor has only
1952 one predecessor, they may be combined. */
1955 changed = false;
1956 iterations++;
1958 if (dump_file)
1959 fprintf (dump_file,
1960 "\n\ntry_optimize_cfg iteration %i\n\n",
1961 iterations);
1963 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1965 basic_block c;
1966 edge s;
1967 bool changed_here = false;
1969 /* Delete trivially dead basic blocks. */
1970 if (EDGE_COUNT (b->preds) == 0)
1972 c = b->prev_bb;
1973 if (dump_file)
1974 fprintf (dump_file, "Deleting block %i.\n",
1975 b->index);
1977 delete_basic_block (b);
1978 if (!(mode & CLEANUP_CFGLAYOUT))
1979 changed = true;
1980 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
1981 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
1982 continue;
1985 /* Remove code labels no longer used. */
1986 if (single_pred_p (b)
1987 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1988 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1989 && LABEL_P (BB_HEAD (b))
1990 /* If the previous block ends with a branch to this
1991 block, we can't delete the label. Normally this
1992 is a condjump that is yet to be simplified, but
1993 if CASE_DROPS_THRU, this can be a tablejump with
1994 some element going to the same place as the
1995 default (fallthru). */
1996 && (single_pred (b) == ENTRY_BLOCK_PTR
1997 || !JUMP_P (BB_END (single_pred (b)))
1998 || ! label_is_jump_target_p (BB_HEAD (b),
1999 BB_END (single_pred (b)))))
2001 rtx label = BB_HEAD (b);
2003 delete_insn_chain (label, label, false);
2004 /* If the case label is undeletable, move it after the
2005 BASIC_BLOCK note. */
2006 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2008 rtx bb_note = NEXT_INSN (BB_HEAD (b));
2010 reorder_insns_nobb (label, label, bb_note);
2011 BB_HEAD (b) = bb_note;
2012 if (BB_END (b) == bb_note)
2013 BB_END (b) = label;
2015 if (dump_file)
2016 fprintf (dump_file, "Deleted label in block %i.\n",
2017 b->index);
2020 /* If we fall through an empty block, we can remove it. */
2021 if (!(mode & CLEANUP_CFGLAYOUT)
2022 && single_pred_p (b)
2023 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2024 && !LABEL_P (BB_HEAD (b))
2025 && FORWARDER_BLOCK_P (b)
2026 /* Note that forwarder_block_p being true ensures that
2027 there is a successor of this block. */
2028 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2029 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2031 if (dump_file)
2032 fprintf (dump_file,
2033 "Deleting fallthru block %i.\n",
2034 b->index);
2036 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2037 redirect_edge_succ_nodup (single_pred_edge (b),
2038 single_succ (b));
2039 delete_basic_block (b);
2040 changed = true;
2041 b = c;
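	  /* If B has a single successor C, C has B as its single
	     predecessor, and the edge between them is not complex,
	     try to merge the two blocks. */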
2044 if (single_succ_p (b)
2045 && (s = single_succ_edge (b))
2046 && !(s->flags & EDGE_COMPLEX)
2047 && (c = s->dest) != EXIT_BLOCK_PTR
2048 && single_pred_p (c)
2049 && b != c)
2051 /* When not in cfg_layout mode use code that is aware of insn
2052 reordering. This code may create new basic blocks, so it
2053 does not fit the merge_blocks interface and is kept here in
2054 the hope that it will become useless once more of the compiler
2055 is converted to use cfg_layout mode. */
2057 if ((mode & CLEANUP_CFGLAYOUT)
2058 && can_merge_blocks_p (b, c))
2060 merge_blocks (b, c);
2061 update_forwarder_flag (b);
2062 changed_here = true;
2064 else if (!(mode & CLEANUP_CFGLAYOUT)
2065 /* If the jump insn has side effects,
2066 we can't kill the edge. */
2067 && (!JUMP_P (BB_END (b))
2068 || (reload_completed
2069 ? simplejump_p (BB_END (b))
2070 : (onlyjump_p (BB_END (b))
2071 && !tablejump_p (BB_END (b),
2072 NULL, NULL))))
2073 && (next = merge_blocks_move (s, b, c, mode)))
2075 b = next;
2076 changed_here = true;
2080 /* Simplify branch over branch. */
2081 if ((mode & CLEANUP_EXPENSIVE)
2082 && !(mode & CLEANUP_CFGLAYOUT)
2083 && try_simplify_condjump (b))
2084 changed_here = true;
2086 /* If B has a single outgoing edge, but uses a
2087 non-trivial jump instruction without side-effects, we
2088 can either delete the jump entirely, or replace it
2089 with a simple unconditional jump. */
2090 if (single_succ_p (b)
2091 && single_succ (b) != EXIT_BLOCK_PTR
2092 && onlyjump_p (BB_END (b))
2093 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2094 && try_redirect_by_replacing_jump (single_succ_edge (b),
2095 single_succ (b),
2096 (mode & CLEANUP_CFGLAYOUT) != 0))
2098 update_forwarder_flag (b);
2099 changed_here = true;
2102 /* Simplify branch to branch. */
2103 if (try_forward_edges (mode, b))
2104 changed_here = true;
2106 /* Look for shared code between blocks. */
2107 if ((mode & CLEANUP_CROSSJUMP)
2108 && try_crossjump_bb (mode, b))
2109 changed_here = true;
2111 /* Don't get confused by the index shift caused by
2112 deleting blocks. */
2113 if (!changed_here)
2114 b = b->next_bb;
2115 else
2116 changed = true;
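      /* The loop above never visits the exit block, so search its
	 predecessors for common code separately. */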
2119 if ((mode & CLEANUP_CROSSJUMP)
2120 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2121 changed = true;
2123 #ifdef ENABLE_CHECKING
2124 if (changed)
2125 verify_flow_info ();
2126 #endif
2128 changed_overall |= changed;
2129 first_pass = false;
2131 while (changed);
2134 if (mode & CLEANUP_CROSSJUMP)
2135 remove_fake_exit_edges ();
2137 FOR_ALL_BB (b)
2138 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2140 return changed_overall;
2143 /* Delete all unreachable basic blocks. */
2145 bool
2146 delete_unreachable_blocks (void)
2148 bool changed = false;
2149 basic_block b, next_bb;
2151 find_unreachable_blocks ();
2153 /* Delete all unreachable basic blocks. */
2155 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2157 next_bb = b->next_bb;
2159 if (!(b->flags & BB_REACHABLE))
2161 delete_basic_block (b);
2162 changed = true;
2166 if (changed)
2167 tidy_fallthru_edges ();
2168 return changed;
2171 /* Delete any jump tables that are never referenced. We can't delete them
2172 at the time of removing the tablejump insn as they are referenced by the
2173 preceding insns computing the destination, so we delay deletion and
2174 garbage-collect them once life information is computed. */
2175 void
2176 delete_dead_jumptables (void)
2178 basic_block bb;
2180 /* A dead jump table does not belong to any basic block. Scan insns
2181 between two adjacent basic blocks. */
2182 FOR_EACH_BB (bb)
2184 rtx insn, next;
2186 for (insn = NEXT_INSN (BB_END (bb));
2187 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2188 insn = next)
2190 next = NEXT_INSN (insn);
2191 if (LABEL_P (insn)
2192 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2193 && JUMP_P (next)
2194 && (GET_CODE (PATTERN (next)) == ADDR_VEC
2195 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
2197 rtx label = insn, jump = next;
2199 if (dump_file)
2200 fprintf (dump_file, "Dead jumptable %i removed\n",
2201 INSN_UID (insn));
2203 next = NEXT_INSN (next);
2204 delete_insn (jump);
2205 delete_insn (label);
2212 /* Tidy the CFG by deleting unreachable code and whatnot. */
2214 bool
2215 cleanup_cfg (int mode)
2217 bool changed = false;
2219 /* Set the cfglayout mode flag here. We could update all the callers
2220 but that is just inconvenient, especially given that we eventually
2221 want to have cfglayout mode as the default. */
2222 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2223 mode |= CLEANUP_CFGLAYOUT;
2225 timevar_push (TV_CLEANUP_CFG);
2226 if (delete_unreachable_blocks ())
2228 changed = true;
2229 /* We've possibly created trivially dead code. Clean it up right
2230 now to expose more opportunities for try_optimize_cfg. */
2231 if (!(mode & (CLEANUP_NO_INSN_DEL))
2232 && !reload_completed)
2233 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2236 compact_blocks ();
2238 while (try_optimize_cfg (mode))
2240 delete_unreachable_blocks (), changed = true;
2241 if (!(mode & CLEANUP_NO_INSN_DEL)
2242 && (mode & CLEANUP_EXPENSIVE)
2243 && !reload_completed)
2245 if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2246 break;
2248 else
2249 break;
2252 /* Don't call delete_dead_jumptables in cfglayout mode, because
2253 that function assumes that jump tables are in the insns stream.
2254 But we also don't _have_ to delete dead jumptables in cfglayout
2255 mode because we shouldn't even be looking at things that are
2256 not in a basic block. Dead jumptables are cleaned up when
2257 going out of cfglayout mode. */
2258 if (!(mode & CLEANUP_CFGLAYOUT))
2259 delete_dead_jumptables ();
2261 timevar_pop (TV_CLEANUP_CFG);
2263 return changed;
2266 static unsigned int
2267 rest_of_handle_jump (void)
2269 delete_unreachable_blocks ();
2271 if (cfun->tail_call_emit)
2272 fixup_tail_calls ();
2273 return 0;
2276 struct tree_opt_pass pass_jump =
2278 "sibling", /* name */
2279 NULL, /* gate */
2280 rest_of_handle_jump, /* execute */
2281 NULL, /* sub */
2282 NULL, /* next */
2283 0, /* static_pass_number */
2284 TV_JUMP, /* tv_id */
2285 0, /* properties_required */
2286 0, /* properties_provided */
2287 0, /* properties_destroyed */
2288 TODO_ggc_collect, /* todo_flags_start */
2289 TODO_verify_flow, /* todo_flags_finish */
2290 'i' /* letter */
2294 static unsigned int
2295 rest_of_handle_jump2 (void)
2297 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2298 if (dump_file)
2299 dump_flow_info (dump_file, dump_flags);
2300 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2301 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2302 return 0;
2306 struct tree_opt_pass pass_jump2 =
2308 "jump", /* name */
2309 NULL, /* gate */
2310 rest_of_handle_jump2, /* execute */
2311 NULL, /* sub */
2312 NULL, /* next */
2313 0, /* static_pass_number */
2314 TV_JUMP, /* tv_id */
2315 0, /* properties_required */
2316 0, /* properties_provided */
2317 0, /* properties_destroyed */
2318 TODO_ggc_collect, /* todo_flags_start */
2319 TODO_dump_func | TODO_verify_rtl_sharing,/* todo_flags_finish */
2320 'j' /* letter */