1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
 22 /* This file contains the control flow optimizer.  The main entry point is
 23 cleanup_cfg.  The following optimizations are performed:
25 - Unreachable blocks removal
 26 - Edge forwarding (an edge to a forwarder block is forwarded to the
 27 block's successor.  Simplification of the branch instruction is performed
 28 by the underlying infrastructure so the branch can be converted to a
 29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "regs.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "cfglayout.h"
52 #include "emit-rtl.h"
53 #include "tree-pass.h"
54 #include "cfgloop.h"
55 #include "expr.h"
56 #include "df.h"
57 #include "dce.h"
59 #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)
 61 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
62 static bool first_pass;
63 static bool try_crossjump_to_edge (int, edge, edge);
64 static bool try_crossjump_bb (int, basic_block);
65 static bool outgoing_edges_match (int, basic_block, basic_block);
66 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
67 static bool old_insns_match_p (int, rtx, rtx);
69 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
70 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
71 static bool try_optimize_cfg (int);
72 static bool try_simplify_condjump (basic_block);
73 static bool try_forward_edges (int, basic_block);
74 static edge thread_jump (edge, basic_block);
75 static bool mark_effect (rtx, bitmap);
76 static void notice_new_block (basic_block);
77 static void update_forwarder_flag (basic_block);
78 static int mentions_nonequal_regs (rtx *, void *);
79 static void merge_memattrs (rtx, rtx);
81 /* Set flags for newly created block. */
83 static void
84 notice_new_block (basic_block bb)
86 if (!bb)
87 return;
89 if (forwarder_block_p (bb))
90 bb->flags |= BB_FORWARDER_BLOCK;
93 /* Recompute forwarder flag after block has been modified. */
95 static void
96 update_forwarder_flag (basic_block bb)
98 if (forwarder_block_p (bb))
99 bb->flags |= BB_FORWARDER_BLOCK;
100 else
101 bb->flags &= ~BB_FORWARDER_BLOCK;
104 /* Simplify a conditional jump around an unconditional jump.
105 Return true if something changed. */
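   /* Illustrative sketch (added example, not from the original source; the
      labels L1/L2 are made up): a conditional jump around a simplejump such as

            if (cc) goto L1;        <- cbranch_block
            goto L2;                <- jump_block, a forwarder
        L1: ...                     <- cbranch_dest_block

      is rewritten by inverting the condition so that the forwarder block and
      its unconditional jump can be deleted:

            if (!cc) goto L2;
        L1: ...                                                              */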
107 static bool
108 try_simplify_condjump (basic_block cbranch_block)
110 basic_block jump_block, jump_dest_block, cbranch_dest_block;
111 edge cbranch_jump_edge, cbranch_fallthru_edge;
112 rtx cbranch_insn;
114 /* Verify that there are exactly two successors. */
115 if (EDGE_COUNT (cbranch_block->succs) != 2)
116 return false;
118 /* Verify that we've got a normal conditional branch at the end
119 of the block. */
120 cbranch_insn = BB_END (cbranch_block);
121 if (!any_condjump_p (cbranch_insn))
122 return false;
124 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
125 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
127 /* The next block must not have multiple predecessors, must not
128 be the last block in the function, and must contain just the
129 unconditional jump. */
130 jump_block = cbranch_fallthru_edge->dest;
131 if (!single_pred_p (jump_block)
132 || jump_block->next_bb == EXIT_BLOCK_PTR
133 || !FORWARDER_BLOCK_P (jump_block))
134 return false;
135 jump_dest_block = single_succ (jump_block);
137 /* If we are partitioning hot/cold basic blocks, we don't want to
138 mess up unconditional or indirect jumps that cross between hot
139 and cold sections.
141 Basic block partitioning may result in some jumps that appear to
142 be optimizable (or blocks that appear to be mergeable), but which really
143 must be left untouched (they are required to make it safely across
144 partition boundaries). See the comments at the top of
145 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
147 if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
148 || (cbranch_jump_edge->flags & EDGE_CROSSING))
149 return false;
151 /* The conditional branch must target the block after the
152 unconditional branch. */
153 cbranch_dest_block = cbranch_jump_edge->dest;
155 if (cbranch_dest_block == EXIT_BLOCK_PTR
156 || !can_fallthru (jump_block, cbranch_dest_block))
157 return false;
159 /* Invert the conditional branch. */
160 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
161 return false;
163 if (dump_file)
164 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
165 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
167 /* Success. Update the CFG to match. Note that after this point
168 the edge variable names appear backwards; the redirection is done
169 this way to preserve edge profile data. */
170 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
171 cbranch_dest_block);
172 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
173 jump_dest_block);
174 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
175 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
176 update_br_prob_note (cbranch_block);
178 /* Delete the block with the unconditional jump, and clean up the mess. */
179 delete_basic_block (jump_block);
180 tidy_fallthru_edge (cbranch_jump_edge);
181 update_forwarder_flag (cbranch_block);
183 return true;
 186 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
 187 effect on the destination register.  Used by jump threading. */
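   /* Clarifying note (added): a SET whose source cselib proves equal to its
      destination is treated as a no-op; otherwise the destination register is
      recorded in NONEQUAL.  A CLOBBER removes the register from NONEQUAL,
      since the clobbered value is dead and need not match.  */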
189 static bool
190 mark_effect (rtx exp, regset nonequal)
192 int regno;
193 rtx dest;
194 switch (GET_CODE (exp))
 196 /* In case we do clobber the register, mark it as equal, as we know the
 197 value is dead so it does not have to match. */
198 case CLOBBER:
199 if (REG_P (XEXP (exp, 0)))
201 dest = XEXP (exp, 0);
202 regno = REGNO (dest);
203 CLEAR_REGNO_REG_SET (nonequal, regno);
204 if (regno < FIRST_PSEUDO_REGISTER)
206 int n = hard_regno_nregs[regno][GET_MODE (dest)];
207 while (--n > 0)
208 CLEAR_REGNO_REG_SET (nonequal, regno + n);
211 return false;
213 case SET:
214 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
215 return false;
216 dest = SET_DEST (exp);
217 if (dest == pc_rtx)
218 return false;
219 if (!REG_P (dest))
220 return true;
221 regno = REGNO (dest);
222 SET_REGNO_REG_SET (nonequal, regno);
223 if (regno < FIRST_PSEUDO_REGISTER)
225 int n = hard_regno_nregs[regno][GET_MODE (dest)];
226 while (--n > 0)
227 SET_REGNO_REG_SET (nonequal, regno + n);
229 return false;
231 default:
232 return false;
236 /* Return nonzero if X is a register set in regset DATA.
237 Called via for_each_rtx. */
238 static int
239 mentions_nonequal_regs (rtx *x, void *data)
241 regset nonequal = (regset) data;
242 if (REG_P (*x))
244 int regno;
246 regno = REGNO (*x);
247 if (REGNO_REG_SET_P (nonequal, regno))
248 return 1;
249 if (regno < FIRST_PSEUDO_REGISTER)
251 int n = hard_regno_nregs[regno][GET_MODE (*x)];
252 while (--n > 0)
253 if (REGNO_REG_SET_P (nonequal, regno + n))
254 return 1;
257 return 0;
 259 /* Attempt to prove that the basic block B will have no side effects and
 260 will always continue along the same edge if reached via E.  Return that
 261 edge if it exists, NULL otherwise. */
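   /* Illustrative sketch (added example, not from the original source): if
      E->SRC ends in "if (x == 0) goto B" and B ends in "if (x >= 0) goto C",
      then arriving at B along the taken edge implies x == 0, so x >= 0 is
      known to hold and control can be threaded from E->SRC straight to C,
      provided B itself computes nothing with side effects (verified below
      using cselib).  */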
263 static edge
264 thread_jump (edge e, basic_block b)
266 rtx set1, set2, cond1, cond2, insn;
267 enum rtx_code code1, code2, reversed_code2;
268 bool reverse1 = false;
269 unsigned i;
270 regset nonequal;
271 bool failed = false;
272 reg_set_iterator rsi;
274 if (b->flags & BB_NONTHREADABLE_BLOCK)
275 return NULL;
 277 /* At the moment we handle only conditional jumps, but later we may
 278 want to extend this code to tablejumps and others. */
279 if (EDGE_COUNT (e->src->succs) != 2)
280 return NULL;
281 if (EDGE_COUNT (b->succs) != 2)
283 b->flags |= BB_NONTHREADABLE_BLOCK;
284 return NULL;
287 /* Second branch must end with onlyjump, as we will eliminate the jump. */
288 if (!any_condjump_p (BB_END (e->src)))
289 return NULL;
291 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
293 b->flags |= BB_NONTHREADABLE_BLOCK;
294 return NULL;
297 set1 = pc_set (BB_END (e->src));
298 set2 = pc_set (BB_END (b));
299 if (((e->flags & EDGE_FALLTHRU) != 0)
300 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
301 reverse1 = true;
303 cond1 = XEXP (SET_SRC (set1), 0);
304 cond2 = XEXP (SET_SRC (set2), 0);
305 if (reverse1)
306 code1 = reversed_comparison_code (cond1, BB_END (e->src));
307 else
308 code1 = GET_CODE (cond1);
310 code2 = GET_CODE (cond2);
311 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
313 if (!comparison_dominates_p (code1, code2)
314 && !comparison_dominates_p (code1, reversed_code2))
315 return NULL;
 317 /* Ensure that the comparison operators are equivalent.
 318 ??? This is far too pessimistic.  We should allow swapped operands,
 319 different CCmodes, or, for example, interval comparisons that
 320 dominate even when the operands are not equivalent. */
321 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
322 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
323 return NULL;
325 /* Short circuit cases where block B contains some side effects, as we can't
326 safely bypass it. */
327 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
328 insn = NEXT_INSN (insn))
329 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
331 b->flags |= BB_NONTHREADABLE_BLOCK;
332 return NULL;
335 cselib_init (false);
337 /* First process all values computed in the source basic block. */
338 for (insn = NEXT_INSN (BB_HEAD (e->src));
339 insn != NEXT_INSN (BB_END (e->src));
340 insn = NEXT_INSN (insn))
341 if (INSN_P (insn))
342 cselib_process_insn (insn);
344 nonequal = BITMAP_ALLOC (NULL);
345 CLEAR_REG_SET (nonequal);
 347 /* Now assume that we've continued along the edge E to B and continue
 348 processing as if it were the same basic block.
 349 Our goal is to prove that the whole block is a NOOP. */
351 for (insn = NEXT_INSN (BB_HEAD (b));
352 insn != NEXT_INSN (BB_END (b)) && !failed;
353 insn = NEXT_INSN (insn))
355 if (INSN_P (insn))
357 rtx pat = PATTERN (insn);
359 if (GET_CODE (pat) == PARALLEL)
361 for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
362 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
364 else
365 failed |= mark_effect (pat, nonequal);
368 cselib_process_insn (insn);
 371 /* Later we should clear nonequal of dead registers.  So far we don't
 372 have liveness information in cfg_cleanup. */
373 if (failed)
375 b->flags |= BB_NONTHREADABLE_BLOCK;
376 goto failed_exit;
 379 /* cond2 must not mention any register whose value is not known to be
 380 equal to its value in the former block. */
381 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
382 goto failed_exit;
384 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
385 goto failed_exit;
387 BITMAP_FREE (nonequal);
388 cselib_finish ();
389 if ((comparison_dominates_p (code1, code2) != 0)
390 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
391 return BRANCH_EDGE (b);
392 else
393 return FALLTHRU_EDGE (b);
395 failed_exit:
396 BITMAP_FREE (nonequal);
397 cselib_finish ();
398 return NULL;
401 /* Attempt to forward edges leaving basic block B.
402 Return true if successful. */
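   /* Illustrative sketch (added example, not from the original source; block
      names are made up): given

            BB_A: ... goto BB_F;
            BB_F: goto BB_T;          <- forwarder block

      the edge A->F is redirected to point directly at BB_T; chains of
      forwarders are followed until a non-forwarder block is reached, and with
      CLEANUP_THREADING the walk may also step through blocks whose branch
      outcome thread_jump can prove.  */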
404 static bool
405 try_forward_edges (int mode, basic_block b)
407 bool changed = false;
408 edge_iterator ei;
409 edge e, *threaded_edges = NULL;
411 /* If we are partitioning hot/cold basic blocks, we don't want to
412 mess up unconditional or indirect jumps that cross between hot
413 and cold sections.
415 Basic block partitioning may result in some jumps that appear to
 416 be optimizable (or blocks that appear to be mergeable), but which really
 417 must be left untouched (they are required to make it safely across
418 partition boundaries). See the comments at the top of
419 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
421 if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
422 return false;
424 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
426 basic_block target, first;
427 int counter;
428 bool threaded = false;
429 int nthreaded_edges = 0;
430 bool may_thread = first_pass | df_get_bb_dirty (b);
 432 /* Skip complex edges because we don't know how to update them.
 434 Still handle fallthru edges, as we may succeed in forwarding the
 435 fallthru edge to the same place as the branch edge of a conditional
 436 branch, turning the conditional branch into an unconditional branch. */
437 if (e->flags & EDGE_COMPLEX)
439 ei_next (&ei);
440 continue;
443 target = first = e->dest;
444 counter = NUM_FIXED_BLOCKS;
446 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
447 up jumps that cross between hot/cold sections.
449 Basic block partitioning may result in some jumps that appear
450 to be optimizable (or blocks that appear to be mergeable), but which
451 really must be left untouched (they are required to make it safely
452 across partition boundaries). See the comments at the top of
453 bb-reorder.c:partition_hot_cold_basic_blocks for complete
454 details. */
456 if (first != EXIT_BLOCK_PTR
457 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
458 return false;
460 while (counter < n_basic_blocks)
462 basic_block new_target = NULL;
463 bool new_target_threaded = false;
464 may_thread |= df_get_bb_dirty (target);
466 if (FORWARDER_BLOCK_P (target)
467 && !(single_succ_edge (target)->flags & EDGE_CROSSING)
468 && single_succ (target) != EXIT_BLOCK_PTR)
470 /* Bypass trivial infinite loops. */
471 new_target = single_succ (target);
472 if (target == new_target)
473 counter = n_basic_blocks;
 476 /* Allow threading over only one edge at a time to simplify updating
 477 of probabilities. */
478 else if ((mode & CLEANUP_THREADING) && may_thread)
480 edge t = thread_jump (e, target);
481 if (t)
483 if (!threaded_edges)
484 threaded_edges = XNEWVEC (edge, n_basic_blocks);
485 else
487 int i;
489 /* Detect an infinite loop across blocks not
490 including the start block. */
491 for (i = 0; i < nthreaded_edges; ++i)
492 if (threaded_edges[i] == t)
493 break;
494 if (i < nthreaded_edges)
496 counter = n_basic_blocks;
497 break;
501 /* Detect an infinite loop across the start block. */
502 if (t->dest == b)
503 break;
505 gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
506 threaded_edges[nthreaded_edges++] = t;
508 new_target = t->dest;
509 new_target_threaded = true;
513 if (!new_target)
514 break;
516 counter++;
517 target = new_target;
518 threaded |= new_target_threaded;
521 if (counter >= n_basic_blocks)
523 if (dump_file)
524 fprintf (dump_file, "Infinite loop in BB %i.\n",
525 target->index);
527 else if (target == first)
528 ; /* We didn't do anything. */
529 else
531 /* Save the values now, as the edge may get removed. */
532 gcov_type edge_count = e->count;
533 int edge_probability = e->probability;
534 int edge_frequency;
535 int n = 0;
537 /* Don't force if target is exit block. */
538 if (threaded && target != EXIT_BLOCK_PTR)
540 notice_new_block (redirect_edge_and_branch_force (e, target));
541 if (dump_file)
542 fprintf (dump_file, "Conditionals threaded.\n");
544 else if (!redirect_edge_and_branch (e, target))
546 if (dump_file)
547 fprintf (dump_file,
548 "Forwarding edge %i->%i to %i failed.\n",
549 b->index, e->dest->index, target->index);
550 ei_next (&ei);
551 continue;
554 /* We successfully forwarded the edge. Now update profile
555 data: for each edge we traversed in the chain, remove
556 the original edge's execution count. */
557 edge_frequency = ((edge_probability * b->frequency
558 + REG_BR_PROB_BASE / 2)
559 / REG_BR_PROB_BASE);
561 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
562 b->flags |= BB_FORWARDER_BLOCK;
566 edge t;
568 if (!single_succ_p (first))
570 gcc_assert (n < nthreaded_edges);
571 t = threaded_edges [n++];
572 gcc_assert (t->src == first);
573 update_bb_profile_for_threading (first, edge_frequency,
574 edge_count, t);
575 update_br_prob_note (first);
577 else
579 first->count -= edge_count;
580 if (first->count < 0)
581 first->count = 0;
582 first->frequency -= edge_frequency;
583 if (first->frequency < 0)
584 first->frequency = 0;
 585 /* It is possible that as a result of
 586 threading we've removed the edge because
 587 it was threaded to the fallthru edge.
 588 Avoid getting out of sync. */
589 if (n < nthreaded_edges
590 && first == threaded_edges [n]->src)
591 n++;
592 t = single_succ_edge (first);
595 t->count -= edge_count;
596 if (t->count < 0)
597 t->count = 0;
598 first = t->dest;
600 while (first != target);
602 changed = true;
603 continue;
605 ei_next (&ei);
608 if (threaded_edges)
609 free (threaded_edges);
610 return changed;
614 /* Blocks A and B are to be merged into a single block. A has no incoming
615 fallthru edge, so it can be moved before B without adding or modifying
616 any jumps (aside from the jump from A to B). */
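   /* Clarifying note (added): because nothing falls through into A, physically
      relocating A's insns to just before B leaves all other control flow
      intact; the explicit jump from A to B then becomes a fallthru and is
      removed when the two blocks are merged.  */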
618 static void
619 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
621 rtx barrier;
623 /* If we are partitioning hot/cold basic blocks, we don't want to
624 mess up unconditional or indirect jumps that cross between hot
625 and cold sections.
627 Basic block partitioning may result in some jumps that appear to
628 be optimizable (or blocks that appear to be mergeable), but which really
629 must be left untouched (they are required to make it safely across
630 partition boundaries). See the comments at the top of
631 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
633 if (BB_PARTITION (a) != BB_PARTITION (b))
634 return;
636 barrier = next_nonnote_insn (BB_END (a));
637 gcc_assert (BARRIER_P (barrier));
638 delete_insn (barrier);
640 /* Scramble the insn chain. */
641 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
642 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
643 df_set_bb_dirty (a);
645 if (dump_file)
646 fprintf (dump_file, "Moved block %d before %d and merged.\n",
647 a->index, b->index);
649 /* Swap the records for the two blocks around. */
651 unlink_block (a);
652 link_block (a, b->prev_bb);
654 /* Now blocks A and B are contiguous. Merge them. */
655 merge_blocks (a, b);
658 /* Blocks A and B are to be merged into a single block. B has no outgoing
659 fallthru edge, so it can be moved after A without adding or modifying
660 any jumps (aside from the jump from A to B). */
662 static void
663 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
665 rtx barrier, real_b_end;
666 rtx label, table;
668 /* If we are partitioning hot/cold basic blocks, we don't want to
669 mess up unconditional or indirect jumps that cross between hot
670 and cold sections.
672 Basic block partitioning may result in some jumps that appear to
673 be optimizable (or blocks that appear to be mergeable), but which really
674 must be left untouched (they are required to make it safely across
675 partition boundaries). See the comments at the top of
676 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
678 if (BB_PARTITION (a) != BB_PARTITION (b))
679 return;
681 real_b_end = BB_END (b);
 683 /* If there is a jump table following block B, temporarily add the jump table
 684 to block B so that it will also be moved to the correct location. */
685 if (tablejump_p (BB_END (b), &label, &table)
686 && prev_active_insn (label) == BB_END (b))
688 BB_END (b) = table;
691 /* There had better have been a barrier there. Delete it. */
692 barrier = NEXT_INSN (BB_END (b));
693 if (barrier && BARRIER_P (barrier))
694 delete_insn (barrier);
697 /* Scramble the insn chain. */
698 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
700 /* Restore the real end of b. */
701 BB_END (b) = real_b_end;
703 if (dump_file)
704 fprintf (dump_file, "Moved block %d after %d and merged.\n",
705 b->index, a->index);
707 /* Now blocks A and B are contiguous. Merge them. */
708 merge_blocks (a, b);
 711 /* Attempt to merge basic blocks that are potentially non-adjacent.
 712 Return NULL iff the attempt failed, otherwise return the basic block
 713 where cleanup_cfg should continue.  Because the merging commonly
 714 moves a basic block away or introduces another optimization
 715 possibility, return the basic block just before B so cleanup_cfg
 716 doesn't need to iterate.
 718 It may be a good idea to return the basic block before C in the case
 719 that C has been moved after B and originally appeared earlier in the
 720 insn sequence, but we have no information available about the
 721 relative ordering of these two.  Hopefully it is not too common. */
723 static basic_block
724 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
726 basic_block next;
728 /* If we are partitioning hot/cold basic blocks, we don't want to
729 mess up unconditional or indirect jumps that cross between hot
730 and cold sections.
732 Basic block partitioning may result in some jumps that appear to
733 be optimizable (or blocks that appear to be mergeable), but which really
734 must be left untouched (they are required to make it safely across
735 partition boundaries). See the comments at the top of
736 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
738 if (BB_PARTITION (b) != BB_PARTITION (c))
739 return NULL;
741 /* If B has a fallthru edge to C, no need to move anything. */
742 if (e->flags & EDGE_FALLTHRU)
744 int b_index = b->index, c_index = c->index;
745 merge_blocks (b, c);
746 update_forwarder_flag (b);
748 if (dump_file)
749 fprintf (dump_file, "Merged %d and %d without moving.\n",
750 b_index, c_index);
752 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
755 /* Otherwise we will need to move code around. Do that only if expensive
756 transformations are allowed. */
757 else if (mode & CLEANUP_EXPENSIVE)
759 edge tmp_edge, b_fallthru_edge;
760 bool c_has_outgoing_fallthru;
761 bool b_has_incoming_fallthru;
762 edge_iterator ei;
764 /* Avoid overactive code motion, as the forwarder blocks should be
765 eliminated by edge redirection instead. One exception might have
766 been if B is a forwarder block and C has no fallthru edge, but
767 that should be cleaned up by bb-reorder instead. */
768 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
769 return NULL;
 771 /* We must make sure not to munge the nesting of lexical blocks
 772 and loop notes.  This is done by squeezing out all the notes
 773 and leaving them there to lie.  Not ideal, but functional. */
775 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
776 if (tmp_edge->flags & EDGE_FALLTHRU)
777 break;
779 c_has_outgoing_fallthru = (tmp_edge != NULL);
781 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
782 if (tmp_edge->flags & EDGE_FALLTHRU)
783 break;
785 b_has_incoming_fallthru = (tmp_edge != NULL);
786 b_fallthru_edge = tmp_edge;
787 next = b->prev_bb;
788 if (next == c)
789 next = next->prev_bb;
791 /* Otherwise, we're going to try to move C after B. If C does
792 not have an outgoing fallthru, then it can be moved
793 immediately after B without introducing or modifying jumps. */
794 if (! c_has_outgoing_fallthru)
796 merge_blocks_move_successor_nojumps (b, c);
797 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
800 /* If B does not have an incoming fallthru, then it can be moved
801 immediately before C without introducing or modifying jumps.
802 C cannot be the first block, so we do not have to worry about
803 accessing a non-existent block. */
805 if (b_has_incoming_fallthru)
807 basic_block bb;
809 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
810 return NULL;
811 bb = force_nonfallthru (b_fallthru_edge);
812 if (bb)
813 notice_new_block (bb);
816 merge_blocks_move_predecessor_nojumps (b, c);
817 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
820 return NULL;
 824 /* Remove the memory attributes of the MEM expressions in X and Y
 825 wherever they are not equal. */
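   /* Clarifying note (added): the merged attributes are made conservative so
      that the shared insn stays valid for either original access: differing
      alias sets are reset to 0, differing MEM_EXPRs and offsets are dropped,
      the size is widened to the maximum and the alignment lowered to the
      minimum of the two.  */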
827 void
828 merge_memattrs (rtx x, rtx y)
830 int i;
831 int j;
832 enum rtx_code code;
833 const char *fmt;
835 if (x == y)
836 return;
837 if (x == 0 || y == 0)
838 return;
840 code = GET_CODE (x);
842 if (code != GET_CODE (y))
843 return;
845 if (GET_MODE (x) != GET_MODE (y))
846 return;
848 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
850 if (! MEM_ATTRS (x))
851 MEM_ATTRS (y) = 0;
852 else if (! MEM_ATTRS (y))
853 MEM_ATTRS (x) = 0;
854 else
856 rtx mem_size;
858 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
860 set_mem_alias_set (x, 0);
861 set_mem_alias_set (y, 0);
864 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
866 set_mem_expr (x, 0);
867 set_mem_expr (y, 0);
868 set_mem_offset (x, 0);
869 set_mem_offset (y, 0);
871 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
873 set_mem_offset (x, 0);
874 set_mem_offset (y, 0);
877 if (!MEM_SIZE (x))
878 mem_size = NULL_RTX;
879 else if (!MEM_SIZE (y))
880 mem_size = NULL_RTX;
881 else
882 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
883 INTVAL (MEM_SIZE (y))));
884 set_mem_size (x, mem_size);
885 set_mem_size (y, mem_size);
887 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
888 set_mem_align (y, MEM_ALIGN (x));
892 fmt = GET_RTX_FORMAT (code);
893 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
895 switch (fmt[i])
897 case 'E':
898 /* Two vectors must have the same length. */
899 if (XVECLEN (x, i) != XVECLEN (y, i))
900 return;
902 for (j = 0; j < XVECLEN (x, i); j++)
903 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
905 break;
907 case 'e':
908 merge_memattrs (XEXP (x, i), XEXP (y, i));
911 return;
915 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
917 static bool
918 old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
920 rtx p1, p2;
922 /* Verify that I1 and I2 are equivalent. */
923 if (GET_CODE (i1) != GET_CODE (i2))
924 return false;
926 p1 = PATTERN (i1);
927 p2 = PATTERN (i2);
929 if (GET_CODE (p1) != GET_CODE (p2))
930 return false;
932 /* If this is a CALL_INSN, compare register usage information.
933 If we don't check this on stack register machines, the two
934 CALL_INSNs might be merged leaving reg-stack.c with mismatching
935 numbers of stack registers in the same basic block.
936 If we don't check this on machines with delay slots, a delay slot may
937 be filled that clobbers a parameter expected by the subroutine.
939 ??? We take the simple route for now and assume that if they're
940 equal, they were constructed identically. */
942 if (CALL_P (i1)
943 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
944 CALL_INSN_FUNCTION_USAGE (i2))
945 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
946 return false;
948 #ifdef STACK_REGS
949 /* If cross_jump_death_matters is not 0, the insn's mode
950 indicates whether or not the insn contains any stack-like
951 regs. */
953 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
955 /* If register stack conversion has already been done, then
956 death notes must also be compared before it is certain that
957 the two instruction streams match. */
959 rtx note;
960 HARD_REG_SET i1_regset, i2_regset;
962 CLEAR_HARD_REG_SET (i1_regset);
963 CLEAR_HARD_REG_SET (i2_regset);
965 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
966 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
967 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
969 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
970 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
971 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
973 if (!hard_reg_set_equal_p (i1_regset, i2_regset))
974 return false;
976 #endif
978 if (reload_completed
979 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
980 return true;
982 /* Do not do EQUIV substitution after reload. First, we're undoing the
983 work of reload_cse. Second, we may be undoing the work of the post-
984 reload splitting pass. */
985 /* ??? Possibly add a new phase switch variable that can be used by
986 targets to disallow the troublesome insns after splitting. */
987 if (!reload_completed)
989 /* The following code helps take care of G++ cleanups. */
990 rtx equiv1 = find_reg_equal_equiv_note (i1);
991 rtx equiv2 = find_reg_equal_equiv_note (i2);
993 if (equiv1 && equiv2
994 /* If the equivalences are not to a constant, they may
995 reference pseudos that no longer exist, so we can't
996 use them. */
997 && (! reload_completed
998 || (CONSTANT_P (XEXP (equiv1, 0))
999 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1001 rtx s1 = single_set (i1);
1002 rtx s2 = single_set (i2);
1003 if (s1 != 0 && s2 != 0
1004 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1006 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1007 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1008 if (! rtx_renumbered_equal_p (p1, p2))
1009 cancel_changes (0);
1010 else if (apply_change_group ())
1011 return true;
1016 return false;
 1019 /* Look through the insns at the end of BB1 and BB2 and find the longest
 1020 sequence that is equivalent.  Store the first insns of that sequence
 1021 in *F1 and *F2 and return the sequence length.
 1023 To simplify callers of this function, if the blocks match exactly,
 1024 store the head of the blocks in *F1 and *F2. */
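   /* Illustrative sketch (added example, not from the original source): if BB1
      ends in  {a; b; c; jump L}  and BB2 ends in  {x; b; c; jump L}, the
      backward walk matches the jumps, then "c", then "b", and stops at the
      "a" vs. "x" mismatch; it returns 2 matched insns (a simplejump is not
      counted), with *F1 and *F2 pointing at the start of the matched tail
      (the "b" insns, or preceding notes/labels).  */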
1026 static int
1027 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1028 basic_block bb2, rtx *f1, rtx *f2)
1030 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1031 int ninsns = 0;
1033 /* Skip simple jumps at the end of the blocks. Complex jumps still
1034 need to be compared for equivalence, which we'll do below. */
1036 i1 = BB_END (bb1);
1037 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1038 if (onlyjump_p (i1)
1039 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1041 last1 = i1;
1042 i1 = PREV_INSN (i1);
1045 i2 = BB_END (bb2);
1046 if (onlyjump_p (i2)
1047 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1049 last2 = i2;
1050 /* Count everything except for unconditional jump as insn. */
1051 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1052 ninsns++;
1053 i2 = PREV_INSN (i2);
1056 while (true)
1058 /* Ignore notes. */
1059 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1060 i1 = PREV_INSN (i1);
1062 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1063 i2 = PREV_INSN (i2);
1065 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1066 break;
1068 if (!old_insns_match_p (mode, i1, i2))
1069 break;
1071 merge_memattrs (i1, i2);
1073 /* Don't begin a cross-jump with a NOTE insn. */
1074 if (INSN_P (i1))
1076 /* If the merged insns have different REG_EQUAL notes, then
1077 remove them. */
1078 rtx equiv1 = find_reg_equal_equiv_note (i1);
1079 rtx equiv2 = find_reg_equal_equiv_note (i2);
1081 if (equiv1 && !equiv2)
1082 remove_note (i1, equiv1);
1083 else if (!equiv1 && equiv2)
1084 remove_note (i2, equiv2);
1085 else if (equiv1 && equiv2
1086 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1088 remove_note (i1, equiv1);
1089 remove_note (i2, equiv2);
1092 afterlast1 = last1, afterlast2 = last2;
1093 last1 = i1, last2 = i2;
1094 ninsns++;
1097 i1 = PREV_INSN (i1);
1098 i2 = PREV_INSN (i2);
1101 #ifdef HAVE_cc0
1102 /* Don't allow the insn after a compare to be shared by
1103 cross-jumping unless the compare is also shared. */
1104 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1105 last1 = afterlast1, last2 = afterlast2, ninsns--;
1106 #endif
 1108 /* Include preceding notes and labels in the cross-jump.  One,
 1109 this may bring us to the head of the blocks as requested above.
 1110 Two, it keeps line number notes as matched as possible. */
1111 if (ninsns)
1113 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1114 last1 = PREV_INSN (last1);
1116 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1117 last1 = PREV_INSN (last1);
1119 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1120 last2 = PREV_INSN (last2);
1122 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1123 last2 = PREV_INSN (last2);
1125 *f1 = last1;
1126 *f2 = last2;
1129 return ninsns;
1132 /* Return true iff the condbranches at the end of BB1 and BB2 match. */
1133 bool
1134 condjump_equiv_p (struct equiv_info *info, bool call_init)
1136 basic_block bb1 = info->x_block;
1137 basic_block bb2 = info->y_block;
1138 edge b1 = BRANCH_EDGE (bb1);
1139 edge b2 = BRANCH_EDGE (bb2);
1140 edge f1 = FALLTHRU_EDGE (bb1);
1141 edge f2 = FALLTHRU_EDGE (bb2);
1142 bool reverse, match;
1143 rtx set1, set2, cond1, cond2;
1144 rtx src1, src2;
1145 enum rtx_code code1, code2;
1147 /* Get around possible forwarders on fallthru edges. Other cases
1148 should be optimized out already. */
1149 if (FORWARDER_BLOCK_P (f1->dest))
1150 f1 = single_succ_edge (f1->dest);
1152 if (FORWARDER_BLOCK_P (f2->dest))
1153 f2 = single_succ_edge (f2->dest);
1155 /* To simplify use of this function, return false if there are
1156 unneeded forwarder blocks. These will get eliminated later
1157 during cleanup_cfg. */
1158 if (FORWARDER_BLOCK_P (f1->dest)
1159 || FORWARDER_BLOCK_P (f2->dest)
1160 || FORWARDER_BLOCK_P (b1->dest)
1161 || FORWARDER_BLOCK_P (b2->dest))
1162 return false;
1164 if (f1->dest == f2->dest && b1->dest == b2->dest)
1165 reverse = false;
1166 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1167 reverse = true;
1168 else
1169 return false;
1171 set1 = pc_set (BB_END (bb1));
1172 set2 = pc_set (BB_END (bb2));
1173 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1174 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1175 reverse = !reverse;
1177 src1 = SET_SRC (set1);
1178 src2 = SET_SRC (set2);
1179 cond1 = XEXP (src1, 0);
1180 cond2 = XEXP (src2, 0);
1181 code1 = GET_CODE (cond1);
1182 if (reverse)
1183 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1184 else
1185 code2 = GET_CODE (cond2);
1187 if (code2 == UNKNOWN)
1188 return false;
1190 if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
1191 gcc_unreachable ();
1192 /* Make the sources of the pc sets unreadable so that when we call
1193 insns_match_p it won't process them.
1194 The death_notes_match_p from insns_match_p won't see the local registers
1195 used for the pc set, but that could only cause missed optimizations when
1196 there are actually condjumps that use stack registers. */
1197 SET_SRC (set1) = pc_rtx;
1198 SET_SRC (set2) = pc_rtx;
1199 /* Verify codes and operands match. */
1200 if (code1 == code2)
1202 match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
1203 && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
1204 && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
1207 else if (code1 == swap_condition (code2))
1209 match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
1210 && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
1211 && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
1214 else
1215 match = false;
1216 SET_SRC (set1) = src1;
1217 SET_SRC (set2) = src2;
1218 match &= verify_changes (0);
 1220 /* If we return true, we will join the blocks, which means that
 1221 we will only have one branch prediction bit to work with.  Thus
 1222 we require the existing branches to have probabilities that are
 1223 roughly similar. */
1224 if (match
1225 && !optimize_size
1226 && maybe_hot_bb_p (bb1)
1227 && maybe_hot_bb_p (bb2))
1229 int prob2;
1231 if (b1->dest == b2->dest)
1232 prob2 = b2->probability;
1233 else
1234 /* Do not use f2 probability as f2 may be forwarded. */
1235 prob2 = REG_BR_PROB_BASE - b2->probability;
1237 /* Fail if the difference in probabilities is greater than 50%.
1238 This rules out two well-predicted branches with opposite
1239 outcomes. */
1240 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1242 if (dump_file)
1243 fprintf (dump_file,
1244 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1245 bb1->index, bb2->index, b1->probability, prob2);
1247 match = false;
1251 if (dump_file && match)
1252 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1253 bb1->index, bb2->index);
1255 if (!match)
1256 cancel_changes (0);
1257 return match;
 1260 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
 1261 the branch instruction.  This means that if we commonize the control
 1262 flow before the end of the basic block, the semantics remain unchanged.
 1264 We may assume that there exists one edge with a common destination. */
1266 static bool
1267 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1269 int nehedges1 = 0, nehedges2 = 0;
1270 edge fallthru1 = 0, fallthru2 = 0;
1271 edge e1, e2;
1272 edge_iterator ei;
1274 /* If BB1 has only one successor, we may be looking at either an
1275 unconditional jump, or a fake edge to exit. */
1276 if (single_succ_p (bb1)
1277 && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1278 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1279 return (single_succ_p (bb2)
1280 && (single_succ_edge (bb2)->flags
1281 & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1282 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1284 /* Match conditional jumps - this may get tricky when fallthru and branch
1285 edges are crossed. */
1286 if (EDGE_COUNT (bb1->succs) == 2
1287 && any_condjump_p (BB_END (bb1))
1288 && onlyjump_p (BB_END (bb1)))
1290 edge b1, f1, b2, f2;
1291 bool reverse, match;
1292 rtx set1, set2, cond1, cond2;
1293 enum rtx_code code1, code2;
1295 if (EDGE_COUNT (bb2->succs) != 2
1296 || !any_condjump_p (BB_END (bb2))
1297 || !onlyjump_p (BB_END (bb2)))
1298 return false;
1300 b1 = BRANCH_EDGE (bb1);
1301 b2 = BRANCH_EDGE (bb2);
1302 f1 = FALLTHRU_EDGE (bb1);
1303 f2 = FALLTHRU_EDGE (bb2);
1305 /* Get around possible forwarders on fallthru edges. Other cases
1306 should be optimized out already. */
1307 if (FORWARDER_BLOCK_P (f1->dest))
1308 f1 = single_succ_edge (f1->dest);
1310 if (FORWARDER_BLOCK_P (f2->dest))
1311 f2 = single_succ_edge (f2->dest);
1313 /* To simplify use of this function, return false if there are
1314 unneeded forwarder blocks. These will get eliminated later
1315 during cleanup_cfg. */
1316 if (FORWARDER_BLOCK_P (f1->dest)
1317 || FORWARDER_BLOCK_P (f2->dest)
1318 || FORWARDER_BLOCK_P (b1->dest)
1319 || FORWARDER_BLOCK_P (b2->dest))
1320 return false;
1322 if (f1->dest == f2->dest && b1->dest == b2->dest)
1323 reverse = false;
1324 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1325 reverse = true;
1326 else
1327 return false;
1329 set1 = pc_set (BB_END (bb1));
1330 set2 = pc_set (BB_END (bb2));
1331 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1332 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1333 reverse = !reverse;
1335 cond1 = XEXP (SET_SRC (set1), 0);
1336 cond2 = XEXP (SET_SRC (set2), 0);
1337 code1 = GET_CODE (cond1);
1338 if (reverse)
1339 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1340 else
1341 code2 = GET_CODE (cond2);
1343 if (code2 == UNKNOWN)
1344 return false;
1346 /* Verify codes and operands match. */
1347 match = ((code1 == code2
1348 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1349 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1350 || (code1 == swap_condition (code2)
1351 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1352 XEXP (cond2, 0))
1353 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1354 XEXP (cond2, 1))));
 1356 /* If we return true, we will join the blocks, which means that
 1357 we will only have one branch prediction bit to work with.  Thus
 1358 we require the existing branches to have probabilities that are
 1359 roughly similar. */
1360 if (match
1361 && !optimize_size
1362 && maybe_hot_bb_p (bb1)
1363 && maybe_hot_bb_p (bb2))
1365 int prob2;
1367 if (b1->dest == b2->dest)
1368 prob2 = b2->probability;
1369 else
1370 /* Do not use f2 probability as f2 may be forwarded. */
1371 prob2 = REG_BR_PROB_BASE - b2->probability;
1373 /* Fail if the difference in probabilities is greater than 50%.
1374 This rules out two well-predicted branches with opposite
1375 outcomes. */
1376 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1378 if (dump_file)
1379 fprintf (dump_file,
1380 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1381 bb1->index, bb2->index, b1->probability, prob2);
1383 return false;
1387 if (dump_file && match)
1388 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1389 bb1->index, bb2->index);
1391 return match;
1394 /* Generic case - we are seeing a computed jump, table jump or trapping
1395 instruction. */
1397 /* Check whether there are tablejumps in the end of BB1 and BB2.
1398 Return true if they are identical. */
1400 rtx label1, label2;
1401 rtx table1, table2;
1403 if (tablejump_p (BB_END (bb1), &label1, &table1)
1404 && tablejump_p (BB_END (bb2), &label2, &table2)
1405 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
 1407 /* The labels should never be the same rtx.  If they really are the same,
 1408 the jump tables are the same too.  So disable crossjumping of blocks BB1
 1409 and BB2 because when deleting the common insns at the end of BB1
 1410 by delete_basic_block () the jump table would be deleted too. */
 1411 /* If LABEL2 is referenced in BB1->END do not do anything
 1412 because we would lose information when replacing
 1413 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1414 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1416 /* Set IDENTICAL to true when the tables are identical. */
1417 bool identical = false;
1418 rtx p1, p2;
1420 p1 = PATTERN (table1);
1421 p2 = PATTERN (table2);
1422 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1424 identical = true;
1426 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1427 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1428 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1429 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1431 int i;
1433 identical = true;
1434 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1435 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1436 identical = false;
1439 if (identical)
1441 replace_label_data rr;
1442 bool match;
1444 /* Temporarily replace references to LABEL1 with LABEL2
1445 in BB1->END so that we could compare the instructions. */
1446 rr.r1 = label1;
1447 rr.r2 = label2;
1448 rr.update_label_nuses = false;
1449 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1451 match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1452 if (dump_file && match)
1453 fprintf (dump_file,
1454 "Tablejumps in bb %i and %i match.\n",
1455 bb1->index, bb2->index);
1457 /* Set the original label in BB1->END because when deleting
1458 a block whose end is a tablejump, the tablejump referenced
1459 from the instruction is deleted too. */
1460 rr.r1 = label2;
1461 rr.r2 = label1;
1462 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1464 return match;
1467 return false;
1471 /* First ensure that the instructions match. There may be many outgoing
1472 edges so this test is generally cheaper. */
1473 if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1474 return false;
 1476 /* Search the outgoing edges, ensure that the counts do match, find possible
 1477 fallthru and exception handling edges since these need more
 1478 validation. */
1479 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1480 return false;
1482 FOR_EACH_EDGE (e1, ei, bb1->succs)
1484 e2 = EDGE_SUCC (bb2, ei.index);
1486 if (e1->flags & EDGE_EH)
1487 nehedges1++;
1489 if (e2->flags & EDGE_EH)
1490 nehedges2++;
1492 if (e1->flags & EDGE_FALLTHRU)
1493 fallthru1 = e1;
1494 if (e2->flags & EDGE_FALLTHRU)
1495 fallthru2 = e2;
1498 /* If number of edges of various types does not match, fail. */
1499 if (nehedges1 != nehedges2
1500 || (fallthru1 != 0) != (fallthru2 != 0))
1501 return false;
 1503 /* Fallthru edges must be forwarded to the same destination. */
1504 if (fallthru1)
1506 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1507 ? single_succ (fallthru1->dest): fallthru1->dest);
1508 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1509 ? single_succ (fallthru2->dest): fallthru2->dest);
1511 if (d1 != d2)
1512 return false;
1515 /* Ensure the same EH region. */
1517 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1518 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1520 if (!n1 && n2)
1521 return false;
1523 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1524 return false;
1527 /* The same checks as in try_crossjump_to_edge. It is required for RTL
1528 version of sequence abstraction. */
1529 FOR_EACH_EDGE (e1, ei, bb2->succs)
1531 edge e2;
1532 edge_iterator ei;
1533 basic_block d1 = e1->dest;
1535 if (FORWARDER_BLOCK_P (d1))
1536 d1 = EDGE_SUCC (d1, 0)->dest;
1538 FOR_EACH_EDGE (e2, ei, bb1->succs)
1540 basic_block d2 = e2->dest;
1541 if (FORWARDER_BLOCK_P (d2))
1542 d2 = EDGE_SUCC (d2, 0)->dest;
1543 if (d1 == d2)
1544 break;
1547 if (!e2)
1548 return false;
1551 return true;
 1554 /* Return true if basic block BB has a preserve label. */
1556 static bool
1557 block_has_preserve_label (basic_block bb)
1559 return (bb
1560 && block_label (bb)
1561 && LABEL_PRESERVE_P (block_label (bb)));
1564 /* E1 and E2 are edges with the same destination block. Search their
1565 predecessors for common code. If found, redirect control flow from
1566 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
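   /* Illustrative sketch (added example, not from the original source): if
      both predecessors end in the same tail  {d; e; goto L}, the copy in
      E1->SRC is cut off and E1->SRC is redirected to jump into E2->SRC just
      before its own  {d; e; goto L}, so the common tail is kept only once
      (tail merging).  */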
1568 static bool
1569 try_crossjump_to_edge (int mode, edge e1, edge e2)
1571 int nmatch;
1572 basic_block src1 = e1->src, src2 = e2->src;
1573 basic_block redirect_to, redirect_from, to_remove;
1574 rtx newpos1, newpos2;
1575 edge s;
1576 edge_iterator ei;
1578 newpos1 = newpos2 = NULL_RTX;
1580 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1581 to try this optimization.
1583 Basic block partitioning may result in some jumps that appear to
1584 be optimizable (or blocks that appear to be mergeable), but which really
1585 must be left untouched (they are required to make it safely across
1586 partition boundaries). See the comments at the top of
1587 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1589 if (flag_reorder_blocks_and_partition && reload_completed)
1590 return false;
1592 /* Search backward through forwarder blocks. We don't need to worry
1593 about multiple entry or chained forwarders, as they will be optimized
1594 away. We do this to look past the unconditional jump following a
1595 conditional jump that is required due to the current CFG shape. */
1596 if (single_pred_p (src1)
1597 && FORWARDER_BLOCK_P (src1))
1598 e1 = single_pred_edge (src1), src1 = e1->src;
1600 if (single_pred_p (src2)
1601 && FORWARDER_BLOCK_P (src2))
1602 e2 = single_pred_edge (src2), src2 = e2->src;
1604 /* Nothing to do if we reach ENTRY, or a common source block. */
1605 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1606 return false;
1607 if (src1 == src2)
1608 return false;
 1610 /* Seeing more than 1 forwarder block would confuse us later... */
1611 if (FORWARDER_BLOCK_P (e1->dest)
1612 && FORWARDER_BLOCK_P (single_succ (e1->dest)))
1613 return false;
1615 if (FORWARDER_BLOCK_P (e2->dest)
1616 && FORWARDER_BLOCK_P (single_succ (e2->dest)))
1617 return false;
1619 /* Likewise with dead code (possibly newly created by the other optimizations
1620 of cfg_cleanup). */
1621 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1622 return false;
1624 /* Look for the common insn sequence, part the first ... */
1625 if (!outgoing_edges_match (mode, src1, src2))
1626 return false;
1628 /* ... and part the second. */
1629 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1631 /* Don't proceed with the crossjump unless we found a sufficient number
1632 of matching instructions or the 'from' block was totally matched
1633 (such that its predecessors will hopefully be redirected and the
1634 block removed). */
1635 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1636 && (newpos1 != BB_HEAD (src1)))
1637 return false;
1639 /* Avoid deleting preserve label when redirecting ABNORMAL edges. */
1640 if (block_has_preserve_label (e1->dest)
1641 && (e1->flags & EDGE_ABNORMAL))
1642 return false;
 1644 /* Here we know that the insns at the end of SRC1 which are common with SRC2
 1645 will be deleted.
 1646 If we have tablejumps at the end of SRC1 and SRC2,
 1647 they have already been compared for equivalence in outgoing_edges_match (),
 1648 so replace the references to TABLE1 by references to TABLE2. */
1650 rtx label1, label2;
1651 rtx table1, table2;
1653 if (tablejump_p (BB_END (src1), &label1, &table1)
1654 && tablejump_p (BB_END (src2), &label2, &table2)
1655 && label1 != label2)
1657 replace_label_data rr;
1658 rtx insn;
1660 /* Replace references to LABEL1 with LABEL2. */
1661 rr.r1 = label1;
1662 rr.r2 = label2;
1663 rr.update_label_nuses = true;
1664 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1666 /* Do not replace the label in SRC1->END because when deleting
1667 a block whose end is a tablejump, the tablejump referenced
1668 from the instruction is deleted too. */
1669 if (insn != BB_END (src1))
1670 for_each_rtx (&insn, replace_label, &rr);
1675 /* Avoid splitting if possible. We must always split when SRC2 has
1676 EH predecessor edges, or we may end up with basic blocks with both
1677 normal and EH predecessor edges. */
1678 if (newpos2 == BB_HEAD (src2)
1679 && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
1680 redirect_to = src2;
1681 else
1683 if (newpos2 == BB_HEAD (src2))
1685 /* Skip possible basic block header. */
1686 if (LABEL_P (newpos2))
1687 newpos2 = NEXT_INSN (newpos2);
1688 if (NOTE_P (newpos2))
1689 newpos2 = NEXT_INSN (newpos2);
1692 if (dump_file)
1693 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1694 src2->index, nmatch);
1695 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1698 if (dump_file)
1699 fprintf (dump_file,
1700 "Cross jumping from bb %i to bb %i; %i common insns\n",
1701 src1->index, src2->index, nmatch);
1703 redirect_to->count += src1->count;
1704 redirect_to->frequency += src1->frequency;
1705 /* We may have some registers visible through the block. */
1706 df_set_bb_dirty (redirect_to);
1708 /* Recompute the frequencies and counts of outgoing edges. */
1709 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1711 edge s2;
1712 edge_iterator ei;
1713 basic_block d = s->dest;
1715 if (FORWARDER_BLOCK_P (d))
1716 d = single_succ (d);
1718 FOR_EACH_EDGE (s2, ei, src1->succs)
1720 basic_block d2 = s2->dest;
1721 if (FORWARDER_BLOCK_P (d2))
1722 d2 = single_succ (d2);
1723 if (d == d2)
1724 break;
1727 s->count += s2->count;
 1729 /* Take care to update possible forwarder blocks.  We verified
 1730 that there is no more than one in the chain, so we can't run
 1731 into an infinite loop. */
1732 if (FORWARDER_BLOCK_P (s->dest))
1734 single_succ_edge (s->dest)->count += s2->count;
1735 s->dest->count += s2->count;
1736 s->dest->frequency += EDGE_FREQUENCY (s);
1739 if (FORWARDER_BLOCK_P (s2->dest))
1741 single_succ_edge (s2->dest)->count -= s2->count;
1742 if (single_succ_edge (s2->dest)->count < 0)
1743 single_succ_edge (s2->dest)->count = 0;
1744 s2->dest->count -= s2->count;
1745 s2->dest->frequency -= EDGE_FREQUENCY (s);
1746 if (s2->dest->frequency < 0)
1747 s2->dest->frequency = 0;
1748 if (s2->dest->count < 0)
1749 s2->dest->count = 0;
1752 if (!redirect_to->frequency && !src1->frequency)
1753 s->probability = (s->probability + s2->probability) / 2;
1754 else
1755 s->probability
1756 = ((s->probability * redirect_to->frequency +
1757 s2->probability * src1->frequency)
1758 / (redirect_to->frequency + src1->frequency));
1761 update_br_prob_note (redirect_to);
1763 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1765 /* Skip possible basic block header. */
1766 if (LABEL_P (newpos1))
1767 newpos1 = NEXT_INSN (newpos1);
1769 if (NOTE_P (newpos1))
1770 newpos1 = NEXT_INSN (newpos1);
1772 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1773 to_remove = single_succ (redirect_from);
1775 redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
1776 delete_basic_block (to_remove);
1778 update_forwarder_flag (redirect_from);
1779 if (redirect_to != src2)
1780 update_forwarder_flag (src2);
1782 return true;
1785 /* Search the predecessors of BB for common insn sequences. When found,
1786 share code between them by redirecting control flow. Return true if
1787 any changes made. */
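   /* Clarifying note (added): the fallthru predecessor is always tried first
      as the block that the other predecessors are redirected into, since
      redirecting a block that already ends in a branch adds no new jumps to
      the program.  */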
1789 static bool
1790 try_crossjump_bb (int mode, basic_block bb)
1792 edge e, e2, fallthru;
1793 bool changed;
1794 unsigned max, ix, ix2;
1795 basic_block ev, ev2;
1796 edge_iterator ei;
 1798 /* Nothing to do if there are not at least two incoming edges. */
1799 if (EDGE_COUNT (bb->preds) < 2)
1800 return false;
1802 /* Don't crossjump if this block ends in a computed jump,
1803 unless we are optimizing for size. */
1804 if (!optimize_size
1805 && bb != EXIT_BLOCK_PTR
1806 && computed_jump_p (BB_END (bb)))
1807 return false;
1809 /* If we are partitioning hot/cold basic blocks, we don't want to
1810 mess up unconditional or indirect jumps that cross between hot
1811 and cold sections.
1813 Basic block partitioning may result in some jumps that appear to
1814 be optimizable (or blocks that appear to be mergeable), but which really
1815 must be left untouched (they are required to make it safely across
1816 partition boundaries). See the comments at the top of
1817 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1819 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1820 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1821 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1822 return false;
1824 /* It is always cheapest to redirect a block that ends in a branch to
1825 a block that falls through into BB, as that adds no branches to the
1826 program. We'll try that combination first. */
1827 fallthru = NULL;
1828 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1830 if (EDGE_COUNT (bb->preds) > max)
1831 return false;
1833 FOR_EACH_EDGE (e, ei, bb->preds)
1835 if (e->flags & EDGE_FALLTHRU)
1836 fallthru = e;
1839 changed = false;
1840 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1842 e = EDGE_PRED (ev, ix);
1843 ix++;
1845 /* As noted above, first try with the fallthru predecessor. */
1846 if (fallthru)
1848 /* Don't combine the fallthru edge into anything else.
1849 If there is a match, we'll do it the other way around. */
1850 if (e == fallthru)
1851 continue;
1852 /* If nothing changed since the last attempt, there is nothing
1853 we can do. */
1854 if (!first_pass
1855 && (!(df_get_bb_dirty (e->src))
1856 && !(df_get_bb_dirty (fallthru->src))))
1857 continue;
1859 if (try_crossjump_to_edge (mode, e, fallthru))
1861 changed = true;
1862 ix = 0;
1863 ev = bb;
1864 continue;
 1868 /* Non-obvious work limiting check: Recognize that we're going
 1869 to call try_crossjump_bb on every basic block.  So if we have
 1870 two blocks with lots of outgoing edges (a switch) and they
 1871 share lots of common destinations, then we would do the
 1872 cross-jump check once for each common destination.
 1874 Now, if the blocks actually are cross-jump candidates, then
 1875 all of their destinations will be shared, which means that
 1876 we only need to check them for cross-jump candidacy once.  We
 1877 can eliminate redundant checks of crossjump(A,B) by arbitrarily
 1878 choosing to do the check only when the edge in question is the
 1879 first successor of its source block A. */
1880 if (EDGE_SUCC (e->src, 0) != e)
1881 continue;
1883 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1885 e2 = EDGE_PRED (ev2, ix2);
1886 ix2++;
1888 if (e2 == e)
1889 continue;
1891 /* We've already checked the fallthru edge above. */
1892 if (e2 == fallthru)
1893 continue;
1895 /* The "first successor" check above only prevents multiple
1896 checks of crossjump(A,B). In order to prevent redundant
1897 checks of crossjump(B,A), require that A be the block
1898 with the lowest index. */
1899 if (e->src->index > e2->src->index)
1900 continue;
1902 /* If nothing changed since the last attempt, there is nothing
1903 we can do. */
1904 if (!first_pass
1905 && (!(df_get_bb_dirty (e->src))
1906 && !(df_get_bb_dirty (e2->src))))
1907 continue;
1909 if (try_crossjump_to_edge (mode, e, e2))
1911 changed = true;
1912 ev2 = bb;
1913 ix = 0;
1914 break;
1915 }
1916 }
1917 }
1919 return changed;
1920 }
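/* Illustrative sketch, not part of the original file: the two filters in the
   nested loops above can be read as a single predicate that picks one
   canonical ordering per predecessor pair, so crossjump(A,B) is tried at
   most once and crossjump(B,A) is never retried.  The helper name below is
   hypothetical.  */
#if 0
static bool
crossjump_pair_is_canonical (edge e, edge e2)
{
  /* Only test from the predecessor whose first outgoing edge is the one
     reaching the shared destination...  */
  if (EDGE_SUCC (e->src, 0) != e)
    return false;
  /* ...and only with the lower-numbered source block on the left.  */
  return e->src->index <= e2->src->index;
}
#endif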
1922 /* Do simple CFG optimizations - basic block merging, simplification of
1923 jump instructions, etc. Return nonzero if changes were made. */
1925 static bool
1926 try_optimize_cfg (int mode)
1927 {
1928 bool changed_overall = false;
1929 bool changed;
1930 int iterations = 0;
1931 basic_block bb, b, next;
1933 if (mode & CLEANUP_CROSSJUMP)
1934 add_noreturn_fake_exit_edges ();
1936 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1937 clear_bb_flags ();
1939 FOR_EACH_BB (bb)
1940 update_forwarder_flag (bb);
1942 if (! targetm.cannot_modify_jumps_p ())
1943 {
1944 first_pass = true;
1945 /* Attempt to merge blocks as made possible by edge removal. If
1946 a block has only one successor, and the successor has only
1947 one predecessor, they may be combined. */
1948 do
1949 {
1950 changed = false;
1951 iterations++;
1953 if (dump_file)
1954 fprintf (dump_file,
1955 "\n\ntry_optimize_cfg iteration %i\n\n",
1956 iterations);
1958 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1959 {
1960 basic_block c;
1961 edge s;
1962 bool changed_here = false;
1964 /* Delete trivially dead basic blocks. */
1965 if (EDGE_COUNT (b->preds) == 0)
1966 {
1967 c = b->prev_bb;
1968 if (dump_file)
1969 fprintf (dump_file, "Deleting block %i.\n",
1970 b->index);
1972 delete_basic_block (b);
1973 if (!(mode & CLEANUP_CFGLAYOUT))
1974 changed = true;
1975 /* Avoid trying to remove ENTRY_BLOCK_PTR. */
1976 b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
1977 continue;
1978 }
1980 /* Remove code labels no longer used. */
1981 if (single_pred_p (b)
1982 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
1983 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
1984 && LABEL_P (BB_HEAD (b))
1985 /* If the previous block ends with a branch to this
1986 block, we can't delete the label. Normally this
1987 is a condjump that is yet to be simplified, but
1988 if CASE_DROPS_THRU, this can be a tablejump with
1989 some element going to the same place as the
1990 default (fallthru). */
1991 && (single_pred (b) == ENTRY_BLOCK_PTR
1992 || !JUMP_P (BB_END (single_pred (b)))
1993 || ! label_is_jump_target_p (BB_HEAD (b),
1994 BB_END (single_pred (b)))))
1995 {
1996 rtx label = BB_HEAD (b);
1998 delete_insn_chain (label, label, false);
1999 /* If the case label is undeletable, move it after the
2000 BASIC_BLOCK note. */
2001 if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2002 {
2003 rtx bb_note = NEXT_INSN (BB_HEAD (b));
2005 reorder_insns_nobb (label, label, bb_note);
2006 BB_HEAD (b) = bb_note;
2007 if (BB_END (b) == bb_note)
2008 BB_END (b) = label;
2009 }
2010 if (dump_file)
2011 fprintf (dump_file, "Deleted label in block %i.\n",
2012 b->index);
2013 }
2015 /* If we fall through an empty block, we can remove it. */
2016 if (!(mode & CLEANUP_CFGLAYOUT)
2017 && single_pred_p (b)
2018 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2019 && !LABEL_P (BB_HEAD (b))
2020 && FORWARDER_BLOCK_P (b)
2021 /* Note that forwarder_block_p being true ensures that
2022 this block has a successor. */
2023 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2024 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2025 {
2026 if (dump_file)
2027 fprintf (dump_file,
2028 "Deleting fallthru block %i.\n",
2029 b->index);
2031 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2032 redirect_edge_succ_nodup (single_pred_edge (b),
2033 single_succ (b));
2034 delete_basic_block (b);
2035 changed = true;
2036 b = c;
2037 }
2039 if (single_succ_p (b)
2040 && (s = single_succ_edge (b))
2041 && !(s->flags & EDGE_COMPLEX)
2042 && (c = s->dest) != EXIT_BLOCK_PTR
2043 && single_pred_p (c)
2044 && b != c)
2045 {
2046 /* When not in cfg_layout mode, use code that knows how to reorder
2047 insns. This code can create new basic blocks, so it does not
2048 fit the merge_blocks interface and is kept here in the hope
2049 that it will become useless once more of the compiler is
2050 converted to use cfg_layout mode. */
2052 if ((mode & CLEANUP_CFGLAYOUT)
2053 && can_merge_blocks_p (b, c))
2054 {
2055 merge_blocks (b, c);
2056 update_forwarder_flag (b);
2057 changed_here = true;
2058 }
2059 else if (!(mode & CLEANUP_CFGLAYOUT)
2060 /* If the jump insn has side effects,
2061 we can't kill the edge. */
2062 && (!JUMP_P (BB_END (b))
2063 || (reload_completed
2064 ? simplejump_p (BB_END (b))
2065 : (onlyjump_p (BB_END (b))
2066 && !tablejump_p (BB_END (b),
2067 NULL, NULL))))
2068 && (next = merge_blocks_move (s, b, c, mode)))
2069 {
2070 b = next;
2071 changed_here = true;
2072 }
2073 }
2075 /* Simplify branch over branch. */
2076 if ((mode & CLEANUP_EXPENSIVE)
2077 && !(mode & CLEANUP_CFGLAYOUT)
2078 && try_simplify_condjump (b))
2079 changed_here = true;
2081 /* If B has a single outgoing edge, but uses a
2082 non-trivial jump instruction without side-effects, we
2083 can either delete the jump entirely, or replace it
2084 with a simple unconditional jump. */
2085 if (single_succ_p (b)
2086 && single_succ (b) != EXIT_BLOCK_PTR
2087 && onlyjump_p (BB_END (b))
2088 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2089 && try_redirect_by_replacing_jump (single_succ_edge (b),
2090 single_succ (b),
2091 (mode & CLEANUP_CFGLAYOUT) != 0))
2092 {
2093 update_forwarder_flag (b);
2094 changed_here = true;
2095 }
2097 /* Simplify branch to branch. */
2098 if (try_forward_edges (mode, b))
2099 changed_here = true;
2101 /* Look for shared code between blocks. */
2102 if ((mode & CLEANUP_CROSSJUMP)
2103 && try_crossjump_bb (mode, b))
2104 changed_here = true;
2106 /* Don't get confused by the index shift caused by
2107 deleting blocks. */
2108 if (!changed_here)
2109 b = b->next_bb;
2110 else
2111 changed = true;
2112 }
2114 if ((mode & CLEANUP_CROSSJUMP)
2115 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2116 changed = true;
2118 #ifdef ENABLE_CHECKING
2119 if (changed)
2120 verify_flow_info ();
2121 #endif
2123 changed_overall |= changed;
2124 first_pass = false;
2125 }
2126 while (changed);
2127 }
2129 if (mode & CLEANUP_CROSSJUMP)
2130 remove_fake_exit_edges ();
2132 FOR_ALL_BB (b)
2133 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2135 return changed_overall;
2136 }
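/* Illustrative note, not part of the original file: the do/while loop above
   iterates because one simplification usually exposes another.  For a
   hypothetical chain B1 -> B2 -> B3, where B2 is an empty forwarder reached
   by B1's fallthru edge and B1 also jumps conditionally to B3, deleting B2
   redirects the fallthru to B3 and merges it with the existing jump edge,
   leaving B1 with a single successor; the jump-replacement and block-merging
   cases above can then finish the job on a later sweep of the same loop.  */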
2138 /* Delete all unreachable basic blocks. */
2140 bool
2141 delete_unreachable_blocks (void)
2142 {
2143 bool changed = false;
2144 basic_block b, next_bb;
2146 find_unreachable_blocks ();
2148 /* Delete all unreachable basic blocks. */
2150 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2151 {
2152 next_bb = b->next_bb;
2154 if (!(b->flags & BB_REACHABLE))
2155 {
2156 delete_basic_block (b);
2157 changed = true;
2158 }
2159 }
2161 if (changed)
2162 tidy_fallthru_edges ();
2163 return changed;
2164 }
2166 /* Delete any jump tables that are never referenced. We can't delete them
2167 at the time the tablejump insn is removed, because they are still referenced
2168 by the preceding insns that compute the destination, so we delay the deletion
2169 and garbage-collect them once life information is computed. */
2170 void
2171 delete_dead_jumptables (void)
2172 {
2173 basic_block bb;
2175 /* A dead jump table does not belong to any basic block. Scan insns
2176 between two adjacent basic blocks. */
2177 FOR_EACH_BB (bb)
2178 {
2179 rtx insn, next;
2181 for (insn = NEXT_INSN (BB_END (bb));
2182 insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
2183 insn = next)
2184 {
2185 next = NEXT_INSN (insn);
2186 if (LABEL_P (insn)
2187 && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
2188 && JUMP_P (next)
2189 && (GET_CODE (PATTERN (next)) == ADDR_VEC
2190 || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC))
2191 {
2192 rtx label = insn, jump = next;
2194 if (dump_file)
2195 fprintf (dump_file, "Dead jumptable %i removed\n",
2196 INSN_UID (insn));
2198 next = NEXT_INSN (next);
2199 delete_insn (jump);
2200 delete_insn (label);
2201 }
2202 }
2203 }
2204 }
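/* Illustrative sketch, not part of the original file: the pattern deleted
   above is a preserved label immediately followed by its dispatch table,
   sitting between two basic blocks.  Roughly, in RTL terms:

       (code_label 42 ...)                      ;; LABEL_NUSES == LABEL_PRESERVE_P
       (jump_insn 43 ... (addr_diff_vec ...))   ;; the jump table itself

   Once the tablejump that indexed the table is gone, nothing but the table
   references the label, and both insns can be deleted together.  */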
2207 /* Tidy the CFG by deleting unreachable code and whatnot. */
2209 bool
2210 cleanup_cfg (int mode)
2211 {
2212 bool changed = false;
2214 /* Set the cfglayout mode flag here. We could update all the callers
2215 but that is just inconvenient, especially given that we eventually
2216 want to have cfglayout mode as the default. */
2217 if (current_ir_type () == IR_RTL_CFGLAYOUT)
2218 mode |= CLEANUP_CFGLAYOUT;
2220 timevar_push (TV_CLEANUP_CFG);
2221 if (delete_unreachable_blocks ())
2222 {
2223 changed = true;
2224 /* We've possibly created trivially dead code. Clean it up right
2225 now to introduce more opportunities for try_optimize_cfg. */
2226 if (!(mode & (CLEANUP_NO_INSN_DEL))
2227 && !reload_completed)
2228 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2229 }
2231 compact_blocks ();
2233 while (try_optimize_cfg (mode))
2234 {
2235 delete_unreachable_blocks (), changed = true;
2236 if (!(mode & CLEANUP_NO_INSN_DEL)
2237 && (mode & CLEANUP_EXPENSIVE)
2238 && !reload_completed)
2239 {
2240 if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
2241 break;
2242 }
2243 else
2244 break;
2245 }
2247 /* Don't call delete_dead_jumptables in cfglayout mode, because
2248 that function assumes that jump tables are in the insns stream.
2249 But we also don't _have_ to delete dead jumptables in cfglayout
2250 mode because we shouldn't even be looking at things that are
2251 not in a basic block. Dead jumptables are cleaned up when
2252 going out of cfglayout mode. */
2253 if (!(mode & CLEANUP_CFGLAYOUT))
2254 delete_dead_jumptables ();
2256 timevar_pop (TV_CLEANUP_CFG);
2258 return changed;
2259 }
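/* Usage sketch, not part of the original file: callers pick how aggressive
   cleanup_cfg is by OR-ing together the CLEANUP_* flags from basic-block.h.
   For example, a caller wanting the expensive simplifications plus jump
   threading might issue the call below; rest_of_handle_jump2 further down
   makes essentially this call.  */
#if 0
  cleanup_cfg (CLEANUP_EXPENSIVE
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
#endif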
2261 static unsigned int
2262 rest_of_handle_jump (void)
2263 {
2264 delete_unreachable_blocks ();
2266 if (cfun->tail_call_emit)
2267 fixup_tail_calls ();
2268 return 0;
2269 }
2271 struct tree_opt_pass pass_jump =
2272 {
2273 "sibling", /* name */
2274 NULL, /* gate */
2275 rest_of_handle_jump, /* execute */
2276 NULL, /* sub */
2277 NULL, /* next */
2278 0, /* static_pass_number */
2279 TV_JUMP, /* tv_id */
2280 0, /* properties_required */
2281 0, /* properties_provided */
2282 0, /* properties_destroyed */
2283 TODO_ggc_collect, /* todo_flags_start */
2284 TODO_dump_func |
2285 TODO_verify_flow, /* todo_flags_finish */
2286 'i' /* letter */
2287 };
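/* Editor's note (an assumption, not stated in this file): pass descriptors
   like pass_jump above are hooked into the RTL pass list in passes.c,
   e.g. via NEXT_PASS (pass_jump) inside init_optimization_passes; the pass
   manager then calls rest_of_handle_jump and applies the TODO_* flags from
   the descriptor.  */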
2290 static unsigned int
2291 rest_of_handle_jump2 (void)
2292 {
2293 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2294 if (dump_file)
2295 dump_flow_info (dump_file, dump_flags);
2296 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
2297 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2298 return 0;
2299 }
2302 struct tree_opt_pass pass_jump2 =
2303 {
2304 "jump", /* name */
2305 NULL, /* gate */
2306 rest_of_handle_jump2, /* execute */
2307 NULL, /* sub */
2308 NULL, /* next */
2309 0, /* static_pass_number */
2310 TV_JUMP, /* tv_id */
2311 0, /* properties_required */
2312 0, /* properties_provided */
2313 0, /* properties_destroyed */
2314 TODO_ggc_collect, /* todo_flags_start */
2315 TODO_dump_func, /* todo_flags_finish */
2316 'j' /* letter */