/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to that
     block's successor.  Simplification of the branch instruction is
     performed by the underlying infrastructure, so the branch can be
     converted to a simplejump or eliminated.)
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
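
/* A forwarder block is an otherwise empty block that does nothing but
   jump unconditionally to its single successor; such blocks can be
   skipped over and eventually removed.  The property is cached in
   BB->flags and refreshed by update_forwarder_flag below.  */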
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}
/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
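/* A sketch of the transformation (pseudo-code, not actual RTL):

       if (cond) goto L2;              if (!cond) goto L3;
       goto L3;               ==>    L2:
     L2:                                ...
       ...

   The conditional branch is inverted to target the destination of the
   unconditional jump, and the forwarder block holding that jump is
   deleted.  */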
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
/* Attempt to prove that the operation is a no-op using CSElib, or mark
   its effect on a register.  Used by jump threading.  */
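/* For example, (set (reg 65) (reg 65)) is provably a no-op, so nothing
   is marked; a set of register 65 to any other value marks it in
   NONEQUAL.  A return value of true means the effect could not be
   analyzed, and the caller must give up on threading.  */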
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
         value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
        {
          dest = XEXP (exp, 0);
          regno = REGNO (dest);
          CLEAR_REGNO_REG_SET (nonequal, regno);
          if (regno < FIRST_PSEUDO_REGISTER)
            {
              int n = hard_regno_nregs[regno][GET_MODE (dest)];
              while (--n > 0)
                CLEAR_REGNO_REG_SET (nonequal, regno + n);
            }
        }
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
        return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
        return false;
      if (!REG_P (dest))
        return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (dest)];
          while (--n > 0)
            SET_REGNO_REG_SET (nonequal, regno + n);
        }
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
        return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int n = hard_regno_nregs[regno][GET_MODE (*x)];
          while (--n > 0)
            if (REGNO_REG_SET_P (nonequal, regno + n))
              return 1;
        }
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   will always continue along the same edge if reached via E.  Return
   that edge if it exists, NULL otherwise.  */
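/* For example, if E->src ends in "if (x == 0)" and B ends in the same
   test with no side effects in between, then arriving in B via E already
   decides B's branch, so control can be threaded past B entirely.  */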
static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
        b->flags |= BB_NONTHREADABLE_BLOCK;
        return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx pat = PATTERN (insn);

          if (GET_CODE (pat) == PARALLEL)
            {
              for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
                failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
            }
          else
            failed |= mark_effect (pat, nonequal);
        }

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->il.rtl->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
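/* For example, given a chain B -> F1 -> F2 -> T in which F1 and F2 are
   forwarder blocks, the edge out of B is redirected straight to T; the
   forwarders become unreachable and are deleted by later cleanups.  */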
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can forward a fallthru edge
         to the same place as the branch edge of a conditional branch
         and turn the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        {
          ei_next (&ei);
          continue;
        }

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
         up jumps that cross between hot/cold sections.

         Basic block partitioning may result in some jumps that appear
         to be optimizable (or blocks that appear to be mergeable), but which
         really must be left untouched (they are required to make it safely
         across partition boundaries).  See the comments at the top of
         bb-reorder.c:partition_hot_cold_basic_blocks for complete
         details.  */

      if (first != EXIT_BLOCK_PTR
          && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
        return false;

      while (counter < n_basic_blocks)
        {
          basic_block new_target = NULL;
          bool new_target_threaded = false;
          may_thread |= target->flags & BB_DIRTY;

          if (FORWARDER_BLOCK_P (target)
              && !(single_succ_edge (target)->flags & EDGE_CROSSING)
              && single_succ (target) != EXIT_BLOCK_PTR)
            {
              /* Bypass trivial infinite loops.  */
              new_target = single_succ (target);
              if (target == new_target)
                counter = n_basic_blocks;
            }

          /* Allow threading over at most one edge at a time to simplify
             updating of probabilities.  */
          else if ((mode & CLEANUP_THREADING) && may_thread)
            {
              edge t = thread_jump (mode, e, target);
              if (t)
                {
                  if (!threaded_edges)
                    threaded_edges = XNEWVEC (edge, n_basic_blocks);
                  else
                    {
                      int i;

                      /* Detect an infinite loop across blocks not
                         including the start block.  */
                      for (i = 0; i < nthreaded_edges; ++i)
                        if (threaded_edges[i] == t)
                          break;
                      if (i < nthreaded_edges)
                        {
                          counter = n_basic_blocks;
                          break;
                        }
                    }

                  /* Detect an infinite loop across the start block.  */
                  if (t->dest == b)
                    break;

                  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
                  threaded_edges[nthreaded_edges++] = t;

                  new_target = t->dest;
                  new_target_threaded = true;
                }
            }

          if (!new_target)
            break;

          /* Avoid killing of loop pre-headers, as it is the place the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of block and CODE_LABEL of the loop, for non forwarders
             it must appear before the JUMP_INSN.  */
          if ((mode & CLEANUP_PRE_LOOP) && optimize && flag_loop_optimize)
            {
              rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
                          ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

              if (!NOTE_P (insn))
                insn = NEXT_INSN (insn);

              for (; insn && !LABEL_P (insn) && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (NOTE_P (insn)
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (insn && NOTE_P (insn))
                break;

              /* Do not clean up branches to just past the end of a loop
                 at this time; it can mess up the loop optimizer's
                 recognition of some patterns.  */

              insn = PREV_INSN (BB_HEAD (target));
              if (insn && NOTE_P (insn)
                  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
                break;
            }

          counter++;
          target = new_target;
          threaded |= new_target_threaded;
        }

      if (counter >= n_basic_blocks)
        {
          if (dump_file)
            fprintf (dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;
          int edge_frequency;
          int n = 0;

          /* Don't force if target is exit block.  */
          if (threaded && target != EXIT_BLOCK_PTR)
            {
              notice_new_block (redirect_edge_and_branch_force (e, target));
              if (dump_file)
                fprintf (dump_file, "Conditionals threaded.\n");
            }
          else if (!redirect_edge_and_branch (e, target))
            {
              if (dump_file)
                fprintf (dump_file,
                         "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
              ei_next (&ei);
              continue;
            }

          /* We successfully forwarded the edge.  Now update profile
             data: for each edge we traversed in the chain, remove
             the original edge's execution count.  */
          edge_frequency = ((edge_probability * b->frequency
                             + REG_BR_PROB_BASE / 2)
                            / REG_BR_PROB_BASE);

          if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
            b->flags |= BB_FORWARDER_BLOCK;

          do
            {
              edge t;

              if (!single_succ_p (first))
                {
                  gcc_assert (n < nthreaded_edges);
                  t = threaded_edges [n++];
                  gcc_assert (t->src == first);
                  update_bb_profile_for_threading (first, edge_frequency,
                                                   edge_count, t);
                  update_br_prob_note (first);
                }
              else
                {
                  first->count -= edge_count;
                  if (first->count < 0)
                    first->count = 0;
                  first->frequency -= edge_frequency;
                  if (first->frequency < 0)
                    first->frequency = 0;
                  /* It is possible that as the result of
                     threading we've removed the edge as it is
                     threaded to the fallthru edge.  Avoid
                     getting out of sync.  */
                  if (n < nthreaded_edges
                      && first == threaded_edges [n]->src)
                    n++;
                  t = single_succ_edge (first);
                }

              t->count -= edge_count;
              if (t->count < 0)
                t->count = 0;
              first = t->dest;
            }
          while (first != target);

          changed = true;
          continue;
        }
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
             a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */
static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
             b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
        fprintf (dump_file, "Merged %d and %d without moving.\n",
                 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
        next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return NULL;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Walk X and Y in parallel and merge the memory attributes of matching
   MEM expressions, dropping any attribute on which the two disagree.  */
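/* For example, if the two MEMs carry different alias sets, both are
   reset to alias set 0, which conflicts with everything, so the merged
   insn remains conservatively correct in either original context.
   Likewise, alignment drops to the smaller of the two values.  */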
void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
        MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
        MEM_ATTRS (x) = 0;
      else
        {
          rtx mem_size;

          if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
            {
              set_mem_alias_set (x, 0);
              set_mem_alias_set (y, 0);
            }

          if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
            {
              set_mem_expr (x, 0);
              set_mem_expr (y, 0);
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }
          else if (MEM_OFFSET (x) != MEM_OFFSET (y))
            {
              set_mem_offset (x, 0);
              set_mem_offset (y, 0);
            }

          if (!MEM_SIZE (x))
            mem_size = NULL_RTX;
          else if (!MEM_SIZE (y))
            mem_size = NULL_RTX;
          else
            mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
                                     INTVAL (MEM_SIZE (y))));
          set_mem_size (x, mem_size);
          set_mem_size (y, mem_size);

          set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
          set_mem_align (y, MEM_ALIGN (x));
        }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'E':
          /* Two vectors must have the same length.  */
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return;

          for (j = 0; j < XVECLEN (x, i); j++)
            merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

          break;

        case 'e':
          merge_memattrs (XEXP (x, i), XEXP (y, i));
        }
    }
  return;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */
static bool
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                        CALL_INSN_FUNCTION_USAGE (i2))
          || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);
          if (s1 != 0 && s2 != 0
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              if (! rtx_renumbered_equal_p (p1, p2))
                cancel_changes (0);
              else if (apply_change_group ())
                return true;
            }
        }
    }

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent instructions.  Store the first insns of that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
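/* For example, if both blocks end with the same pair of stores followed
   by equivalent jumps, the return value is 2 and *F1/*F2 point at the
   first matched store in each block; try_crossjump_to_edge can then
   split one block at that point and redirect the other into it.  */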
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
                      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
        ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
        i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
        i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
        break;

      if (!old_insns_match_p (mode, i1, i2))
        break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
        {
          /* If the merged insns have different REG_EQUAL notes, then
             remove them.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && !equiv2)
            remove_note (i1, equiv1);
          else if (!equiv1 && equiv2)
            remove_note (i2, equiv2);
          else if (equiv1 && equiv2
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              remove_note (i1, equiv1);
              remove_note (i2, equiv2);
            }

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff the condbranches at the end of BB1 and BB2 match.  */
bool
condjump_equiv_p (struct equiv_info *info, bool call_init)
{
  basic_block bb1 = info->x_block;
  basic_block bb2 = info->y_block;
  edge b1 = BRANCH_EDGE (bb1);
  edge b2 = BRANCH_EDGE (bb2);
  edge f1 = FALLTHRU_EDGE (bb1);
  edge f2 = FALLTHRU_EDGE (bb2);
  bool reverse, match;
  rtx set1, set2, cond1, cond2;
  rtx src1, src2;
  enum rtx_code code1, code2;

  /* Get around possible forwarders on fallthru edges.  Other cases
     should be optimized out already.  */
  if (FORWARDER_BLOCK_P (f1->dest))
    f1 = single_succ_edge (f1->dest);

  if (FORWARDER_BLOCK_P (f2->dest))
    f2 = single_succ_edge (f2->dest);

  /* To simplify use of this function, return false if there are
     unneeded forwarder blocks.  These will get eliminated later
     during cleanup_cfg.  */
  if (FORWARDER_BLOCK_P (f1->dest)
      || FORWARDER_BLOCK_P (f2->dest)
      || FORWARDER_BLOCK_P (b1->dest)
      || FORWARDER_BLOCK_P (b2->dest))
    return false;

  if (f1->dest == f2->dest && b1->dest == b2->dest)
    reverse = false;
  else if (f1->dest == b2->dest && b1->dest == f2->dest)
    reverse = true;
  else
    return false;

  set1 = pc_set (BB_END (bb1));
  set2 = pc_set (BB_END (bb2));
  if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    reverse = !reverse;

  src1 = SET_SRC (set1);
  src2 = SET_SRC (set2);
  cond1 = XEXP (src1, 0);
  cond2 = XEXP (src2, 0);
  code1 = GET_CODE (cond1);
  if (reverse)
    code2 = reversed_comparison_code (cond2, BB_END (bb2));
  else
    code2 = GET_CODE (cond2);

  if (code2 == UNKNOWN)
    return false;

  if (call_init && !struct_equiv_init (STRUCT_EQUIV_START | info->mode, info))
    gcc_unreachable ();
  /* Make the sources of the pc sets unreadable so that when we call
     insns_match_p it won't process them.
     The death_notes_match_p from insns_match_p won't see the local registers
     used for the pc set, but that could only cause missed optimizations when
     there are actually condjumps that use stack registers.  */
  SET_SRC (set1) = pc_rtx;
  SET_SRC (set2) = pc_rtx;
  /* Verify codes and operands match.  */
  if (code1 == code2)
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
               && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 0), 1, info)
               && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 1), 1, info));
    }
  else if (code1 == swap_condition (code2))
    {
      match = (insns_match_p (BB_END (bb1), BB_END (bb2), info)
               && rtx_equiv_p (&XEXP (cond1, 1), XEXP (cond2, 0), 1, info)
               && rtx_equiv_p (&XEXP (cond1, 0), XEXP (cond2, 1), 1, info));
    }
  else
    match = false;
  SET_SRC (set1) = src1;
  SET_SRC (set2) = src2;
  match &= verify_changes (0);

  /* If we return true, we will join the blocks.  Which means that
     we will only have one branch prediction bit to work with.  Thus
     we require the existing branches to have probabilities that are
     roughly similar.  */
  if (match
      && !optimize_size
      && maybe_hot_bb_p (bb1)
      && maybe_hot_bb_p (bb2))
    {
      int prob2;

      if (b1->dest == b2->dest)
        prob2 = b2->probability;
      else
        /* Do not use f2 probability as f2 may be forwarded.  */
        prob2 = REG_BR_PROB_BASE - b2->probability;

      /* Fail if the difference in probabilities is greater than 50%.
         This rules out two well-predicted branches with opposite
         outcomes.  */
      if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                     bb1->index, bb2->index, b1->probability, prob2);

          match = false;
        }
    }

  if (dump_file && match)
    fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
             bb1->index, bb2->index);

  if (!match)
    cancel_changes (0);
  return match;
}
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
            && (single_succ_edge (bb2)->flags
                & (EDGE_COMPLEX | EDGE_FAKE)) == 0
            && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
          || !any_condjump_p (BB_END (bb2))
          || !onlyjump_p (BB_END (bb2)))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
        code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      if (match
          && !optimize_size
          && maybe_hot_bb_p (bb1)
          && maybe_hot_bb_p (bb2))
        {
          int prob2;

          if (b1->dest == b2->dest)
            prob2 = b2->probability;
          else
            /* Do not use f2 probability as f2 may be forwarded.  */
            prob2 = REG_BR_PROB_BASE - b2->probability;

          /* Fail if the difference in probabilities is greater than 50%.
             This rules out two well-predicted branches with opposite
             outcomes.  */
          if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
            {
              if (dump_file)
                fprintf (dump_file,
                         "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
                         bb1->index, bb2->index, b1->probability, prob2);

              return false;
            }
        }

      if (dump_file && match)
        fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (bb1), &label1, &table1)
        && tablejump_p (BB_END (bb2), &label2, &table2)
        && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
      {
        /* The labels should never be the same rtx.  If they really are same
           the jump tables are same too.  So disable crossjumping of blocks BB1
           and BB2 because when deleting the common insns in the end of BB1
           by delete_basic_block () the jump table would be deleted too.  */
        /* If LABEL2 is referenced in BB1->END do not do anything
           because we would lose information when replacing
           LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
        if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
          {
            /* Set IDENTICAL to true when the tables are identical.  */
            bool identical = false;
            rtx p1, p2;

            p1 = PATTERN (table1);
            p2 = PATTERN (table2);
            if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
              identical = true;
            else if (GET_CODE (p1) == ADDR_DIFF_VEC
                     && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
                     && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
                     && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
              {
                int i;

                identical = true;
                for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
                  if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
                    identical = false;
              }

            if (identical)
              {
                replace_label_data rr;
                bool match;

                /* Temporarily replace references to LABEL1 with LABEL2
                   in BB1->END so that we could compare the instructions.  */
                rr.r1 = label1;
                rr.r2 = label2;
                rr.update_label_nuses = false;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
                if (dump_file && match)
                  fprintf (dump_file,
                           "Tablejumps in bb %i and %i match.\n",
                           bb1->index, bb2->index);

                /* Set the original label in BB1->END because when deleting
                   a block whose end is a tablejump, the tablejump referenced
                   from the instruction is deleted too.  */
                rr.r1 = label2;
                rr.r2 = label1;
                for_each_rtx (&BB_END (bb1), replace_label, &rr);

                return match;
              }
          }
        return false;
      }
  }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
        nehedges1++;

      if (e2->flags & EDGE_EH)
        nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  It is required for RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
        d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
        {
          basic_block d2 = e2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = EDGE_SUCC (d2, 0)->dest;
          if (d1 == d2)
            break;
        }

      if (!e2)
        return false;
    }

  return true;
}
/* Return true if basic block BB has a preserve label.  */
static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
          && block_label (bb)
          && LABEL_PRESERVE_P (block_label (bb)));
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
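/* For example, when both predecessors end with the same tail of
   instructions, the shared tail is kept in (a split-off portion of)
   E2->SRC and E1->SRC is redirected to it, so the duplicate copy in
   E1->SRC can be deleted.  */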
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than 1 forwarder blocks would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting a preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
  {
    rtx label1, label2;
    rtx table1, table2;

    if (tablejump_p (BB_END (src1), &label1, &table1)
        && tablejump_p (BB_END (src2), &label2, &table2)
        && label1 != label2)
      {
        replace_label_data rr;
        rtx insn;

        /* Replace references to LABEL1 with LABEL2.  */
        rr.r1 = label1;
        rr.r2 = label2;
        rr.update_label_nuses = true;
        for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
          {
            /* Do not replace the label in SRC1->END because when deleting
               a block whose end is a tablejump, the tablejump referenced
               from the instruction is deleted too.  */
            if (insn != BB_END (src1))
              for_each_rtx (&insn, replace_label, &rr);
          }
      }
  }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
        {
          /* Skip possible basic block header.  */
          if (LABEL_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
          if (NOTE_P (newpos2))
            newpos2 = NEXT_INSN (newpos2);
        }

      if (dump_file)
        fprintf (dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = single_succ (d2);
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          single_succ_edge (s->dest)->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          single_succ_edge (s2->dest)->count -= s2->count;
          if (single_succ_edge (s2->dest)->count < 0)
            single_succ_edge (s2->dest)->count = 0;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
          if (s2->dest->frequency < 0)
            s2->dest->frequency = 0;
          if (s2->dest->count < 0)
            s2->dest->count = 0;
        }

      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency +
              s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
1853 static bool
1854 try_crossjump_bb (int mode, basic_block bb)
1856 edge e, e2, fallthru;
1857 bool changed;
1858 unsigned max, ix, ix2;
1859 basic_block ev, ev2;
1860 edge_iterator ei;
1862 /* Nothing to do if there is not at least two incoming edges. */
1863 if (EDGE_COUNT (bb->preds) < 2)
1864 return false;
1866 /* Don't crossjump if this block ends in a computed jump,
1867 unless we are optimizing for size. */
1868 if (!optimize_size
1869 && bb != EXIT_BLOCK_PTR
1870 && computed_jump_p (BB_END (bb)))
1871 return false;
1873 /* If we are partitioning hot/cold basic blocks, we don't want to
1874 mess up unconditional or indirect jumps that cross between hot
1875 and cold sections.
1877 Basic block partitioning may result in some jumps that appear to
1878 be optimizable (or blocks that appear to be mergeable), but which really
1879 must be left untouched (they are required to make it safely across
1880 partition boundaries). See the comments at the top of
1881 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1883 if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
1884 BB_PARTITION (EDGE_PRED (bb, 1)->src)
1885 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
1886 return false;
1888 /* It is always cheapest to redirect a block that ends in a branch to
1889 a block that falls through into BB, as that adds no branches to the
1890 program. We'll try that combination first. */
1891 fallthru = NULL;
1892 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1894 if (EDGE_COUNT (bb->preds) > max)
1895 return false;
1897 FOR_EACH_EDGE (e, ei, bb->preds)
1899 if (e->flags & EDGE_FALLTHRU)
1900 fallthru = e;
1903 changed = false;
1904 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1906 e = EDGE_PRED (ev, ix);
1907 ix++;
1909 /* As noted above, first try with the fallthru predecessor. */
1910 if (fallthru)
1912 /* Don't combine the fallthru edge into anything else.
1913 If there is a match, we'll do it the other way around. */
1914 if (e == fallthru)
1915 continue;
1916 /* If nothing changed since the last attempt, there is nothing
1917 we can do. */
1918 if (!first_pass
1919 && (!(e->src->flags & BB_DIRTY)
1920 && !(fallthru->src->flags & BB_DIRTY)))
1921 continue;
1923 if (try_crossjump_to_edge (mode, e, fallthru))
1925 changed = true;
1926 ix = 0;
1927 ev = bb;
1928 continue;
1932 /* Non-obvious work-limiting check: Recognize that we're going
1933 to call try_crossjump_bb on every basic block. So if we have
1934 two blocks with lots of outgoing edges (a switch) and they
1935 share lots of common destinations, then we would do the
1936 cross-jump check once for each common destination.
1938 Now, if the blocks actually are cross-jump candidates, then
1939 all of their destinations will be shared, which means that
1940 we need only check them for cross-jump candidacy once. We
1941 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1942 choosing to do the check only when the edge under consideration
1943 is the first successor edge of its source block A. */
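/* Concretely (illustrative numbers): if A and B are two switch
   blocks sharing 20 destinations, the pair would otherwise be
   tested up to 20 times, once from each shared destination; with
   this check it is tested only from the destination reached
   through A's first successor edge.  */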
1944 if (EDGE_SUCC (e->src, 0) != e)
1945 continue;
1947 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1949 e2 = EDGE_PRED (ev2, ix2);
1950 ix2++;
1952 if (e2 == e)
1953 continue;
1955 /* We've already checked the fallthru edge above. */
1956 if (e2 == fallthru)
1957 continue;
1959 /* The "first successor" check above only prevents multiple
1960 checks of crossjump(A,B). In order to prevent redundant
1961 checks of crossjump(B,A), require that A be the block
1962 with the lowest index. */
1963 if (e->src->index > e2->src->index)
1964 continue;
1966 /* If nothing changed since the last attempt, there is nothing
1967 we can do. */
1968 if (!first_pass
1969 && (!(e->src->flags & BB_DIRTY)
1970 && !(e2->src->flags & BB_DIRTY)))
1971 continue;
1973 if (try_crossjump_to_edge (mode, e, e2))
1975 changed = true;
1976 ev2 = bb;
1977 ix = 0;
1978 break;
1983 return changed;
1986 /* Do simple CFG optimizations - basic block merging, simplification
1987 of jump instructions, etc. Return nonzero if changes were made. */
1989 static bool
1990 try_optimize_cfg (int mode)
1992 bool changed_overall = false;
1993 bool changed;
1994 int iterations = 0;
1995 basic_block bb, b, next;
1997 if (mode & CLEANUP_CROSSJUMP)
1998 add_noreturn_fake_exit_edges ();
2000 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
2001 clear_bb_flags ();
2003 FOR_EACH_BB (bb)
2004 update_forwarder_flag (bb);
2006 if (! targetm.cannot_modify_jumps_p ())
2008 first_pass = true;
2009 /* Attempt to merge blocks as made possible by edge removal. If
2010 a block has only one successor, and the successor has only
2011 one predecessor, they may be combined. */
2014 changed = false;
2015 iterations++;
2017 if (dump_file)
2018 fprintf (dump_file,
2019 "\n\ntry_optimize_cfg iteration %i\n\n",
2020 iterations);
2022 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
2024 basic_block c;
2025 edge s;
2026 bool changed_here = false;
2028 /* Delete trivially dead basic blocks. */
2029 while (EDGE_COUNT (b->preds) == 0)
2031 c = b->prev_bb;
2032 if (dump_file)
2033 fprintf (dump_file, "Deleting block %i.\n",
2034 b->index);
2036 delete_basic_block (b);
2037 if (!(mode & CLEANUP_CFGLAYOUT))
2038 changed = true;
2039 b = c;
2042 /* Remove code labels no longer used. */
2043 if (single_pred_p (b)
2044 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2045 && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
2046 && LABEL_P (BB_HEAD (b))
2047 /* If the previous block ends with a branch to this
2048 block, we can't delete the label. Normally this
2049 is a condjump that is yet to be simplified, but
2050 if CASE_DROPS_THRU, this can be a tablejump with
2051 some element going to the same place as the
2052 default (fallthru). */
2053 && (single_pred (b) == ENTRY_BLOCK_PTR
2054 || !JUMP_P (BB_END (single_pred (b)))
2055 || ! label_is_jump_target_p (BB_HEAD (b),
2056 BB_END (single_pred (b)))))
2058 rtx label = BB_HEAD (b);
2060 delete_insn_chain (label, label);
2061 /* If the label is undeletable, move it after the
2062 BASIC_BLOCK note. */
2063 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
2065 rtx bb_note = NEXT_INSN (BB_HEAD (b));
2067 reorder_insns_nobb (label, label, bb_note);
2068 BB_HEAD (b) = bb_note;
2070 if (dump_file)
2071 fprintf (dump_file, "Deleted label in block %i.\n",
2072 b->index);
2075 /* If we fall through an empty block, we can remove it. */
2076 if (!(mode & CLEANUP_CFGLAYOUT)
2077 && single_pred_p (b)
2078 && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
2079 && !LABEL_P (BB_HEAD (b))
2080 && FORWARDER_BLOCK_P (b)
2081 /* Note that forwarder_block_p being true ensures that
2082 this block has a successor. */
2083 && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
2084 && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
2086 if (dump_file)
2087 fprintf (dump_file,
2088 "Deleting fallthru block %i.\n",
2089 b->index);
2091 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
2092 redirect_edge_succ_nodup (single_pred_edge (b),
2093 single_succ (b));
2094 delete_basic_block (b);
2095 changed = true;
2096 b = c;
2099 if (single_succ_p (b)
2100 && (s = single_succ_edge (b))
2101 && !(s->flags & EDGE_COMPLEX)
2102 && (c = s->dest) != EXIT_BLOCK_PTR
2103 && single_pred_p (c)
2104 && b != c)
2106 /* When not in cfg_layout mode, use code that is aware of insn
2107 reordering. This code may create new basic blocks, so it does
2108 not fit the merge_blocks interface and is kept here in the hope
2109 that it will become useless once more of the compiler is
2110 transformed to use cfg_layout mode. */
2112 if ((mode & CLEANUP_CFGLAYOUT)
2113 && can_merge_blocks_p (b, c))
2115 merge_blocks (b, c);
2116 update_forwarder_flag (b);
2117 changed_here = true;
2119 else if (!(mode & CLEANUP_CFGLAYOUT)
2120 /* If the jump insn has side effects,
2121 we can't kill the edge. */
2122 && (!JUMP_P (BB_END (b))
2123 || (reload_completed
2124 ? simplejump_p (BB_END (b))
2125 : (onlyjump_p (BB_END (b))
2126 && !tablejump_p (BB_END (b),
2127 NULL, NULL))))
2128 && (next = merge_blocks_move (s, b, c, mode)))
2130 b = next;
2131 changed_here = true;
2135 /* Simplify branch over branch. */
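/* I.e. (schematic source-level shape, hypothetical):

	if (cond) goto L1;	=>	if (!cond) goto L2;
	goto L2;
	L1: ...				L1: ...

   try_simplify_condjump reverses the condition and deletes the
   unconditional jump when the conditional branch merely skips
   over it.  */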
2136 if ((mode & CLEANUP_EXPENSIVE)
2137 && !(mode & CLEANUP_CFGLAYOUT)
2138 && try_simplify_condjump (b))
2139 changed_here = true;
2141 /* If B has a single outgoing edge, but uses a
2142 non-trivial jump instruction without side-effects, we
2143 can either delete the jump entirely, or replace it
2144 with a simple unconditional jump. */
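/* A typical instance (hypothetical): after jump threading, a
   conditional jump whose two arms reach the same destination is
   left with a single successor edge; the condjump can then be
   replaced by a plain "goto", or deleted entirely when the target
   is the fallthrough block.  */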
2145 if (single_succ_p (b)
2146 && single_succ (b) != EXIT_BLOCK_PTR
2147 && onlyjump_p (BB_END (b))
2148 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
2149 && try_redirect_by_replacing_jump (single_succ_edge (b),
2150 single_succ (b),
2151 (mode & CLEANUP_CFGLAYOUT) != 0))
2153 update_forwarder_flag (b);
2154 changed_here = true;
2157 /* Simplify branch to branch. */
2158 if (try_forward_edges (mode, b))
2159 changed_here = true;
2161 /* Look for shared code between blocks. */
2162 if ((mode & CLEANUP_CROSSJUMP)
2163 && try_crossjump_bb (mode, b))
2164 changed_here = true;
2166 /* Don't get confused by the index shift caused by
2167 deleting blocks. */
2168 if (!changed_here)
2169 b = b->next_bb;
2170 else
2171 changed = true;
2174 if ((mode & CLEANUP_CROSSJUMP)
2175 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2176 changed = true;
2178 #ifdef ENABLE_CHECKING
2179 if (changed)
2180 verify_flow_info ();
2181 #endif
2183 changed_overall |= changed;
2184 first_pass = false;
2186 while (changed);
2189 if (mode & CLEANUP_CROSSJUMP)
2190 remove_fake_exit_edges ();
2192 FOR_ALL_BB (b)
2193 b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);
2195 return changed_overall;
2198 /* Delete all unreachable basic blocks. */
2200 bool
2201 delete_unreachable_blocks (void)
2203 bool changed = false;
2204 basic_block b, next_bb;
2206 find_unreachable_blocks ();
2208 /* Delete all unreachable basic blocks. */
2210 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2212 next_bb = b->next_bb;
2214 if (!(b->flags & BB_REACHABLE))
2216 delete_basic_block (b);
2217 changed = true;
2221 if (changed)
2222 tidy_fallthru_edges ();
2223 return changed;
2226 /* Merge sequential blocks where possible. Return true if any changes were made. */
2228 bool
2229 merge_seq_blocks (void)
2231 basic_block bb;
2232 bool changed = false;
2234 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2236 if (single_succ_p (bb)
2237 && can_merge_blocks_p (bb, single_succ (bb)))
2239 /* Merge the blocks and retry. */
2240 merge_blocks (bb, single_succ (bb));
2241 changed = true;
2242 continue;
2245 bb = bb->next_bb;
2248 return changed;
2251 /* Tidy the CFG by deleting unreachable code and performing the cleanups selected by MODE. Return true if any changes were made. */
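/* A typical invocation (this particular flag combination appears in
   rest_of_handle_jump2 below):

	cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);

   MODE is a bitmask of CLEANUP_* flags; try_optimize_cfg is
   repeated below until it reaches a fixed point.  */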
2253 bool
2254 cleanup_cfg (int mode)
2256 bool changed = false;
2258 timevar_push (TV_CLEANUP_CFG);
2259 if (delete_unreachable_blocks ())
2261 changed = true;
2262 /* We've possibly created trivially dead code. Clean it up right
2263 away to expose more opportunities for try_optimize_cfg. */
2264 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2265 && !reload_completed)
2266 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2269 compact_blocks ();
2271 while (try_optimize_cfg (mode))
2273 delete_unreachable_blocks (), changed = true;
2274 if (mode & CLEANUP_UPDATE_LIFE)
2276 /* Cleaning up the CFG introduces more opportunities for dead code
2277 removal, which in turn may introduce more opportunities for
2278 cleaning up the CFG. */
2279 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2280 PROP_DEATH_NOTES
2281 | PROP_SCAN_DEAD_CODE
2282 | PROP_KILL_DEAD_CODE
2283 | ((mode & CLEANUP_LOG_LINKS)
2284 ? PROP_LOG_LINKS : 0)))
2285 break;
2287 else if (!(mode & CLEANUP_NO_INSN_DEL)
2288 && (mode & CLEANUP_EXPENSIVE)
2289 && !reload_completed)
2291 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2292 break;
2294 else
2295 break;
2296 delete_dead_jumptables ();
2299 timevar_pop (TV_CLEANUP_CFG);
2301 return changed;
2304 static void
2305 rest_of_handle_jump (void)
2307 delete_unreachable_blocks ();
2309 if (cfun->tail_call_emit)
2310 fixup_tail_calls ();
2313 struct tree_opt_pass pass_jump =
2315 "sibling", /* name */
2316 NULL, /* gate */
2317 rest_of_handle_jump, /* execute */
2318 NULL, /* sub */
2319 NULL, /* next */
2320 0, /* static_pass_number */
2321 TV_JUMP, /* tv_id */
2322 0, /* properties_required */
2323 0, /* properties_provided */
2324 0, /* properties_destroyed */
2325 TODO_ggc_collect, /* todo_flags_start */
2326 TODO_dump_func |
2327 TODO_verify_flow, /* todo_flags_finish */
2328 'i' /* letter */
2332 static void
2333 rest_of_handle_jump2 (void)
2335 /* Turn NOTE_INSN_EXPECTED_VALUE notes into REG_BR_PROB notes. Do
2336 this before jump optimization switches branch directions. */
2337 if (flag_guess_branch_prob)
2338 expected_value_to_br_prob ();
2340 delete_trivially_dead_insns (get_insns (), max_reg_num ());
2341 reg_scan (get_insns (), max_reg_num ());
2342 if (dump_file)
2343 dump_flow_info (dump_file);
2344 cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
2345 | (flag_thread_jumps ? CLEANUP_THREADING : 0));
2347 create_loop_notes ();
2349 purge_line_number_notes ();
2351 if (optimize)
2352 cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
2354 /* Jump optimization, and the removal of NULL pointer checks, may
2355 have reduced the number of instructions substantially. CSE and
2356 later passes allocate arrays whose dimensions involve the
2357 maximum instruction UID, so reducing the maximum UID saves
2358 a significant amount of memory. */
2359 renumber_insns ();
2363 struct tree_opt_pass pass_jump2 =
2365 "jump", /* name */
2366 NULL, /* gate */
2367 rest_of_handle_jump2, /* execute */
2368 NULL, /* sub */
2369 NULL, /* next */
2370 0, /* static_pass_number */
2371 TV_JUMP, /* tv_id */
2372 0, /* properties_required */
2373 0, /* properties_provided */
2374 0, /* properties_destroyed */
2375 TODO_ggc_collect, /* todo_flags_start */
2376 TODO_dump_func, /* todo_flags_finish */
2377 'j' /* letter */