[official-gcc.git] / gcc / cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer.  The main entry point is
23 cleanup_cfg.  The following optimizations are performed:
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is redirected to the
27 block's successor.  Simplification of the branch instruction is performed
28 by the underlying infrastructure, so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging.  */
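/* For illustration, a hypothetical example of edge forwarding (not taken
   from these sources): a forwarder block contains nothing but an optional
   label and an unconditional jump, so an edge into it can be redirected
   straight to its destination:

       bb A: if (x == 0) goto B;        bb A: if (x == 0) goto C;
       bb B: goto C;              ==>   bb B: (now dead, removed later)
       bb C: ...                        bb C: ...                      */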
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "emit-rtl.h"
55 /* cleanup_cfg maintains the following flags for each basic block.  */
57 enum bb_flags
59 /* Set if BB is a forwarder block, to avoid too many
60 forwarder_block_p calls.  */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
73 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
93 /* Set flags for a newly created block.  */
95 static void
96 notice_new_block (basic_block bb)
98 if (!bb)
99 return;
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
105 /* Recompute the forwarder flag after the block has been modified.  */
107 static void
108 update_forwarder_flag (basic_block bb)
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
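/* For illustration, a hypothetical instance of this simplification (not
   taken from these sources):

       if (cond) goto L2;               if (!cond) goto L3;
       goto L3;                   ==>   L2: ...
   L2: ...

   The condition is inverted, the branch edge becomes the fallthru edge,
   and the block holding the unconditional jump is deleted.  */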
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
126 /* Verify that there are exactly two successors. */
127 if (EDGE_COUNT (cbranch_block->succs) != 2)
128 return false;
130 /* Verify that we've got a normal conditional branch at the end
131 of the block. */
132 cbranch_insn = BB_END (cbranch_block);
133 if (!any_condjump_p (cbranch_insn))
134 return false;
136 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
137 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
139 /* The next block must not have multiple predecessors, must not
140 be the last block in the function, and must contain just the
141 unconditional jump. */
142 jump_block = cbranch_fallthru_edge->dest;
143 if (EDGE_COUNT (jump_block->preds) >= 2
144 || jump_block->next_bb == EXIT_BLOCK_PTR
145 || !FORWARDER_BLOCK_P (jump_block))
146 return false;
147 jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;
149 /* If we are partitioning hot/cold basic blocks, we don't want to
150 mess up unconditional or indirect jumps that cross between hot
151 and cold sections. */
153 if (flag_reorder_blocks_and_partition
154 && (jump_block->partition != jump_dest_block->partition
155 || cbranch_jump_edge->crossing_edge))
156 return false;
158 /* The conditional branch must target the block after the
159 unconditional branch. */
160 cbranch_dest_block = cbranch_jump_edge->dest;
162 if (cbranch_dest_block == EXIT_BLOCK_PTR
163 || !can_fallthru (jump_block, cbranch_dest_block))
164 return false;
166 /* Invert the conditional branch. */
167 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
168 return false;
170 if (dump_file)
171 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
172 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
174 /* Success. Update the CFG to match. Note that after this point
175 the edge variable names appear backwards; the redirection is done
176 this way to preserve edge profile data. */
177 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
178 cbranch_dest_block);
179 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
180 jump_dest_block);
181 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
182 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
183 update_br_prob_note (cbranch_block);
185 /* Delete the block with the unconditional jump, and clean up the mess. */
186 delete_basic_block (jump_block);
187 tidy_fallthru_edge (cbranch_jump_edge);
188 update_forwarder_flag (cbranch_block);
190 return true;
193 /* Attempt to prove that the operation is a NOOP using CSElib, or else mark
194 its effect on the registers.  Used by jump threading.  */
196 static bool
197 mark_effect (rtx exp, regset nonequal)
199 int regno;
200 rtx dest;
201 switch (GET_CODE (exp))
203 /* In case we do clobber the register, mark it as equal, as we know the
204 value is dead so it doesn't have to match.  */
205 case CLOBBER:
206 if (REG_P (XEXP (exp, 0)))
208 dest = XEXP (exp, 0);
209 regno = REGNO (dest);
210 CLEAR_REGNO_REG_SET (nonequal, regno);
211 if (regno < FIRST_PSEUDO_REGISTER)
213 int n = hard_regno_nregs[regno][GET_MODE (dest)];
214 while (--n > 0)
215 CLEAR_REGNO_REG_SET (nonequal, regno + n);
218 return false;
220 case SET:
221 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
222 return false;
223 dest = SET_DEST (exp);
224 if (dest == pc_rtx)
225 return false;
226 if (!REG_P (dest))
227 return true;
228 regno = REGNO (dest);
229 SET_REGNO_REG_SET (nonequal, regno);
230 if (regno < FIRST_PSEUDO_REGISTER)
232 int n = hard_regno_nregs[regno][GET_MODE (dest)];
233 while (--n > 0)
234 SET_REGNO_REG_SET (nonequal, regno + n);
236 return false;
238 default:
239 return false;
243 /* Return nonzero if X is a register set in regset DATA.
244 Called via for_each_rtx. */
245 static int
246 mentions_nonequal_regs (rtx *x, void *data)
248 regset nonequal = (regset) data;
249 if (REG_P (*x))
251 int regno;
253 regno = REGNO (*x);
254 if (REGNO_REG_SET_P (nonequal, regno))
255 return 1;
256 if (regno < FIRST_PSEUDO_REGISTER)
258 int n = hard_regno_nregs[regno][GET_MODE (*x)];
259 while (--n > 0)
260 if (REGNO_REG_SET_P (nonequal, regno + n))
261 return 1;
264 return 0;
266 /* Attempt to prove that the basic block B will have no side effects and
267 always continues along the same edge if reached via E.  Return that edge
268 if it exists, NULL otherwise.  */
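/* For illustration, a hypothetical case this can prove (not taken from
   these sources): if E->SRC ends in "if (a == b) goto ...;" and B ends in
   "if (a >= b) goto C;" with no side effects in between, then EQ dominates
   GE, so reaching B along the EQ-true edge always leaves B through the
   GE-true edge, and the jump can be threaded past B.  */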
270 static edge
271 thread_jump (int mode, edge e, basic_block b)
273 rtx set1, set2, cond1, cond2, insn;
274 enum rtx_code code1, code2, reversed_code2;
275 bool reverse1 = false;
276 int i;
277 regset nonequal;
278 bool failed = false;
280 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
281 return NULL;
283 /* At the moment we handle only conditional jumps, but later we may
284 want to extend this code to tablejumps and others.  */
285 if (EDGE_COUNT (e->src->succs) != 2)
286 return NULL;
287 if (EDGE_COUNT (b->succs) != 2)
289 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
290 return NULL;
293 /* Second branch must end with onlyjump, as we will eliminate the jump. */
294 if (!any_condjump_p (BB_END (e->src)))
295 return NULL;
297 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
299 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
300 return NULL;
303 set1 = pc_set (BB_END (e->src));
304 set2 = pc_set (BB_END (b));
305 if (((e->flags & EDGE_FALLTHRU) != 0)
306 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
307 reverse1 = true;
309 cond1 = XEXP (SET_SRC (set1), 0);
310 cond2 = XEXP (SET_SRC (set2), 0);
311 if (reverse1)
312 code1 = reversed_comparison_code (cond1, BB_END (e->src));
313 else
314 code1 = GET_CODE (cond1);
316 code2 = GET_CODE (cond2);
317 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
319 if (!comparison_dominates_p (code1, code2)
320 && !comparison_dominates_p (code1, reversed_code2))
321 return NULL;
323 /* Ensure that the comparison operators are equivalent.
324 ??? This is far too pessimistic.  We should allow swapped operands,
325 different CCmodes, or, for example, interval comparisons that
326 dominate even when the operands are not equivalent.  */
327 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
328 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
329 return NULL;
331 /* Short circuit cases where block B contains some side effects, as we can't
332 safely bypass it. */
333 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
334 insn = NEXT_INSN (insn))
335 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
337 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
338 return NULL;
341 cselib_init (false);
343 /* First process all values computed in the source basic block. */
344 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
345 insn = NEXT_INSN (insn))
346 if (INSN_P (insn))
347 cselib_process_insn (insn);
349 nonequal = BITMAP_XMALLOC();
350 CLEAR_REG_SET (nonequal);
352 /* Now assume that we've continued along the edge E to B and continue
353 processing as if it were the same basic block.
354 Our goal is to prove that the whole block is a NOOP.  */
356 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
357 insn = NEXT_INSN (insn))
359 if (INSN_P (insn))
361 rtx pat = PATTERN (insn);
363 if (GET_CODE (pat) == PARALLEL)
365 for (i = 0; i < XVECLEN (pat, 0); i++)
366 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
368 else
369 failed |= mark_effect (pat, nonequal);
372 cselib_process_insn (insn);
375 /* Later we should clear dead registers from NONEQUAL.  So far we don't
376 have liveness information in cfg_cleanup.  */
377 if (failed)
379 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
380 goto failed_exit;
383 /* cond2 must not mention any register that is not known to be equal
384 to its value in the former block.  */
385 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
386 goto failed_exit;
388 /* If liveness information is available, we need to prove equivalence
389 only for the live values.  */
390 if (mode & CLEANUP_UPDATE_LIFE)
391 AND_REG_SET (nonequal, b->global_live_at_end);
393 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
395 BITMAP_XFREE (nonequal);
396 cselib_finish ();
397 if ((comparison_dominates_p (code1, code2) != 0)
398 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
399 return BRANCH_EDGE (b);
400 else
401 return FALLTHRU_EDGE (b);
403 failed_exit:
404 BITMAP_XFREE (nonequal);
405 cselib_finish ();
406 return NULL;
409 /* Attempt to forward edges leaving basic block B.
410 Return true if successful. */
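/* For illustration, a hypothetical chain this walks (not taken from these
   sources): if B branches to B1, B1 merely jumps to B2, and B2 merely
   jumps to B3, the edge out of B is redirected straight to B3.  The walk
   is bounded by n_basic_blocks so that cycles of forwarder blocks are
   detected rather than followed forever.  */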
412 static bool
413 try_forward_edges (int mode, basic_block b)
415 bool changed = false;
416 edge e, *threaded_edges = NULL;
418 /* If we are partitioning hot/cold basic blocks, we don't want to
419 mess up unconditional or indirect jumps that cross between hot
420 and cold sections. */
422 if (flag_reorder_blocks_and_partition
423 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
424 return false;
426 FOR_EACH_EDGE (e, b->succs)
428 basic_block target, first;
429 int counter;
430 bool threaded = false;
431 int nthreaded_edges = 0;
432 bool may_thread = first_pass | (b->flags & BB_DIRTY);
434 /* Skip complex edges because we don't know how to update them.
436 Still handle fallthru edges, as we can succeed in forwarding the fallthru
437 edge to the same place as the branch edge of a conditional branch and
438 turn the conditional branch into an unconditional branch.  */
439 if (e->flags & EDGE_COMPLEX)
440 continue;
442 target = first = e->dest;
443 counter = 0;
445 while (counter < n_basic_blocks)
447 basic_block new_target = NULL;
448 bool new_target_threaded = false;
449 may_thread |= target->flags & BB_DIRTY;
451 if (FORWARDER_BLOCK_P (target)
452 && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
454 /* Bypass trivial infinite loops. */
455 if (target == EDGE_SUCC (target, 0)->dest)
456 counter = n_basic_blocks;
457 new_target = EDGE_SUCC (target, 0)->dest;
460 /* Allow threading over at most one edge at a time, to simplify updating
461 of the probabilities.  */
462 else if ((mode & CLEANUP_THREADING) && may_thread)
464 edge t = thread_jump (mode, e, target);
465 if (t)
467 if (!threaded_edges)
468 threaded_edges = xmalloc (sizeof (*threaded_edges)
469 * n_basic_blocks);
470 else
472 int i;
474 /* Detect an infinite loop across blocks not
475 including the start block. */
476 for (i = 0; i < nthreaded_edges; ++i)
477 if (threaded_edges[i] == t)
478 break;
479 if (i < nthreaded_edges)
481 counter = n_basic_blocks;
482 break;
486 /* Detect an infinite loop across the start block. */
487 if (t->dest == b)
488 break;
490 if (nthreaded_edges >= n_basic_blocks)
491 abort ();
492 threaded_edges[nthreaded_edges++] = t;
494 new_target = t->dest;
495 new_target_threaded = true;
496 /* ix = 0; */
500 if (!new_target)
501 break;
503 /* Avoid killing loop pre-headers, as they are the place the loop
504 optimizer wants to hoist code to.
506 For fallthru forwarders, the LOOP_BEG note must appear between
507 the header of the block and the CODE_LABEL of the loop; for non-forwarders
508 it must appear before the JUMP_INSN.  */
509 if ((mode & CLEANUP_PRE_LOOP) && optimize)
511 rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
512 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
514 if (!NOTE_P (insn))
515 insn = NEXT_INSN (insn);
517 for (; insn && !LABEL_P (insn) && !INSN_P (insn);
518 insn = NEXT_INSN (insn))
519 if (NOTE_P (insn)
520 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
521 break;
523 if (NOTE_P (insn))
524 break;
526 /* Do not clean up branches to just past the end of a loop
527 at this time; it can mess up the loop optimizer's
528 recognition of some patterns. */
530 insn = PREV_INSN (BB_HEAD (target));
531 if (insn && NOTE_P (insn)
532 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
533 break;
536 counter++;
537 target = new_target;
538 threaded |= new_target_threaded;
541 if (counter >= n_basic_blocks)
543 if (dump_file)
544 fprintf (dump_file, "Infinite loop in BB %i.\n",
545 target->index);
547 else if (target == first)
548 ; /* We didn't do anything. */
549 else
551 /* Save the values now, as the edge may get removed. */
552 gcov_type edge_count = e->count;
553 int edge_probability = e->probability;
554 int edge_frequency;
555 int n = 0;
557 /* Don't force if target is exit block. */
558 if (threaded && target != EXIT_BLOCK_PTR)
560 notice_new_block (redirect_edge_and_branch_force (e, target));
561 if (dump_file)
562 fprintf (dump_file, "Conditionals threaded.\n");
564 else if (!redirect_edge_and_branch (e, target))
566 if (dump_file)
567 fprintf (dump_file,
568 "Forwarding edge %i->%i to %i failed.\n",
569 b->index, e->dest->index, target->index);
570 continue;
573 /* We successfully forwarded the edge. Now update profile
574 data: for each edge we traversed in the chain, remove
575 the original edge's execution count. */
576 edge_frequency = ((edge_probability * b->frequency
577 + REG_BR_PROB_BASE / 2)
578 / REG_BR_PROB_BASE);
580 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
581 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
585 edge t;
587 first->count -= edge_count;
588 if (first->count < 0)
589 first->count = 0;
590 first->frequency -= edge_frequency;
591 if (first->frequency < 0)
592 first->frequency = 0;
593 if (EDGE_COUNT (first->succs) > 1)
595 edge e;
596 int prob;
597 if (n >= nthreaded_edges)
598 abort ();
599 t = threaded_edges [n++];
600 if (t->src != first)
601 abort ();
602 if (first->frequency)
603 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
604 else
605 prob = 0;
606 if (prob > t->probability)
607 prob = t->probability;
608 t->probability -= prob;
609 prob = REG_BR_PROB_BASE - prob;
610 if (prob <= 0)
612 EDGE_SUCC (first, 0)->probability = REG_BR_PROB_BASE;
613 EDGE_SUCC (first, 1)->probability = 0;
615 else
617 FOR_EACH_EDGE (e, first->succs)
619 e->probability = ((e->probability * REG_BR_PROB_BASE)
620 / (double) prob);
622 END_FOR_EACH_EDGE;
624 update_br_prob_note (first);
626 else
628 /* It is possible that, as the result of
629 threading, we've removed the edge because it was
630 threaded to the fallthru edge.  Avoid
631 getting out of sync.  */
632 if (n < nthreaded_edges
633 && first == threaded_edges [n]->src)
634 n++;
635 t = EDGE_SUCC (first, 0);
638 t->count -= edge_count;
639 if (t->count < 0)
640 t->count = 0;
641 first = t->dest;
643 while (first != target);
645 changed = true;
648 END_FOR_EACH_EDGE;
650 if (threaded_edges)
651 free (threaded_edges);
652 return changed;
656 /* Blocks A and B are to be merged into a single block. A has no incoming
657 fallthru edge, so it can be moved before B without adding or modifying
658 any jumps (aside from the jump from A to B). */
660 static void
661 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
663 rtx barrier;
665 /* If we are partitioning hot/cold basic blocks, we don't want to
666 mess up unconditional or indirect jumps that cross between hot
667 and cold sections. */
669 if (flag_reorder_blocks_and_partition
670 && (a->partition != b->partition
671 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
672 return;
674 barrier = next_nonnote_insn (BB_END (a));
675 if (!BARRIER_P (barrier))
676 abort ();
677 delete_insn (barrier);
679 /* Move block and loop notes out of the chain so that we do not
680 disturb their order.
682 ??? A better solution would be to squeeze out all the non-nested notes
683 and adjust the block trees appropriately. Even better would be to have
684 a tighter connection between block trees and rtl so that this is not
685 necessary. */
686 if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
687 abort ();
689 /* Scramble the insn chain. */
690 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
691 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
692 a->flags |= BB_DIRTY;
694 if (dump_file)
695 fprintf (dump_file, "Moved block %d before %d and merged.\n",
696 a->index, b->index);
698 /* Swap the records for the two blocks around. */
700 unlink_block (a);
701 link_block (a, b->prev_bb);
703 /* Now blocks A and B are contiguous. Merge them. */
704 merge_blocks (a, b);
707 /* Blocks A and B are to be merged into a single block. B has no outgoing
708 fallthru edge, so it can be moved after A without adding or modifying
709 any jumps (aside from the jump from A to B). */
711 static void
712 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
714 rtx barrier, real_b_end;
715 rtx label, table;
717 /* If we are partitioning hot/cold basic blocks, we don't want to
718 mess up unconditional or indirect jumps that cross between hot
719 and cold sections. */
721 if (flag_reorder_blocks_and_partition
722 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
723 || a->partition != b->partition))
724 return;
726 real_b_end = BB_END (b);
728 /* If a jump table follows block B, temporarily add the jump table
729 to block B so that it will also be moved to the correct location.  */
730 if (tablejump_p (BB_END (b), &label, &table)
731 && prev_active_insn (label) == BB_END (b))
733 BB_END (b) = table;
736 /* There had better have been a barrier there. Delete it. */
737 barrier = NEXT_INSN (BB_END (b));
738 if (barrier && BARRIER_P (barrier))
739 delete_insn (barrier);
741 /* Move block and loop notes out of the chain so that we do not
742 disturb their order.
744 ??? A better solution would be to squeeze out all the non-nested notes
745 and adjust the block trees appropriately. Even better would be to have
746 a tighter connection between block trees and rtl so that this is not
747 necessary. */
748 if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
749 abort ();
751 /* Scramble the insn chain. */
752 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
754 /* Restore the real end of b. */
755 BB_END (b) = real_b_end;
757 if (dump_file)
758 fprintf (dump_file, "Moved block %d after %d and merged.\n",
759 b->index, a->index);
761 /* Now blocks A and B are contiguous. Merge them. */
762 merge_blocks (a, b);
765 /* Attempt to merge basic blocks that are potentially non-adjacent.
766 Return NULL iff the attempt failed; otherwise return the basic block
767 where cleanup_cfg should continue.  Because the merging commonly
768 moves a basic block away or introduces another optimization
769 possibility, return the basic block just before B so that cleanup_cfg
770 doesn't need to iterate.
772 It may be a good idea to return the basic block before C in the case
773 where C has been moved after B and originally appeared earlier in the
774 insn sequence, but we have no information available about the
775 relative ordering of these two.  Hopefully it is not too common.  */
777 static basic_block
778 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
780 basic_block next;
782 /* If we are partitioning hot/cold basic blocks, we don't want to
783 mess up unconditional or indirect jumps that cross between hot
784 and cold sections. */
786 if (flag_reorder_blocks_and_partition
787 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
788 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
789 || b->partition != c->partition))
790 return NULL;
794 /* If B has a fallthru edge to C, no need to move anything. */
795 if (e->flags & EDGE_FALLTHRU)
797 int b_index = b->index, c_index = c->index;
798 merge_blocks (b, c);
799 update_forwarder_flag (b);
801 if (dump_file)
802 fprintf (dump_file, "Merged %d and %d without moving.\n",
803 b_index, c_index);
805 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
808 /* Otherwise we will need to move code around. Do that only if expensive
809 transformations are allowed. */
810 else if (mode & CLEANUP_EXPENSIVE)
812 edge tmp_edge, b_fallthru_edge;
813 bool c_has_outgoing_fallthru;
814 bool b_has_incoming_fallthru;
816 /* Avoid overactive code motion, as the forwarder blocks should be
817 eliminated by edge redirection instead.  One exception might be the
818 case where B is a forwarder block and C has no fallthru edge, but
819 that should be cleaned up by bb-reorder instead.  */
820 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
821 return NULL;
823 /* We must make sure not to munge the nesting of lexical blocks
824 and loop notes.  This is done by squeezing out all the notes
825 and leaving them there to lie.  Not ideal, but functional.  */
827 FOR_EACH_EDGE (tmp_edge, c->succs)
829 if (tmp_edge->flags & EDGE_FALLTHRU)
830 break;
832 END_FOR_EACH_EDGE;
834 c_has_outgoing_fallthru = (tmp_edge != NULL);
836 FOR_EACH_EDGE (tmp_edge, b->preds)
838 if (tmp_edge->flags & EDGE_FALLTHRU)
839 break;
841 END_FOR_EACH_EDGE;
843 b_has_incoming_fallthru = (tmp_edge != NULL);
844 b_fallthru_edge = tmp_edge;
845 next = b->prev_bb;
846 if (next == c)
847 next = next->prev_bb;
849 /* Otherwise, we're going to try to move C after B. If C does
850 not have an outgoing fallthru, then it can be moved
851 immediately after B without introducing or modifying jumps. */
852 if (! c_has_outgoing_fallthru)
854 merge_blocks_move_successor_nojumps (b, c);
855 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
858 /* If B does not have an incoming fallthru, then it can be moved
859 immediately before C without introducing or modifying jumps.
860 C cannot be the first block, so we do not have to worry about
861 accessing a non-existent block. */
863 if (b_has_incoming_fallthru)
865 basic_block bb;
867 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
868 return NULL;
869 bb = force_nonfallthru (b_fallthru_edge);
870 if (bb)
871 notice_new_block (bb);
874 merge_blocks_move_predecessor_nojumps (b, c);
875 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
878 return NULL;
882 /* Remove the memory attributes of the MEM expressions X and Y
883 wherever they are not equal.  */
885 void
886 merge_memattrs (rtx x, rtx y)
888 int i;
889 int j;
890 enum rtx_code code;
891 const char *fmt;
893 if (x == y)
894 return;
895 if (x == 0 || y == 0)
896 return;
898 code = GET_CODE (x);
900 if (code != GET_CODE (y))
901 return;
903 if (GET_MODE (x) != GET_MODE (y))
904 return;
906 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
908 if (! MEM_ATTRS (x))
909 MEM_ATTRS (y) = 0;
910 else if (! MEM_ATTRS (y))
911 MEM_ATTRS (x) = 0;
912 else
914 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
916 set_mem_alias_set (x, 0);
917 set_mem_alias_set (y, 0);
920 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
922 set_mem_expr (x, 0);
923 set_mem_expr (y, 0);
924 set_mem_offset (x, 0);
925 set_mem_offset (y, 0);
927 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
929 set_mem_offset (x, 0);
930 set_mem_offset (y, 0);
933 set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
934 set_mem_size (y, MEM_SIZE (x));
936 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
937 set_mem_align (y, MEM_ALIGN (x));
941 fmt = GET_RTX_FORMAT (code);
942 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
944 switch (fmt[i])
946 case 'E':
947 /* Two vectors must have the same length. */
948 if (XVECLEN (x, i) != XVECLEN (y, i))
949 return;
951 for (j = 0; j < XVECLEN (x, i); j++)
952 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
954 break;
956 case 'e':
957 merge_memattrs (XEXP (x, i), XEXP (y, i));
960 return;
964 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
966 static bool
967 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
969 rtx p1, p2;
971 /* Verify that I1 and I2 are equivalent. */
972 if (GET_CODE (i1) != GET_CODE (i2))
973 return false;
975 p1 = PATTERN (i1);
976 p2 = PATTERN (i2);
978 if (GET_CODE (p1) != GET_CODE (p2))
979 return false;
981 /* If this is a CALL_INSN, compare register usage information.
982 If we don't check this on stack register machines, the two
983 CALL_INSNs might be merged leaving reg-stack.c with mismatching
984 numbers of stack registers in the same basic block.
985 If we don't check this on machines with delay slots, a delay slot may
986 be filled that clobbers a parameter expected by the subroutine.
988 ??? We take the simple route for now and assume that if they're
989 equal, they were constructed identically. */
991 if (CALL_P (i1)
992 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
993 CALL_INSN_FUNCTION_USAGE (i2))
994 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
995 return false;
997 #ifdef STACK_REGS
998 /* If cross_jump_death_matters is not 0, the insn's mode
999 indicates whether or not the insn contains any stack-like
1000 regs. */
1002 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1004 /* If register stack conversion has already been done, then
1005 death notes must also be compared before it is certain that
1006 the two instruction streams match. */
1008 rtx note;
1009 HARD_REG_SET i1_regset, i2_regset;
1011 CLEAR_HARD_REG_SET (i1_regset);
1012 CLEAR_HARD_REG_SET (i2_regset);
1014 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1015 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1016 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1018 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1019 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1020 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1022 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1024 return false;
1026 done:
1029 #endif
1031 if (reload_completed
1032 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1033 return true;
1035 /* Do not do EQUIV substitution after reload. First, we're undoing the
1036 work of reload_cse. Second, we may be undoing the work of the post-
1037 reload splitting pass. */
1038 /* ??? Possibly add a new phase switch variable that can be used by
1039 targets to disallow the troublesome insns after splitting. */
1040 if (!reload_completed)
1042 /* The following code helps take care of G++ cleanups. */
1043 rtx equiv1 = find_reg_equal_equiv_note (i1);
1044 rtx equiv2 = find_reg_equal_equiv_note (i2);
1046 if (equiv1 && equiv2
1047 /* If the equivalences are not to a constant, they may
1048 reference pseudos that no longer exist, so we can't
1049 use them. */
1050 && (! reload_completed
1051 || (CONSTANT_P (XEXP (equiv1, 0))
1052 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1054 rtx s1 = single_set (i1);
1055 rtx s2 = single_set (i2);
1056 if (s1 != 0 && s2 != 0
1057 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1059 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1060 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1061 if (! rtx_renumbered_equal_p (p1, p2))
1062 cancel_changes (0);
1063 else if (apply_change_group ())
1064 return true;
1069 return false;
1072 /* Look through the insns at the end of BB1 and BB2 and find the longest
1073 sequence of equivalent insns.  Store the first insns of that sequence
1074 in *F1 and *F2 and return the sequence length.
1076 To simplify callers of this function, if the blocks match exactly,
1077 store the head of the blocks in *F1 and *F2. */
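/* For illustration, a hypothetical match (not taken from these sources):
   if BB1 ends in "x = a + b; return x;" and BB2 ends in
   "y = c; x = a + b; return x;", the backward walk below finds the
   common tail and leaves *F1 and *F2 at its first insn in each block.  */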
1079 static int
1080 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1081 basic_block bb2, rtx *f1, rtx *f2)
1083 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1084 int ninsns = 0;
1086 /* Skip simple jumps at the end of the blocks. Complex jumps still
1087 need to be compared for equivalence, which we'll do below. */
1089 i1 = BB_END (bb1);
1090 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1091 if (onlyjump_p (i1)
1092 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1094 last1 = i1;
1095 i1 = PREV_INSN (i1);
1098 i2 = BB_END (bb2);
1099 if (onlyjump_p (i2)
1100 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1102 last2 = i2;
1103 /* Count everything except the unconditional jump as an insn.  */
1104 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1105 ninsns++;
1106 i2 = PREV_INSN (i2);
1109 while (true)
1111 /* Ignore notes. */
1112 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1113 i1 = PREV_INSN (i1);
1115 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1116 i2 = PREV_INSN (i2);
1118 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1119 break;
1121 if (!insns_match_p (mode, i1, i2))
1122 break;
1124 merge_memattrs (i1, i2);
1126 /* Don't begin a cross-jump with a NOTE insn. */
1127 if (INSN_P (i1))
1129 /* If the merged insns have different REG_EQUAL notes, then
1130 remove them. */
1131 rtx equiv1 = find_reg_equal_equiv_note (i1);
1132 rtx equiv2 = find_reg_equal_equiv_note (i2);
1134 if (equiv1 && !equiv2)
1135 remove_note (i1, equiv1);
1136 else if (!equiv1 && equiv2)
1137 remove_note (i2, equiv2);
1138 else if (equiv1 && equiv2
1139 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1141 remove_note (i1, equiv1);
1142 remove_note (i2, equiv2);
1145 afterlast1 = last1, afterlast2 = last2;
1146 last1 = i1, last2 = i2;
1147 ninsns++;
1150 i1 = PREV_INSN (i1);
1151 i2 = PREV_INSN (i2);
1154 #ifdef HAVE_cc0
1155 /* Don't allow the insn after a compare to be shared by
1156 cross-jumping unless the compare is also shared. */
1157 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1158 last1 = afterlast1, last2 = afterlast2, ninsns--;
1159 #endif
1161 /* Include preceding notes and labels in the cross-jump. One,
1162 this may bring us to the head of the blocks as requested above.
1163 Two, it keeps line number notes as matched as may be. */
1164 if (ninsns)
1166 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1167 last1 = PREV_INSN (last1);
1169 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1170 last1 = PREV_INSN (last1);
1172 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1173 last2 = PREV_INSN (last2);
1175 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1176 last2 = PREV_INSN (last2);
1178 *f1 = last1;
1179 *f2 = last2;
1182 return ninsns;
1185 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1186 the branch instruction.  This means that if we commonize the control
1187 flow before the end of the basic block, the semantics remain unchanged.
1189 We may assume that there exists one edge with a common destination.  */
1191 static bool
1192 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1194 int nehedges1 = 0, nehedges2 = 0;
1195 edge fallthru1 = 0, fallthru2 = 0;
1196 edge e1, e2;
1198 /* If BB1 has only one successor, we may be looking at either an
1199 unconditional jump, or a fake edge to exit. */
1200 if (EDGE_COUNT (bb1->succs) == 1
1201 && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1202 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1203 return (EDGE_COUNT (bb2->succs) == 1
1204 && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1205 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1207 /* Match conditional jumps - this may get tricky when fallthru and branch
1208 edges are crossed. */
1209 if (EDGE_COUNT (bb1->succs) == 2
1210 && any_condjump_p (BB_END (bb1))
1211 && onlyjump_p (BB_END (bb1)))
1213 edge b1, f1, b2, f2;
1214 bool reverse, match;
1215 rtx set1, set2, cond1, cond2;
1216 enum rtx_code code1, code2;
1218 if (EDGE_COUNT (bb2->succs) != 2
1219 || !any_condjump_p (BB_END (bb2))
1220 || !onlyjump_p (BB_END (bb2)))
1221 return false;
1223 b1 = BRANCH_EDGE (bb1);
1224 b2 = BRANCH_EDGE (bb2);
1225 f1 = FALLTHRU_EDGE (bb1);
1226 f2 = FALLTHRU_EDGE (bb2);
1228 /* Get around possible forwarders on fallthru edges. Other cases
1229 should be optimized out already. */
1230 if (FORWARDER_BLOCK_P (f1->dest))
1231 f1 = EDGE_SUCC (f1->dest, 0);
1233 if (FORWARDER_BLOCK_P (f2->dest))
1234 f2 = EDGE_SUCC (f2->dest, 0);
1236 /* To simplify use of this function, return false if there are
1237 unneeded forwarder blocks. These will get eliminated later
1238 during cleanup_cfg. */
1239 if (FORWARDER_BLOCK_P (f1->dest)
1240 || FORWARDER_BLOCK_P (f2->dest)
1241 || FORWARDER_BLOCK_P (b1->dest)
1242 || FORWARDER_BLOCK_P (b2->dest))
1243 return false;
1245 if (f1->dest == f2->dest && b1->dest == b2->dest)
1246 reverse = false;
1247 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1248 reverse = true;
1249 else
1250 return false;
1252 set1 = pc_set (BB_END (bb1));
1253 set2 = pc_set (BB_END (bb2));
1254 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1255 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1256 reverse = !reverse;
1258 cond1 = XEXP (SET_SRC (set1), 0);
1259 cond2 = XEXP (SET_SRC (set2), 0);
1260 code1 = GET_CODE (cond1);
1261 if (reverse)
1262 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1263 else
1264 code2 = GET_CODE (cond2);
1266 if (code2 == UNKNOWN)
1267 return false;
1269 /* Verify codes and operands match. */
1270 match = ((code1 == code2
1271 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1272 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1273 || (code1 == swap_condition (code2)
1274 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1275 XEXP (cond2, 0))
1276 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1277 XEXP (cond2, 1))));
1279 /* If we return true, we will join the blocks, which means that
1280 we will only have one branch prediction bit to work with.  Thus
1281 we require the existing branches to have probabilities that are
1282 roughly similar.  */
1283 if (match
1284 && !optimize_size
1285 && maybe_hot_bb_p (bb1)
1286 && maybe_hot_bb_p (bb2))
1288 int prob2;
1290 if (b1->dest == b2->dest)
1291 prob2 = b2->probability;
1292 else
1293 /* Do not use f2 probability as f2 may be forwarded. */
1294 prob2 = REG_BR_PROB_BASE - b2->probability;
1296 /* Fail if the difference in probabilities is greater than 50%.
1297 This rules out two well-predicted branches with opposite
1298 outcomes. */
1299 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1301 if (dump_file)
1302 fprintf (dump_file,
1303 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1304 bb1->index, bb2->index, b1->probability, prob2);
1306 return false;
1310 if (dump_file && match)
1311 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1312 bb1->index, bb2->index);
1314 return match;
1317 /* Generic case - we are seeing a computed jump, table jump or trapping
1318 instruction. */
1320 #ifndef CASE_DROPS_THROUGH
1321 /* Check whether there are tablejumps in the end of BB1 and BB2.
1322 Return true if they are identical. */
1324 rtx label1, label2;
1325 rtx table1, table2;
1327 if (tablejump_p (BB_END (bb1), &label1, &table1)
1328 && tablejump_p (BB_END (bb2), &label2, &table2)
1329 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1331 /* The labels should never be the same rtx.  If they really are the same,
1332 the jump tables are the same too.  So disable crossjumping of blocks BB1
1333 and BB2 because when deleting the common insns at the end of BB1
1334 by delete_basic_block () the jump table would be deleted too.  */
1335 /* If LABEL2 is referenced in BB1->END, do not do anything
1336 because we would lose information when replacing
1337 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1338 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1340 /* Set IDENTICAL to true when the tables are identical. */
1341 bool identical = false;
1342 rtx p1, p2;
1344 p1 = PATTERN (table1);
1345 p2 = PATTERN (table2);
1346 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1348 identical = true;
1350 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1351 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1352 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1353 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1355 int i;
1357 identical = true;
1358 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1359 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1360 identical = false;
1363 if (identical)
1365 replace_label_data rr;
1366 bool match;
1368 /* Temporarily replace references to LABEL1 with LABEL2
1369 in BB1->END so that we can compare the instructions.  */
1370 rr.r1 = label1;
1371 rr.r2 = label2;
1372 rr.update_label_nuses = false;
1373 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1375 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1376 if (dump_file && match)
1377 fprintf (dump_file,
1378 "Tablejumps in bb %i and %i match.\n",
1379 bb1->index, bb2->index);
1381 /* Set the original label in BB1->END because when deleting
1382 a block whose end is a tablejump, the tablejump referenced
1383 from the instruction is deleted too. */
1384 rr.r1 = label2;
1385 rr.r2 = label1;
1386 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1388 return match;
1391 return false;
1394 #endif
1396 /* First ensure that the instructions match. There may be many outgoing
1397 edges so this test is generally cheaper. */
1398 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1399 return false;
1401 /* Search the outgoing edges, ensure that the counts do match, and find
1402 possible fallthru and exception handling edges since these need more
1403 validation.  */
1404 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1405 return false;
1407 FOR_EACH_EDGE (e1, bb1->succs)
1409 e2 = EDGE_SUCC (bb2, __ix);
1411 if (e1->flags & EDGE_EH)
1412 nehedges1++;
1414 if (e2->flags & EDGE_EH)
1415 nehedges2++;
1417 if (e1->flags & EDGE_FALLTHRU)
1418 fallthru1 = e1;
1419 if (e2->flags & EDGE_FALLTHRU)
1420 fallthru2 = e2;
1422 END_FOR_EACH_EDGE;
1424 /* If the numbers of edges of the various types do not match, fail.  */
1425 if (nehedges1 != nehedges2
1426 || (fallthru1 != 0) != (fallthru2 != 0))
1427 return false;
1429 /* Fallthru edges must be forwarded to the same destination.  */
1430 if (fallthru1)
1432 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1433 ? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
1434 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1435 ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);
1437 if (d1 != d2)
1438 return false;
1441 /* Ensure the same EH region. */
1443 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1444 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1446 if (!n1 && n2)
1447 return false;
1449 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1450 return false;
1453 /* We don't need to match the rest of the edges as the above checks should
1454 be enough to ensure that they are equivalent.  */
1455 return true;
1458 /* E1 and E2 are edges with the same destination block. Search their
1459 predecessors for common code. If found, redirect control flow from
1460 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
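/* For illustration, a hypothetical crossjump (not taken from these
   sources): if both predecessors of a common destination end with the same
   few insns, SRC2 is split just before its copy of that sequence and the
   outgoing edge of SRC1 is redirected to the split point, so the shared
   insns are kept only once.  */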
1462 static bool
1463 try_crossjump_to_edge (int mode, edge e1, edge e2)
1465 int nmatch;
1466 basic_block src1 = e1->src, src2 = e2->src;
1467 basic_block redirect_to, redirect_from, to_remove;
1468 rtx newpos1, newpos2;
1469 edge s;
1471 newpos1 = newpos2 = NULL_RTX;
1473 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1474 to try this optimization. */
1476 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1477 return false;
1479 /* Search backward through forwarder blocks. We don't need to worry
1480 about multiple entry or chained forwarders, as they will be optimized
1481 away. We do this to look past the unconditional jump following a
1482 conditional jump that is required due to the current CFG shape. */
1483 if (EDGE_COUNT (src1->preds) == 1
1484 && FORWARDER_BLOCK_P (src1))
1485 e1 = EDGE_PRED (src1, 0), src1 = e1->src;
1487 if (EDGE_COUNT (src2->preds) == 1
1488 && FORWARDER_BLOCK_P (src2))
1489 e2 = EDGE_PRED (src2, 0), src2 = e2->src;
1491 /* Nothing to do if we reach ENTRY, or a common source block. */
1492 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1493 return false;
1494 if (src1 == src2)
1495 return false;
1497 /* Seeing more than one forwarder block would confuse us later...  */
1498 if (FORWARDER_BLOCK_P (e1->dest)
1499 && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
1500 return false;
1502 if (FORWARDER_BLOCK_P (e2->dest)
1503 && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
1504 return false;
1506 /* Likewise with dead code (possibly newly created by the other optimizations
1507 of cfg_cleanup). */
1508 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1509 return false;
1511 /* Look for the common insn sequence, part the first ... */
1512 if (!outgoing_edges_match (mode, src1, src2))
1513 return false;
1515 /* ... and part the second. */
1516 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1517 if (!nmatch)
1518 return false;
1520 #ifndef CASE_DROPS_THROUGH
1521 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1522 will be deleted.
1523 If we have tablejumps at the end of SRC1 and SRC2,
1524 they have already been compared for equivalence in outgoing_edges_match (),
1525 so replace the references to TABLE1 by references to TABLE2.  */
1527 rtx label1, label2;
1528 rtx table1, table2;
1530 if (tablejump_p (BB_END (src1), &label1, &table1)
1531 && tablejump_p (BB_END (src2), &label2, &table2)
1532 && label1 != label2)
1534 replace_label_data rr;
1535 rtx insn;
1537 /* Replace references to LABEL1 with LABEL2. */
1538 rr.r1 = label1;
1539 rr.r2 = label2;
1540 rr.update_label_nuses = true;
1541 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1543 /* Do not replace the label in SRC1->END because when deleting
1544 a block whose end is a tablejump, the tablejump referenced
1545 from the instruction is deleted too. */
1546 if (insn != BB_END (src1))
1547 for_each_rtx (&insn, replace_label, &rr);
1551 #endif
1553 /* Avoid splitting if possible. */
1554 if (newpos2 == BB_HEAD (src2))
1555 redirect_to = src2;
1556 else
1558 if (dump_file)
1559 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1560 src2->index, nmatch);
1561 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1564 if (dump_file)
1565 fprintf (dump_file,
1566 "Cross jumping from bb %i to bb %i; %i common insns\n",
1567 src1->index, src2->index, nmatch);
1569 redirect_to->count += src1->count;
1570 redirect_to->frequency += src1->frequency;
1571 /* We may have some registers visible through the block.  */
1572 redirect_to->flags |= BB_DIRTY;
1574 /* Recompute the frequencies and counts of outgoing edges. */
1575 FOR_EACH_EDGE (s, redirect_to->succs)
1577 edge s2;
1578 basic_block d = s->dest;
1580 if (FORWARDER_BLOCK_P (d))
1581 d = EDGE_SUCC (d, 0)->dest;
1583 FOR_EACH_EDGE (s2, src1->succs)
1585 basic_block d2 = s2->dest;
1586 if (FORWARDER_BLOCK_P (d2))
1587 d2 = EDGE_SUCC (d2, 0)->dest;
1588 if (d == d2)
1589 break;
1591 END_FOR_EACH_EDGE;
1593 s->count += s2->count;
1595 /* Take care to update possible forwarder blocks.  We verified
1596 that there is no more than one in the chain, so we can't run
1597 into an infinite loop.  */
1598 if (FORWARDER_BLOCK_P (s->dest))
1600 EDGE_SUCC (s->dest, 0)->count += s2->count;
1601 s->dest->count += s2->count;
1602 s->dest->frequency += EDGE_FREQUENCY (s);
1605 if (FORWARDER_BLOCK_P (s2->dest))
1607 EDGE_SUCC (s2->dest, 0)->count -= s2->count;
1608 if (EDGE_SUCC (s2->dest, 0)->count < 0)
1609 EDGE_SUCC (s2->dest, 0)->count = 0;
1610 s2->dest->count -= s2->count;
1611 s2->dest->frequency -= EDGE_FREQUENCY (s);
1612 if (s2->dest->frequency < 0)
1613 s2->dest->frequency = 0;
1614 if (s2->dest->count < 0)
1615 s2->dest->count = 0;
1618 if (!redirect_to->frequency && !src1->frequency)
1619 s->probability = (s->probability + s2->probability) / 2;
1620 else
1621 s->probability
1622 = ((s->probability * redirect_to->frequency +
1623 s2->probability * src1->frequency)
1624 / (redirect_to->frequency + src1->frequency));
1626 END_FOR_EACH_EDGE;
1628 update_br_prob_note (redirect_to);
1630 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1632 /* Skip possible basic block header. */
1633 if (LABEL_P (newpos1))
1634 newpos1 = NEXT_INSN (newpos1);
1636 if (NOTE_P (newpos1))
1637 newpos1 = NEXT_INSN (newpos1);
1639 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1640 to_remove = EDGE_SUCC (redirect_from, 0)->dest;
1642 redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
1643 delete_basic_block (to_remove);
1645 update_forwarder_flag (redirect_from);
1647 return true;
1650 /* Search the predecessors of BB for common insn sequences.  When found,
1651 share code between them by redirecting control flow.  Return true if
1652 any changes were made.  */
1654 static bool
1655 try_crossjump_bb (int mode, basic_block bb)
1657 edge e, e2, fallthru;
1658 bool changed;
1659 unsigned max, ix, ix2;
1660 basic_block ev, ev2;
1662 /* Nothing to do if there are not at least two incoming edges.  */
1663 if (EDGE_COUNT (bb->preds) < 2)
1664 return false;
1666 /* If we are partitioning hot/cold basic blocks, we don't want to
1667 mess up unconditional or indirect jumps that cross between hot
1668 and cold sections. */
1670 if (flag_reorder_blocks_and_partition
1671 && (EDGE_PRED (bb, 0)->src->partition != EDGE_PRED (bb, 1)->src->partition
1672 || EDGE_PRED (bb, 0)->crossing_edge))
1673 return false;
1675 /* It is always cheapest to redirect a block that ends in a branch to
1676 a block that falls through into BB, as that adds no branches to the
1677 program. We'll try that combination first. */
1678 fallthru = NULL;
1679 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1681 if (EDGE_COUNT (bb->preds) > max)
1682 return false;
1684 FOR_EACH_EDGE (e, bb->preds)
1686 if (e->flags & EDGE_FALLTHRU)
1687 fallthru = e;
1689 END_FOR_EACH_EDGE;
1691 changed = false;
1692 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1694 e = EDGE_PRED (ev, ix);
1695 ix++;
1697 /* As noted above, first try with the fallthru predecessor. */
1698 if (fallthru)
1700 /* Don't combine the fallthru edge into anything else.
1701 If there is a match, we'll do it the other way around. */
1702 if (e == fallthru)
1703 continue;
1704 /* If nothing changed since the last attempt, there is nothing
1705 we can do. */
1706 if (!first_pass
1707 && (!(e->src->flags & BB_DIRTY)
1708 && !(fallthru->src->flags & BB_DIRTY)))
1709 continue;
1711 if (try_crossjump_to_edge (mode, e, fallthru))
1713 changed = true;
1714 ix = 0;
1715 ev = bb;
1716 continue;
1720 /* Non-obvious work limiting check: Recognize that we're going
1721 to call try_crossjump_bb on every basic block. So if we have
1722 two blocks with lots of outgoing edges (a switch) and they
1723 share lots of common destinations, then we would do the
1724 cross-jump check once for each common destination.
1726 Now, if the blocks actually are cross-jump candidates, then
1727 all of their destinations will be shared, which means that
1728 we only need to check them for cross-jump candidacy once.  We
1729 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1730 choosing to do the check from the block for which the edge
1731 in question is the first successor of A. */
1732 if (EDGE_SUCC (e->src, 0) != e)
1733 continue;
1735 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1737 e2 = EDGE_PRED (ev2, ix2);
1738 ix2++;
1740 if (e2 == e)
1741 continue;
1743 /* We've already checked the fallthru edge above. */
1744 if (e2 == fallthru)
1745 continue;
1747 /* The "first successor" check above only prevents multiple
1748 checks of crossjump(A,B). In order to prevent redundant
1749 checks of crossjump(B,A), require that A be the block
1750 with the lowest index. */
1751 if (e->src->index > e2->src->index)
1752 continue;
1754 /* If nothing changed since the last attempt, there is nothing
1755 we can do. */
1756 if (!first_pass
1757 && (!(e->src->flags & BB_DIRTY)
1758 && !(e2->src->flags & BB_DIRTY)))
1759 continue;
1761 if (try_crossjump_to_edge (mode, e, e2))
1763 changed = true;
1764 ev2 = bb;
1765 ix = 0;
1766 break;
1771 return changed;
1774 /* Do simple CFG optimizations - basic block merging, simplification of jump
1775 instructions, etc.  Return nonzero if changes were made.  */
1777 static bool
1778 try_optimize_cfg (int mode)
1780 bool changed_overall = false;
1781 bool changed;
1782 int iterations = 0;
1783 basic_block bb, b, next;
1785 if (mode & CLEANUP_CROSSJUMP)
1786 add_noreturn_fake_exit_edges ();
1788 FOR_EACH_BB (bb)
1789 update_forwarder_flag (bb);
1791 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1792 clear_bb_flags ();
1794 if (! targetm.cannot_modify_jumps_p ())
1796 first_pass = true;
1797 /* Attempt to merge blocks as made possible by edge removal. If
1798 a block has only one successor, and the successor has only
1799 one predecessor, they may be combined. */
1802 changed = false;
1803 iterations++;
1805 if (dump_file)
1806 fprintf (dump_file,
1807 "\n\ntry_optimize_cfg iteration %i\n\n",
1808 iterations);
1810 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1812 basic_block c;
1813 edge s;
1814 bool changed_here = false;
1816 /* Delete trivially dead basic blocks. */
1817 while (EDGE_COUNT (b->preds) == 0)
1819 c = b->prev_bb;
1820 if (dump_file)
1821 fprintf (dump_file, "Deleting block %i.\n",
1822 b->index);
1824 delete_basic_block (b);
1825 if (!(mode & CLEANUP_CFGLAYOUT))
1826 changed = true;
1827 b = c;
1830 /* Remove code labels no longer used. */
1831 if (EDGE_COUNT (b->preds) == 1
1832 && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
1833 && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
1834 && LABEL_P (BB_HEAD (b))
1835 /* If the previous block ends with a branch to this
1836 block, we can't delete the label. Normally this
1837 is a condjump that is yet to be simplified, but
1838 if CASE_DROPS_THRU, this can be a tablejump with
1839 some element going to the same place as the
1840 default (fallthru). */
1841 && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
1842 || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
1843 || ! label_is_jump_target_p (BB_HEAD (b),
1844 BB_END (EDGE_PRED (b, 0)->src))))
1846 rtx label = BB_HEAD (b);
1848 delete_insn_chain (label, label);
1849 /* In case the label is undeletable, move it after the
1850 BASIC_BLOCK note.  */
1851 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1853 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1855 reorder_insns_nobb (label, label, bb_note);
1856 BB_HEAD (b) = bb_note;
1858 if (dump_file)
1859 fprintf (dump_file, "Deleted label in block %i.\n",
1860 b->index);
1863 /* If we fall through an empty block, we can remove it. */
1864 if (!(mode & CLEANUP_CFGLAYOUT)
1865 && EDGE_COUNT (b->preds) == 1
1866 && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
1867 && !LABEL_P (BB_HEAD (b))
1868 && FORWARDER_BLOCK_P (b)
1869 /* Note that forwarder_block_p true ensures that
1870 there is a successor for this block. */
1871 && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
1872 && n_basic_blocks > 1)
1874 if (dump_file)
1875 fprintf (dump_file,
1876 "Deleting fallthru block %i.\n",
1877 b->index);
1879 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1880 redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
1881 delete_basic_block (b);
1882 changed = true;
1883 b = c;
1886 if (EDGE_COUNT (b->succs) == 1
1887 && (s = EDGE_SUCC (b, 0))
1888 && !(s->flags & EDGE_COMPLEX)
1889 && (c = s->dest) != EXIT_BLOCK_PTR
1890 && EDGE_COUNT (c->preds) == 1
1891 && b != c)
1893 /* When not in cfg_layout mode use code aware of reordering
1894 INSNs.  This code possibly creates new basic blocks, so it
1895 does not fit the merge_blocks interface and is kept here in the
1896 hope that it will become useless once more of the compiler
1897 is transformed to use cfg_layout mode.  */
1899 if ((mode & CLEANUP_CFGLAYOUT)
1900 && can_merge_blocks_p (b, c))
1902 merge_blocks (b, c);
1903 update_forwarder_flag (b);
1904 changed_here = true;
1906 else if (!(mode & CLEANUP_CFGLAYOUT)
1907 /* If the jump insn has side effects,
1908 we can't kill the edge. */
1909 && (!JUMP_P (BB_END (b))
1910 || (reload_completed
1911 ? simplejump_p (BB_END (b))
1912 : (onlyjump_p (BB_END (b))
1913 && !tablejump_p (BB_END (b),
1914 NULL, NULL))))
1915 && (next = merge_blocks_move (s, b, c, mode)))
1917 b = next;
1918 changed_here = true;
1922 /* Simplify branch over branch. */
1923 if ((mode & CLEANUP_EXPENSIVE)
1924 && !(mode & CLEANUP_CFGLAYOUT)
1925 && try_simplify_condjump (b))
1926 changed_here = true;
1928 /* If B has a single outgoing edge, but uses a
1929 non-trivial jump instruction without side-effects, we
1930 can either delete the jump entirely, or replace it
1931 with a simple unconditional jump. */
1932 if (EDGE_COUNT (b->succs) == 1
1933 && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
1934 && onlyjump_p (BB_END (b))
1935 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1936 && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
1937 (mode & CLEANUP_CFGLAYOUT) != 0))
1939 update_forwarder_flag (b);
1940 changed_here = true;
1943 /* Simplify branch to branch. */
1944 if (try_forward_edges (mode, b))
1945 changed_here = true;
1947 /* Look for shared code between blocks. */
1948 if ((mode & CLEANUP_CROSSJUMP)
1949 && try_crossjump_bb (mode, b))
1950 changed_here = true;
1952 /* Don't get confused by the index shift caused by
1953 deleting blocks. */
1954 if (!changed_here)
1955 b = b->next_bb;
1956 else
1957 changed = true;
1960 if ((mode & CLEANUP_CROSSJUMP)
1961 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1962 changed = true;
1964 #ifdef ENABLE_CHECKING
1965 if (changed)
1966 verify_flow_info ();
1967 #endif
1969 changed_overall |= changed;
1970 first_pass = false;
1972 while (changed);
1975 if (mode & CLEANUP_CROSSJUMP)
1976 remove_fake_exit_edges ();
1978 clear_aux_for_blocks ();
1980 return changed_overall;
1983 /* Delete all unreachable basic blocks. */
1985 bool
1986 delete_unreachable_blocks (void)
1988 bool changed = false;
1989 basic_block b, next_bb;
1991 find_unreachable_blocks ();
1993 /* Delete all unreachable basic blocks. */
1995 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
1997 next_bb = b->next_bb;
1999 if (!(b->flags & BB_REACHABLE))
2001 delete_basic_block (b);
2002 changed = true;
2006 if (changed)
2007 tidy_fallthru_edges ();
2008 return changed;
2011 /* Merges sequential blocks if possible. */
2013 bool
2014 merge_seq_blocks (void)
2016 basic_block bb;
2017 bool changed = false;
2019 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2021 if (EDGE_COUNT (bb->succs) == 1
2022 && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
2024 /* Merge the blocks and retry. */
2025 merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
2026 changed = true;
2027 continue;
2030 bb = bb->next_bb;
2033 return changed;
2036 /* Tidy the CFG by deleting unreachable code and whatnot. */
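/* For illustration, a hypothetical invocation (callers elsewhere in the
   compiler combine the CLEANUP_* flags as appropriate for their pass):

       cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);               */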
2038 bool
2039 cleanup_cfg (int mode)
2041 bool changed = false;
2043 timevar_push (TV_CLEANUP_CFG);
2044 if (delete_unreachable_blocks ())
2046 changed = true;
2047 /* We've possibly created trivially dead code.  Clean it up right
2048 now to expose more opportunities for try_optimize_cfg.  */
2049 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2050 && !reload_completed)
2051 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2054 compact_blocks ();
2056 while (try_optimize_cfg (mode))
2058 delete_unreachable_blocks (), changed = true;
2059 if (mode & CLEANUP_UPDATE_LIFE)
2061 /* Cleaning up the CFG introduces more opportunities for dead code
2062 removal, which in turn may introduce more opportunities for
2063 cleaning up the CFG.  */
2064 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2065 PROP_DEATH_NOTES
2066 | PROP_SCAN_DEAD_CODE
2067 | PROP_KILL_DEAD_CODE
2068 | ((mode & CLEANUP_LOG_LINKS)
2069 ? PROP_LOG_LINKS : 0)))
2070 break;
2072 else if (!(mode & CLEANUP_NO_INSN_DEL)
2073 && (mode & CLEANUP_EXPENSIVE)
2074 && !reload_completed)
2076 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2077 break;
2079 else
2080 break;
2081 delete_dead_jumptables ();
2084 /* Kill the data we won't maintain. */
2085 free_EXPR_LIST_list (&label_value_list);
2086 timevar_pop (TV_CLEANUP_CFG);
2088 return changed;