1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by
28 the underlying infrastructure, so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
34 #include "config.h"
35 #include "system.h"
36 #include "rtl.h"
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
39 #include "timevar.h"
40 #include "output.h"
41 #include "insn-config.h"
42 #include "flags.h"
43 #include "recog.h"
44 #include "toplev.h"
45 #include "cselib.h"
47 #include "obstack.h"
49 /* cleanup_cfg maintains the following flags for each basic block. */
50 enum bb_flags {
51 /* Set if life info needs to be recomputed for given BB. */
52 BB_UPDATE_LIFE = 1,
53 /* Set if BB is the forwarder block to avoid too many
54 forwarder_block_p calls. */
55 BB_FORWARDER_BLOCK = 2
58 #define BB_FLAGS(bb) (enum bb_flags)(bb)->aux
59 #define BB_SET_FLAG(bb,flag) \
60 (bb)->aux = (void *) (long) ((enum bb_flags)(bb)->aux | (flag))
61 #define BB_CLEAR_FLAG(bb,flag) \
62 (bb)->aux = (void *) (long) ((enum bb_flags)(bb)->aux & ~(flag))
64 #define FORWARDER_BLOCK_P(bb) (BB_FLAGS(bb) & BB_FORWARDER_BLOCK)
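/* The per-block flags above are kept in the otherwise unused bb->aux field,
so no separate per-block storage is needed; try_optimize_cfg clears bb->aux
for every block before it returns. */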
66 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
67 static bool try_crossjump_bb PARAMS ((int, basic_block));
68 static bool outgoing_edges_match PARAMS ((int,
69 basic_block, basic_block));
70 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
71 rtx *, rtx *));
72 static bool insns_match_p PARAMS ((int, rtx, rtx));
74 static bool delete_unreachable_blocks PARAMS ((void));
75 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
76 static bool tail_recursion_label_p PARAMS ((rtx));
77 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
78 basic_block));
79 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
80 basic_block));
81 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
82 int));
83 static bool try_optimize_cfg PARAMS ((int));
84 static bool try_simplify_condjump PARAMS ((basic_block));
85 static bool try_forward_edges PARAMS ((int, basic_block));
86 static edge thread_jump PARAMS ((int, edge, basic_block));
87 static bool mark_effect PARAMS ((rtx, bitmap));
88 static void notice_new_block PARAMS ((basic_block));
89 static void update_forwarder_flag PARAMS ((basic_block));
91 /* Set flags for newly created block. */
93 static void
94 notice_new_block (bb)
95 basic_block bb;
97 if (!bb)
98 return;
99 BB_SET_FLAG (bb, BB_UPDATE_LIFE);
100 if (forwarder_block_p (bb))
101 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
104 /* Recompute forwarder flag after block has been modified. */
106 static void
107 update_forwarder_flag (bb)
108 basic_block bb;
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
119 static bool
120 try_simplify_condjump (cbranch_block)
121 basic_block cbranch_block;
123 basic_block jump_block, jump_dest_block, cbranch_dest_block;
124 edge cbranch_jump_edge, cbranch_fallthru_edge;
125 rtx cbranch_insn;
127 /* Verify that there are exactly two successors. */
128 if (!cbranch_block->succ
129 || !cbranch_block->succ->succ_next
130 || cbranch_block->succ->succ_next->succ_next)
131 return false;
133 /* Verify that we've got a normal conditional branch at the end
134 of the block. */
135 cbranch_insn = cbranch_block->end;
136 if (!any_condjump_p (cbranch_insn))
137 return false;
139 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
140 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
142 /* The next block must not have multiple predecessors, must not
143 be the last block in the function, and must contain just the
144 unconditional jump. */
145 jump_block = cbranch_fallthru_edge->dest;
146 if (jump_block->pred->pred_next
147 || jump_block->index == n_basic_blocks - 1
148 || !FORWARDER_BLOCK_P (jump_block))
149 return false;
150 jump_dest_block = jump_block->succ->dest;
152 /* The conditional branch must target the block after the
153 unconditional branch. */
154 cbranch_dest_block = cbranch_jump_edge->dest;
156 if (!can_fallthru (jump_block, cbranch_dest_block))
157 return false;
159 /* Invert the conditional branch. */
160 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
161 return false;
163 if (rtl_dump_file)
164 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
165 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
167 /* Success. Update the CFG to match. Note that after this point
168 the edge variable names appear backwards; the redirection is done
169 this way to preserve edge profile data. */
170 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
171 cbranch_dest_block);
172 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
173 jump_dest_block);
174 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
175 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
177 /* Delete the block with the unconditional jump, and clean up the mess. */
178 flow_delete_block (jump_block);
179 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
181 return true;
184 /* Attempt to prove that the operation is a NOOP using CSElib, or mark the
185 effect on the register. Used by jump threading. */
186 static bool
187 mark_effect (exp, nonequal)
188 rtx exp;
189 regset nonequal;
191 switch (GET_CODE (exp))
193 /* In case we do clobber the register, mark it as equal, as we know the
194 value is dead so it doesn't have to match. */
195 case CLOBBER:
196 if (REG_P (XEXP (exp, 0)))
197 CLEAR_REGNO_REG_SET (nonequal, REGNO (XEXP (exp, 0)));
198 return false;
199 case SET:
200 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
201 return false;
202 if (GET_CODE (SET_DEST (exp)) != REG)
203 return true;
204 SET_REGNO_REG_SET (nonequal, REGNO (SET_DEST (exp)));
205 return false;
206 default:
207 return false;
210 /* Attempt to prove that the basic block B will have no side effects and
211 always continues along the same edge if reached via E. Return the edge
212 if it exists, NULL otherwise. */
214 static edge
215 thread_jump (mode, e, b)
216 int mode;
217 edge e;
218 basic_block b;
220 rtx set1, set2, cond1, cond2, insn;
221 enum rtx_code code1, code2, reversed_code2;
222 bool reverse1 = false;
223 int i;
224 regset nonequal;
225 bool failed = false;
227 /* At the moment, we handle only conditional jumps, but later we may
228 want to extend this code to tablejumps and others. */
229 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
230 return NULL;
231 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
232 return NULL;
234 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
235 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
236 || !onlyjump_p (b->end))
237 return NULL;
239 set1 = pc_set (e->src->end);
240 set2 = pc_set (b->end);
241 if (((e->flags & EDGE_FALLTHRU) != 0)
242 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
243 reverse1 = true;
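/* When REVERSE1 is set, edge E is taken exactly when the condition of the
first jump is false, so the comparison code is reversed below to obtain the
condition known to hold on entry to B. */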
245 cond1 = XEXP (SET_SRC (set1), 0);
246 cond2 = XEXP (SET_SRC (set2), 0);
247 if (reverse1)
248 code1 = reversed_comparison_code (cond1, b->end);
249 else
250 code1 = GET_CODE (cond1);
252 code2 = GET_CODE (cond2);
253 reversed_code2 = reversed_comparison_code (cond2, b->end);
255 if (!comparison_dominates_p (code1, code2)
256 && !comparison_dominates_p (code1, reversed_code2))
257 return NULL;
259 /* Ensure that the comparison operators are equivalent.
260 ??? This is far too pessimistic. We should allow swapped operands,
261 different CCmodes, or, for example, interval comparisons that
262 dominate even when the operands are not equivalent. */
263 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
264 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
265 return NULL;
267 /* Short circuit cases where block B contains some side effects, as we can't
268 safely bypass it. */
269 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
270 insn = NEXT_INSN (insn))
271 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
272 return NULL;
274 cselib_init ();
276 /* First process all values computed in the source basic block. */
277 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
278 insn = NEXT_INSN (insn))
279 if (INSN_P (insn))
280 cselib_process_insn (insn);
282 nonequal = BITMAP_XMALLOC();
283 CLEAR_REG_SET (nonequal);
284 /* Now assume that we've continued along the edge E to B and continue
285 processing as if it were the same basic block.
287 Our goal is to prove that the whole block is a NOOP. */
288 for (insn = NEXT_INSN (b->head); insn != b->end && !failed;
289 insn = NEXT_INSN (insn))
291 if (INSN_P (insn))
293 rtx pat = PATTERN (insn);
295 if (GET_CODE (pat) == PARALLEL)
297 for (i = 0; i < XVECLEN (pat, 0); i++)
298 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
300 else
301 failed |= mark_effect (pat, nonequal);
303 cselib_process_insn (insn);
306 /* Later we should clear nonequal of dead registers. So far we don't
307 have life information in cfg_cleanup. */
308 if (failed)
309 goto failed_exit;
311 /* In case liveness information is available, we need to prove equivalence
312 only of the live values. */
313 if (mode & CLEANUP_UPDATE_LIFE)
314 AND_REG_SET (nonequal, b->global_live_at_end);
316 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
318 BITMAP_XFREE (nonequal);
319 cselib_finish ();
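/* Block B is a no-op along this path and its condition is determined by
CODE1, so return the edge of B that will be taken when B is entered via E. */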
320 if ((comparison_dominates_p (code1, code2) != 0)
321 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
322 return BRANCH_EDGE (b);
323 else
324 return FALLTHRU_EDGE (b);
326 failed_exit:
327 BITMAP_XFREE (nonequal);
328 cselib_finish ();
329 return NULL;
332 /* Attempt to forward edges leaving basic block B.
333 Return true if successful. */
335 static bool
336 try_forward_edges (mode, b)
337 basic_block b;
338 int mode;
340 bool changed = false;
341 edge e, next, threaded_edge;
343 for (e = b->succ; e ; e = next)
345 basic_block target, first;
346 int counter;
347 bool threaded = false;
349 next = e->succ_next;
351 /* Skip complex edges because we don't know how to update them.
353 Still handle fallthru edges, as we may succeed in forwarding the
354 fallthru edge to the same place as the branch edge of a conditional
355 branch and turn the conditional branch into an unconditional one. */
356 if (e->flags & EDGE_COMPLEX)
357 continue;
359 target = first = e->dest;
360 counter = 0;
362 while (counter < n_basic_blocks)
364 basic_block new_target = NULL;
365 bool new_target_threaded = false;
367 if (FORWARDER_BLOCK_P (target)
368 && target->succ->dest != EXIT_BLOCK_PTR)
370 /* Bypass trivial infinite loops. */
371 if (target == target->succ->dest)
372 counter = n_basic_blocks;
373 new_target = target->succ->dest;
375 /* Allow threading over only one edge at a time to simplify updating
376 of probabilities. */
377 else if ((mode & CLEANUP_THREADING) && !threaded)
379 threaded_edge = thread_jump (mode, e, target);
380 if (threaded_edge)
382 new_target = threaded_edge->dest;
383 new_target_threaded = true;
386 if (!new_target)
387 break;
389 /* Avoid killing loop pre-headers, as they are the place the loop
390 optimizer wants to hoist code to.
392 For fallthru forwarders, the LOOP_BEG note must appear between
393 the header of the block and the CODE_LABEL of the loop; for non-forwarders
394 it must appear before the JUMP_INSN. */
395 if (mode & CLEANUP_PRE_LOOP)
397 rtx insn = (target->succ->flags & EDGE_FALLTHRU
398 ? target->head : prev_nonnote_insn (target->end));
400 if (GET_CODE (insn) != NOTE)
401 insn = NEXT_INSN (insn);
403 for (;insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
404 insn = NEXT_INSN (insn))
405 if (GET_CODE (insn) == NOTE
406 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
407 break;
409 if (GET_CODE (insn) == NOTE)
410 break;
412 counter++;
413 target = new_target;
414 threaded |= new_target_threaded;
417 if (counter >= n_basic_blocks)
419 if (rtl_dump_file)
420 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
421 target->index);
423 else if (target == first)
424 ; /* We didn't do anything. */
425 else
427 /* Save the values now, as the edge may get removed. */
428 gcov_type edge_count = e->count;
429 int edge_probability = e->probability;
430 int edge_frequency;
432 if (threaded)
434 notice_new_block (redirect_edge_and_branch_force (e, target));
435 if (rtl_dump_file)
436 fprintf (rtl_dump_file, "Conditionals threaded.\n");
438 else if (!redirect_edge_and_branch (e, target))
440 if (rtl_dump_file)
441 fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n",
442 b->index, e->dest->index, target->index);
443 continue;
445 /* We successfully forwarded the edge. Now update profile
446 data: for each edge we traversed in the chain, remove
447 the original edge's execution count. */
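/* Estimate how often this edge is taken: scale B's frequency by the edge
probability, rounding to the nearest unit. */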
448 edge_frequency = ((edge_probability * b->frequency
449 + REG_BR_PROB_BASE / 2)
450 / REG_BR_PROB_BASE);
452 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
453 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
454 BB_SET_FLAG (b, BB_UPDATE_LIFE);
458 edge t;
459 first->count -= edge_count;
460 first->succ->count -= edge_count;
461 first->frequency -= edge_frequency;
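/* The only block in the chain with more than one successor is the one the
jump was threaded through, so continue along the threaded edge there;
otherwise follow the block's single successor. */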
462 if (first->succ->succ_next)
463 t = threaded_edge;
464 else
465 t = first->succ;
466 first = t->dest;
468 while (first != target);
470 changed = true;
474 return changed;
477 /* Return true if LABEL is a target of JUMP_INSN. This applies only
478 to non-complex jumps. That is, direct unconditional, conditional,
479 and tablejumps, but not computed jumps or returns. It also does
480 not apply to the fallthru case of a conditional jump. */
482 static bool
483 label_is_jump_target_p (label, jump_insn)
484 rtx label, jump_insn;
486 rtx tmp = JUMP_LABEL (jump_insn);
488 if (label == tmp)
489 return true;
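/* For a tablejump, JUMP_LABEL points to the label just before the dispatch
table, so look for LABEL among the entries of the ADDR_VEC or ADDR_DIFF_VEC
insn that follows it. */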
491 if (tmp != NULL_RTX
492 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
493 && GET_CODE (tmp) == JUMP_INSN
494 && (tmp = PATTERN (tmp),
495 GET_CODE (tmp) == ADDR_VEC
496 || GET_CODE (tmp) == ADDR_DIFF_VEC))
498 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
499 int i, veclen = GET_NUM_ELEM (vec);
501 for (i = 0; i < veclen; ++i)
502 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
503 return true;
506 return false;
509 /* Return true if LABEL is used for tail recursion. */
511 static bool
512 tail_recursion_label_p (label)
513 rtx label;
515 rtx x;
517 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
518 if (label == XEXP (x, 0))
519 return true;
521 return false;
524 /* Blocks A and B are to be merged into a single block. A has no incoming
525 fallthru edge, so it can be moved before B without adding or modifying
526 any jumps (aside from the jump from A to B). */
528 static void
529 merge_blocks_move_predecessor_nojumps (a, b)
530 basic_block a, b;
532 rtx barrier;
533 int index;
535 barrier = next_nonnote_insn (a->end);
536 if (GET_CODE (barrier) != BARRIER)
537 abort ();
538 delete_insn (barrier);
540 /* Move block and loop notes out of the chain so that we do not
541 disturb their order.
543 ??? A better solution would be to squeeze out all the non-nested notes
544 and adjust the block trees appropriately. Even better would be to have
545 a tighter connection between block trees and rtl so that this is not
546 necessary. */
547 if (squeeze_notes (&a->head, &a->end))
548 abort ();
550 /* Scramble the insn chain. */
551 if (a->end != PREV_INSN (b->head))
552 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
553 BB_SET_FLAG (a, BB_UPDATE_LIFE);
555 if (rtl_dump_file)
557 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
558 a->index, b->index);
561 /* Swap the records for the two blocks around. Although we are deleting B,
562 A is now where B was and we want to compact the BB array from where
563 A used to be. */
564 BASIC_BLOCK (a->index) = b;
565 BASIC_BLOCK (b->index) = a;
566 index = a->index;
567 a->index = b->index;
568 b->index = index;
570 /* Now blocks A and B are contiguous. Merge them. */
571 merge_blocks_nomove (a, b);
574 /* Blocks A and B are to be merged into a single block. B has no outgoing
575 fallthru edge, so it can be moved after A without adding or modifying
576 any jumps (aside from the jump from A to B). */
578 static void
579 merge_blocks_move_successor_nojumps (a, b)
580 basic_block a, b;
582 rtx barrier, real_b_end;
584 real_b_end = b->end;
585 barrier = NEXT_INSN (b->end);
587 /* Recognize a jump table following block B. */
588 if (barrier
589 && GET_CODE (barrier) == CODE_LABEL
590 && NEXT_INSN (barrier)
591 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
592 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
593 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
595 /* Temporarily add the table jump insn to b, so that it will also
596 be moved to the correct location. */
597 b->end = NEXT_INSN (barrier);
598 barrier = NEXT_INSN (b->end);
601 /* There had better have been a barrier there. Delete it. */
602 if (barrier && GET_CODE (barrier) == BARRIER)
603 delete_insn (barrier);
605 /* Move block and loop notes out of the chain so that we do not
606 disturb their order.
608 ??? A better solution would be to squeeze out all the non-nested notes
609 and adjust the block trees appropriately. Even better would be to have
610 a tighter connection between block trees and rtl so that this is not
611 necessary. */
612 if (squeeze_notes (&b->head, &b->end))
613 abort ();
615 /* Scramble the insn chain. */
616 reorder_insns_nobb (b->head, b->end, a->end);
618 /* Restore the real end of b. */
619 b->end = real_b_end;
621 /* Now blocks A and B are contiguous. Merge them. */
622 merge_blocks_nomove (a, b);
623 BB_SET_FLAG (a, BB_UPDATE_LIFE);
625 if (rtl_dump_file)
627 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
628 b->index, a->index);
632 /* Attempt to merge basic blocks that are potentially non-adjacent.
633 Return true iff the attempt succeeded. */
635 static bool
636 merge_blocks (e, b, c, mode)
637 edge e;
638 basic_block b, c;
639 int mode;
641 /* If C has a tail recursion label, do not merge. There is no
642 edge recorded from the call_placeholder back to this label, as
643 that would make optimize_sibling_and_tail_recursive_calls more
644 complex for no gain. */
645 if ((mode & CLEANUP_PRE_SIBCALL)
646 && GET_CODE (c->head) == CODE_LABEL
647 && tail_recursion_label_p (c->head))
648 return false;
650 /* If B has a fallthru edge to C, no need to move anything. */
651 if (e->flags & EDGE_FALLTHRU)
653 /* We need to update liveness in case C already has broken liveness
654 or B ends with a conditional jump to the next instruction, which will
655 be removed. */
656 if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
657 || GET_CODE (b->end) == JUMP_INSN)
658 BB_SET_FLAG (b, BB_UPDATE_LIFE);
659 merge_blocks_nomove (b, c);
660 update_forwarder_flag (b);
662 if (rtl_dump_file)
664 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
665 b->index, c->index);
668 return true;
670 /* Otherwise we will need to move code around. Do that only if expensive
671 transformations are allowed. */
672 else if (mode & CLEANUP_EXPENSIVE)
674 edge tmp_edge, b_fallthru_edge;
675 bool c_has_outgoing_fallthru;
676 bool b_has_incoming_fallthru;
678 /* Avoid overactive code motion, as the forwarder blocks should be
679 eliminated by edge redirection instead. One exception might have
680 been if B is a forwarder block and C has no fallthru edge, but
681 that should be cleaned up by bb-reorder instead. */
682 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
683 return false;
685 /* We must make sure not to munge the nesting of lexical blocks
686 and loop notes. This is done by squeezing out all the notes
687 and leaving them there to lie. Not ideal, but functional. */
689 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
690 if (tmp_edge->flags & EDGE_FALLTHRU)
691 break;
692 c_has_outgoing_fallthru = (tmp_edge != NULL);
694 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
695 if (tmp_edge->flags & EDGE_FALLTHRU)
696 break;
697 b_has_incoming_fallthru = (tmp_edge != NULL);
698 b_fallthru_edge = tmp_edge;
700 /* Otherwise, we're going to try to move C after B. If C does
701 not have an outgoing fallthru, then it can be moved
702 immediately after B without introducing or modifying jumps. */
703 if (! c_has_outgoing_fallthru)
705 merge_blocks_move_successor_nojumps (b, c);
706 return true;
709 /* If B does not have an incoming fallthru, then it can be moved
710 immediately before C without introducing or modifying jumps.
711 C cannot be the first block, so we do not have to worry about
712 accessing a non-existent block. */
714 if (b_has_incoming_fallthru)
716 basic_block bb;
717 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
718 return false;
719 bb = force_nonfallthru (b_fallthru_edge);
720 if (bb)
721 notice_new_block (bb);
722 else
723 BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
725 merge_blocks_move_predecessor_nojumps (b, c);
726 return true;
728 return false;
732 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
734 static bool
735 insns_match_p (mode, i1, i2)
736 int mode ATTRIBUTE_UNUSED;
737 rtx i1, i2;
739 rtx p1, p2;
741 /* Verify that I1 and I2 are equivalent. */
742 if (GET_CODE (i1) != GET_CODE (i2))
743 return false;
745 p1 = PATTERN (i1);
746 p2 = PATTERN (i2);
748 if (GET_CODE (p1) != GET_CODE (p2))
749 return false;
751 /* If this is a CALL_INSN, compare register usage information.
752 If we don't check this on stack register machines, the two
753 CALL_INSNs might be merged leaving reg-stack.c with mismatching
754 numbers of stack registers in the same basic block.
755 If we don't check this on machines with delay slots, a delay slot may
756 be filled that clobbers a parameter expected by the subroutine.
758 ??? We take the simple route for now and assume that if they're
759 equal, they were constructed identically. */
761 if (GET_CODE (i1) == CALL_INSN
762 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
763 CALL_INSN_FUNCTION_USAGE (i2)))
764 return false;
766 #ifdef STACK_REGS
767 /* If cross_jump_death_matters is not 0, the insn's mode
768 indicates whether or not the insn contains any stack-like
769 regs. */
771 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
773 /* If register stack conversion has already been done, then
774 death notes must also be compared before it is certain that
775 the two instruction streams match. */
777 rtx note;
778 HARD_REG_SET i1_regset, i2_regset;
780 CLEAR_HARD_REG_SET (i1_regset);
781 CLEAR_HARD_REG_SET (i2_regset);
783 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
784 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
785 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
787 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
788 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
789 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
791 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
793 return false;
795 done:
798 #endif
800 if (reload_completed
801 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
803 /* The following code helps take care of G++ cleanups. */
804 rtx equiv1 = find_reg_equal_equiv_note (i1);
805 rtx equiv2 = find_reg_equal_equiv_note (i2);
807 if (equiv1 && equiv2
808 /* If the equivalences are not to a constant, they may
809 reference pseudos that no longer exist, so we can't
810 use them. */
811 && (! reload_completed
812 || (CONSTANT_P (XEXP (equiv1, 0))
813 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
815 rtx s1 = single_set (i1);
816 rtx s2 = single_set (i2);
817 if (s1 != 0 && s2 != 0
818 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
820 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
821 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
822 if (! rtx_renumbered_equal_p (p1, p2))
823 cancel_changes (0);
824 else if (apply_change_group ())
825 return true;
828 return false;
830 return true;
833 /* Look through the insns at the end of BB1 and BB2 and find the longest
834 sequence of insns that are equivalent. Store the first insns for that
835 sequence in *F1 and *F2 and return the sequence length.
837 To simplify callers of this function, if the blocks match exactly,
838 store the head of the blocks in *F1 and *F2. */
840 static int
841 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
842 int mode ATTRIBUTE_UNUSED;
843 basic_block bb1, bb2;
844 rtx *f1, *f2;
846 rtx i1, i2, last1, last2, afterlast1, afterlast2;
847 int ninsns = 0;
849 /* Skip simple jumps at the end of the blocks. Complex jumps still
850 need to be compared for equivalence, which we'll do below. */
852 i1 = bb1->end;
853 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
854 if (onlyjump_p (i1)
855 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
857 last1 = i1;
858 i1 = PREV_INSN (i1);
860 i2 = bb2->end;
861 if (onlyjump_p (i2)
862 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
864 last2 = i2;
865 /* Count everything except an unconditional jump as an insn. */
866 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
867 ninsns++;
868 i2 = PREV_INSN (i2);
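/* Now walk backwards through both blocks in lock step, extending the matched
sequence until a pair of insns fails to match or a block head is reached. */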
871 while (true)
873 /* Ignore notes. */
874 while (!active_insn_p (i1) && i1 != bb1->head)
875 i1 = PREV_INSN (i1);
876 while (!active_insn_p (i2) && i2 != bb2->head)
877 i2 = PREV_INSN (i2);
879 if (i1 == bb1->head || i2 == bb2->head)
880 break;
882 if (!insns_match_p (mode, i1, i2))
883 break;
885 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
886 if (active_insn_p (i1))
888 /* If the merged insns have different REG_EQUAL notes, then
889 remove them. */
890 rtx equiv1 = find_reg_equal_equiv_note (i1);
891 rtx equiv2 = find_reg_equal_equiv_note (i2);
893 if (equiv1 && !equiv2)
894 remove_note (i1, equiv1);
895 else if (!equiv1 && equiv2)
896 remove_note (i2, equiv2);
897 else if (equiv1 && equiv2
898 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
900 remove_note (i1, equiv1);
901 remove_note (i2, equiv2);
904 afterlast1 = last1, afterlast2 = last2;
905 last1 = i1, last2 = i2;
906 ninsns++;
908 i1 = PREV_INSN (i1);
909 i2 = PREV_INSN (i2);
912 #ifdef HAVE_cc0
913 if (ninsns)
915 /* Don't allow the insn after a compare to be shared by
916 cross-jumping unless the compare is also shared. */
917 if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
918 last1 = afterlast1, last2 = afterlast2, ninsns--;
920 #endif
922 /* Include preceding notes and labels in the cross-jump. One,
923 this may bring us to the head of the blocks as requested above.
924 Two, it keeps line number notes as matched as possible. */
925 if (ninsns)
927 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
928 last1 = PREV_INSN (last1);
929 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
930 last1 = PREV_INSN (last1);
931 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
932 last2 = PREV_INSN (last2);
933 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
934 last2 = PREV_INSN (last2);
936 *f1 = last1;
937 *f2 = last2;
940 return ninsns;
943 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
944 the branch instruction. This means that if we commonize the control
945 flow before the end of the basic block, the semantics remain unchanged.
947 We may assume that there exists one edge with a common destination. */
949 static bool
950 outgoing_edges_match (mode, bb1, bb2)
951 int mode;
952 basic_block bb1;
953 basic_block bb2;
955 int nehedges1 = 0, nehedges2 = 0;
956 edge fallthru1 = 0, fallthru2 = 0;
957 edge e1, e2;
959 /* If BB1 has only one successor, we may be looking at either an
960 unconditional jump, or a fake edge to exit. */
961 if (bb1->succ && !bb1->succ->succ_next
962 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
964 if (! bb2->succ || bb2->succ->succ_next
965 || (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
966 return false;
967 return true;
970 /* Match conditional jumps - this may get tricky when fallthru and branch
971 edges are crossed. */
972 if (bb1->succ
973 && bb1->succ->succ_next
974 && !bb1->succ->succ_next->succ_next
975 && any_condjump_p (bb1->end)
976 && onlyjump_p (bb1->end))
978 edge b1, f1, b2, f2;
979 bool reverse, match;
980 rtx set1, set2, cond1, cond2;
981 enum rtx_code code1, code2;
983 if (!bb2->succ
984 || !bb2->succ->succ_next
985 || bb2->succ->succ_next->succ_next
986 || !any_condjump_p (bb2->end)
987 || !onlyjump_p (bb2->end))
988 return false;
990 b1 = BRANCH_EDGE (bb1);
991 b2 = BRANCH_EDGE (bb2);
992 f1 = FALLTHRU_EDGE (bb1);
993 f2 = FALLTHRU_EDGE (bb2);
995 /* Get around possible forwarders on fallthru edges. Other cases
996 should be optimized out already. */
997 if (FORWARDER_BLOCK_P (f1->dest))
998 f1 = f1->dest->succ;
999 if (FORWARDER_BLOCK_P (f2->dest))
1000 f2 = f2->dest->succ;
1002 /* To simplify use of this function, return false if there are
1003 unneeded forwarder blocks. These will get eliminated later
1004 during cleanup_cfg. */
1005 if (FORWARDER_BLOCK_P (f1->dest)
1006 || FORWARDER_BLOCK_P (f2->dest)
1007 || FORWARDER_BLOCK_P (b1->dest)
1008 || FORWARDER_BLOCK_P (b2->dest))
1009 return false;
1011 if (f1->dest == f2->dest && b1->dest == b2->dest)
1012 reverse = false;
1013 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1014 reverse = true;
1015 else
1016 return false;
1018 set1 = pc_set (bb1->end);
1019 set2 = pc_set (bb2->end);
1020 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1021 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1022 reverse = !reverse;
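/* A condjump may place the taken label in either arm of the IF_THEN_ELSE;
if the two jumps disagree about which arm falls through, flip the sense of
the comparison once more. */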
1024 cond1 = XEXP (SET_SRC (set1), 0);
1025 cond2 = XEXP (SET_SRC (set2), 0);
1026 code1 = GET_CODE (cond1);
1027 if (reverse)
1028 code2 = reversed_comparison_code (cond2, bb2->end);
1029 else
1030 code2 = GET_CODE (cond2);
1031 if (code2 == UNKNOWN)
1032 return false;
1034 /* Verify codes and operands match. */
1035 match = ((code1 == code2
1036 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1037 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1038 || (code1 == swap_condition (code2)
1039 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1040 XEXP (cond2, 0))
1041 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1042 XEXP (cond2, 1))));
1044 /* If we return true, we will join the blocks, which means that
1045 we will only have one branch prediction bit to work with. Thus
1046 we require the existing branches to have probabilities that are
1047 roughly similar. */
1048 /* ??? We should use bb->frequency to allow merging in infrequently
1049 executed blocks, but at the moment it is not available when
1050 cleanup_cfg is run. */
1051 if (match && !optimize_size)
1053 rtx note1, note2;
1054 int prob1, prob2;
1055 note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
1056 note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);
1058 if (note1 && note2)
1060 prob1 = INTVAL (XEXP (note1, 0));
1061 prob2 = INTVAL (XEXP (note2, 0));
1062 if (reverse)
1063 prob2 = REG_BR_PROB_BASE - prob2;
1065 /* Fail if the difference in probabilities is
1066 greater than 5%. */
1067 if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
1068 return false;
1070 else if (note1 || note2)
1071 return false;
1074 if (rtl_dump_file && match)
1075 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1076 bb1->index, bb2->index);
1078 return match;
1081 /* Generic case - we are seeing a computed jump, table jump or trapping
1082 instruction. */
1084 /* First ensure that the instructions match. There may be many outgoing
1085 edges so this test is generally cheaper.
1086 ??? Currently the tablejumps will never match, as they do have
1087 different tables. */
1088 if (!insns_match_p (mode, bb1->end, bb2->end))
1089 return false;
1091 /* Search the outgoing edges, ensure that the counts do match, and find
1092 possible fallthru and exception handling edges, since these need more
1093 validation. */
1094 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1095 e1 = e1->succ_next, e2 = e2->succ_next)
1097 if (e1->flags & EDGE_EH)
1098 nehedges1++;
1099 if (e2->flags & EDGE_EH)
1100 nehedges2++;
1101 if (e1->flags & EDGE_FALLTHRU)
1102 fallthru1 = e1;
1103 if (e2->flags & EDGE_FALLTHRU)
1104 fallthru2 = e2;
1106 /* If the number of edges of various types does not match, fail. */
1107 if (e1 || e2)
1108 return false;
1109 if (nehedges1 != nehedges2)
1110 return false;
1111 if ((fallthru1 != 0) != (fallthru2 != 0))
1112 return false;
1114 /* Fallthru edges must be forwarded to the same destination. */
1115 if (fallthru1)
1117 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1118 ? fallthru1->dest->succ->dest: fallthru1->dest);
1119 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1120 ? fallthru2->dest->succ->dest: fallthru2->dest);
1121 if (d1 != d2)
1122 return false;
1124 /* In case we do have EH edges, ensure we are in the same region. */
1125 if (nehedges1)
1127 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1128 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1129 if (XEXP (n1, 0) != XEXP (n2, 0))
1130 return false;
1132 /* We don't need to match the rest of the edges, as the above checks should
1133 be enough to ensure that they are equivalent. */
1134 return true;
1137 /* E1 and E2 are edges with the same destination block. Search their
1138 predecessors for common code. If found, redirect control flow from
1139 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1141 static bool
1142 try_crossjump_to_edge (mode, e1, e2)
1143 int mode;
1144 edge e1, e2;
1146 int nmatch;
1147 basic_block src1 = e1->src, src2 = e2->src;
1148 basic_block redirect_to;
1149 rtx newpos1, newpos2;
1150 edge s;
1151 rtx last;
1152 rtx label;
1153 rtx note;
1155 /* Search backward through forwarder blocks. We don't need to worry
1156 about multiple entry or chained forwarders, as they will be optimized
1157 away. We do this to look past the unconditional jump following a
1158 conditional jump that is required due to the current CFG shape. */
1159 if (src1->pred
1160 && !src1->pred->pred_next
1161 && FORWARDER_BLOCK_P (src1))
1163 e1 = src1->pred;
1164 src1 = e1->src;
1166 if (src2->pred
1167 && !src2->pred->pred_next
1168 && FORWARDER_BLOCK_P (src2))
1170 e2 = src2->pred;
1171 src2 = e2->src;
1174 /* Nothing to do if we reach ENTRY, or a common source block. */
1175 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1176 return false;
1177 if (src1 == src2)
1178 return false;
1180 /* Seeing more than one forwarder block would confuse us later... */
1181 if (FORWARDER_BLOCK_P (e1->dest)
1182 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1183 return false;
1184 if (FORWARDER_BLOCK_P (e2->dest)
1185 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1186 return false;
1188 /* Likewise with dead code (possibly newly created by the other optimizations
1189 of cfg_cleanup). */
1190 if (!src1->pred || !src2->pred)
1191 return false;
1193 /* Look for the common insn sequence, part the first ... */
1194 if (!outgoing_edges_match (mode, src1, src2))
1195 return false;
1197 /* ... and part the second. */
1198 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1199 if (!nmatch)
1200 return false;
1202 /* Avoid splitting if possible. */
1203 if (newpos2 == src2->head)
1204 redirect_to = src2;
1205 else
1207 if (rtl_dump_file)
1208 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1209 src2->index, nmatch);
1210 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1213 if (rtl_dump_file)
1214 fprintf (rtl_dump_file,
1215 "Cross jumping from bb %i to bb %i; %i common insns\n",
1216 src1->index, src2->index, nmatch);
1218 redirect_to->count += src1->count;
1219 redirect_to->frequency += src1->frequency;
1221 /* Recompute the frequencies and counts of outgoing edges. */
1222 for (s = redirect_to->succ; s; s = s->succ_next)
1224 edge s2;
1225 basic_block d = s->dest;
1227 if (FORWARDER_BLOCK_P (d))
1228 d = d->succ->dest;
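/* Locate the edge of SRC1 whose (possibly forwarded) destination matches
that of S, so its profile data can be folded into S. */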
1229 for (s2 = src1->succ; ; s2 = s2->succ_next)
1231 basic_block d2 = s2->dest;
1232 if (FORWARDER_BLOCK_P (d2))
1233 d2 = d2->succ->dest;
1234 if (d == d2)
1235 break;
1237 s->count += s2->count;
1239 /* Take care to update possible forwarder blocks. We verified
1240 that there is no more than one in the chain, so we can't run
1241 into an infinite loop. */
1242 if (FORWARDER_BLOCK_P (s->dest))
1244 s->dest->succ->count += s2->count;
1245 s->dest->count += s2->count;
1246 s->dest->frequency += EDGE_FREQUENCY (s);
1248 if (FORWARDER_BLOCK_P (s2->dest))
1250 s2->dest->succ->count -= s2->count;
1251 s2->dest->count -= s2->count;
1252 s2->dest->frequency -= EDGE_FREQUENCY (s);
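/* Combine the branch probabilities of the two edges, weighting each by the
frequency of the block it originally came from. */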
1254 if (!redirect_to->frequency && !src1->frequency)
1255 s->probability = (s->probability + s2->probability) / 2;
1256 else
1257 s->probability =
1258 ((s->probability * redirect_to->frequency +
1259 s2->probability * src1->frequency)
1260 / (redirect_to->frequency + src1->frequency));
1263 note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
1264 if (note)
1265 XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);
1267 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1269 /* Skip possible basic block header. */
1270 if (GET_CODE (newpos1) == CODE_LABEL)
1271 newpos1 = NEXT_INSN (newpos1);
1272 if (GET_CODE (newpos1) == NOTE)
1273 newpos1 = NEXT_INSN (newpos1);
1274 last = src1->end;
1276 /* Emit the jump insn. */
1277 label = block_label (redirect_to);
1278 emit_jump_insn_after (gen_jump (label), src1->end);
1279 JUMP_LABEL (src1->end) = label;
1280 LABEL_NUSES (label)++;
1282 /* Delete the now unreachable instructions. */
1283 delete_insn_chain (newpos1, last);
1285 /* Make sure there is a barrier after the new jump. */
1286 last = next_nonnote_insn (src1->end);
1287 if (!last || GET_CODE (last) != BARRIER)
1288 emit_barrier_after (src1->end);
1290 /* Update CFG. */
1291 while (src1->succ)
1292 remove_edge (src1->succ);
1293 make_single_succ_edge (src1, redirect_to, 0);
1295 BB_SET_FLAG (src1, BB_UPDATE_LIFE);
1296 update_forwarder_flag (src1);
1298 return true;
1301 /* Search the predecessors of BB for common insn sequences. When found,
1302 share code between them by redirecting control flow. Return true if
1303 any changes were made. */
1305 static bool
1306 try_crossjump_bb (mode, bb)
1307 int mode;
1308 basic_block bb;
1310 edge e, e2, nexte2, nexte, fallthru;
1311 bool changed;
1313 /* Nothing to do if there are not at least two incoming edges. */
1314 if (!bb->pred || !bb->pred->pred_next)
1315 return false;
1317 /* It is always cheapest to redirect a block that ends in a branch to
1318 a block that falls through into BB, as that adds no branches to the
1319 program. We'll try that combination first. */
1320 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1321 if (fallthru->flags & EDGE_FALLTHRU)
1322 break;
1324 changed = false;
1325 for (e = bb->pred; e; e = nexte)
1327 nexte = e->pred_next;
1329 /* As noted above, first try with the fallthru predecessor. */
1330 if (fallthru)
1332 /* Don't combine the fallthru edge into anything else.
1333 If there is a match, we'll do it the other way around. */
1334 if (e == fallthru)
1335 continue;
1337 if (try_crossjump_to_edge (mode, e, fallthru))
1339 changed = true;
1340 nexte = bb->pred;
1341 continue;
1345 /* Non-obvious work limiting check: Recognize that we're going
1346 to call try_crossjump_bb on every basic block. So if we have
1347 two blocks with lots of outgoing edges (a switch) and they
1348 share lots of common destinations, then we would do the
1349 cross-jump check once for each common destination.
1351 Now, if the blocks actually are cross-jump candidates, then
1352 all of their destinations will be shared, which means that
1353 we only need to check them for cross-jump candidacy once. We
1354 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1355 choosing to do the check from the block for which the edge
1356 in question is the first successor of A. */
1357 if (e->src->succ != e)
1358 continue;
1360 for (e2 = bb->pred; e2; e2 = nexte2)
1362 nexte2 = e2->pred_next;
1364 if (e2 == e)
1365 continue;
1367 /* We've already checked the fallthru edge above. */
1368 if (e2 == fallthru)
1369 continue;
1371 /* The "first successor" check above only prevents multiple
1372 checks of crossjump(A,B). In order to prevent redundant
1373 checks of crossjump(B,A), require that A be the block
1374 with the lowest index. */
1375 if (e->src->index > e2->src->index)
1376 continue;
1378 if (try_crossjump_to_edge (mode, e, e2))
1380 changed = true;
1381 nexte = bb->pred;
1382 break;
1387 return changed;
1390 /* Do simple CFG optimizations - basic block merging, simplification of
1391 jump instructions, etc. Return nonzero if changes were made. */
1393 static bool
1394 try_optimize_cfg (mode)
1395 int mode;
1397 int i;
1398 bool changed_overall = false;
1399 bool changed;
1400 int iterations = 0;
1401 sbitmap blocks;
1403 if (mode & CLEANUP_CROSSJUMP)
1404 add_noreturn_fake_exit_edges ();
1406 for (i = 0; i < n_basic_blocks; i++)
1407 update_forwarder_flag (BASIC_BLOCK (i));
1409 /* Attempt to merge blocks as made possible by edge removal. If a block
1410 has only one successor, and the successor has only one predecessor,
1411 they may be combined. */
1415 changed = false;
1416 iterations++;
1418 if (rtl_dump_file)
1419 fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
1420 iterations);
1422 for (i = 0; i < n_basic_blocks;)
1424 basic_block c, b = BASIC_BLOCK (i);
1425 edge s;
1426 bool changed_here = false;
1428 /* Delete trivially dead basic blocks. */
1429 while (b->pred == NULL)
1431 c = BASIC_BLOCK (b->index - 1);
1432 if (rtl_dump_file)
1433 fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);
1434 flow_delete_block (b);
1435 changed = true;
1436 b = c;
1439 /* Remove code labels no longer used. Don't do this before
1440 CALL_PLACEHOLDER is removed, as some branches may be hidden
1441 within. */
1442 if (b->pred->pred_next == NULL
1443 && (b->pred->flags & EDGE_FALLTHRU)
1444 && !(b->pred->flags & EDGE_COMPLEX)
1445 && GET_CODE (b->head) == CODE_LABEL
1446 && (!(mode & CLEANUP_PRE_SIBCALL)
1447 || !tail_recursion_label_p (b->head))
1448 /* If the previous block ends with a branch to this block,
1449 we can't delete the label. Normally this is a condjump
1450 that is yet to be simplified, but if CASE_DROPS_THRU,
1451 this can be a tablejump with some element going to the
1452 same place as the default (fallthru). */
1453 && (b->pred->src == ENTRY_BLOCK_PTR
1454 || GET_CODE (b->pred->src->end) != JUMP_INSN
1455 || ! label_is_jump_target_p (b->head, b->pred->src->end)))
1457 rtx label = b->head;
1458 b->head = NEXT_INSN (b->head);
1459 delete_insn_chain (label, label);
1460 if (rtl_dump_file)
1461 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1462 b->index);
1465 /* If we fall through an empty block, we can remove it. */
1466 if (b->pred->pred_next == NULL
1467 && (b->pred->flags & EDGE_FALLTHRU)
1468 && GET_CODE (b->head) != CODE_LABEL
1469 && FORWARDER_BLOCK_P (b)
1470 /* Note that forwarder_block_p true ensures that there
1471 is a successor for this block. */
1472 && (b->succ->flags & EDGE_FALLTHRU)
1473 && n_basic_blocks > 1)
1475 if (rtl_dump_file)
1476 fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
1477 b->index);
1478 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1479 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1480 flow_delete_block (b);
1481 changed = true;
1482 b = c;
1485 /* Merge blocks. Loop because chains of blocks might be
1486 combinable. */
1487 while ((s = b->succ) != NULL
1488 && s->succ_next == NULL
1489 && !(s->flags & EDGE_COMPLEX)
1490 && (c = s->dest) != EXIT_BLOCK_PTR
1491 && c->pred->pred_next == NULL
1492 /* If the jump insn has side effects,
1493 we can't kill the edge. */
1494 && (GET_CODE (b->end) != JUMP_INSN
1495 || onlyjump_p (b->end))
1496 && merge_blocks (s, b, c, mode))
1497 changed_here = true;
1499 /* Simplify branch over branch. */
1500 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1502 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1503 changed_here = true;
1506 /* If B has a single outgoing edge, but uses a non-trivial jump
1507 instruction without side-effects, we can either delete the
1508 jump entirely, or replace it with a simple unconditional jump.
1509 Use redirect_edge_and_branch to do the dirty work. */
1510 if (b->succ
1511 && ! b->succ->succ_next
1512 && b->succ->dest != EXIT_BLOCK_PTR
1513 && onlyjump_p (b->end)
1514 && redirect_edge_and_branch (b->succ, b->succ->dest))
1516 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1517 update_forwarder_flag (b);
1518 changed_here = true;
1521 /* Simplify branch to branch. */
1522 if (try_forward_edges (mode, b))
1523 changed_here = true;
1525 /* Look for shared code between blocks. */
1526 if ((mode & CLEANUP_CROSSJUMP)
1527 && try_crossjump_bb (mode, b))
1528 changed_here = true;
1530 /* Don't get confused by the index shift caused by deleting
1531 blocks. */
1532 if (!changed_here)
1533 i = b->index + 1;
1534 else
1535 changed = true;
1538 if ((mode & CLEANUP_CROSSJUMP)
1539 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1540 changed = true;
1542 #ifdef ENABLE_CHECKING
1543 if (changed)
1544 verify_flow_info ();
1545 #endif
1547 changed_overall |= changed;
1549 while (changed);
1551 if (mode & CLEANUP_CROSSJUMP)
1552 remove_fake_edges ();
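/* Recompute liveness only for the blocks that were flagged BB_UPDATE_LIFE
while the CFG was being transformed. */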
1554 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1556 bool found = 0;
1557 blocks = sbitmap_alloc (n_basic_blocks);
1558 sbitmap_zero (blocks);
1559 for (i = 0; i < n_basic_blocks; i++)
1560 if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
1562 found = 1;
1563 SET_BIT (blocks, i);
1565 if (found)
1566 update_life_info (blocks, UPDATE_LIFE_GLOBAL,
1567 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1568 | PROP_KILL_DEAD_CODE);
1569 sbitmap_free (blocks);
1571 for (i = 0; i < n_basic_blocks; i++)
1572 BASIC_BLOCK (i)->aux = NULL;
1574 return changed_overall;
1577 /* Delete all unreachable basic blocks. */
1579 static bool
1580 delete_unreachable_blocks ()
1582 int i;
1583 bool changed = false;
1585 find_unreachable_blocks ();
1587 /* Delete all unreachable basic blocks. Count down so that we
1588 don't interfere with the block renumbering that happens in
1589 flow_delete_block. */
1591 for (i = n_basic_blocks - 1; i >= 0; --i)
1593 basic_block b = BASIC_BLOCK (i);
1595 if (!(b->flags & BB_REACHABLE))
1596 flow_delete_block (b), changed = true;
1599 if (changed)
1600 tidy_fallthru_edges ();
1601 return changed;
1604 /* Tidy the CFG by deleting unreachable code and whatnot. */
1606 bool
1607 cleanup_cfg (mode)
1608 int mode;
1610 bool changed = false;
1612 timevar_push (TV_CLEANUP_CFG);
1613 changed = delete_unreachable_blocks ();
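/* The transformations done by try_optimize_cfg can leave further blocks
unreachable, so remove unreachable blocks once more if anything changed. */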
1614 if (try_optimize_cfg (mode))
1615 delete_unreachable_blocks (), changed = true;
1617 /* Kill the data we won't maintain. */
1618 free_EXPR_LIST_list (&label_value_list);
1619 free_EXPR_LIST_list (&tail_recursion_label_list);
1620 timevar_pop (TV_CLEANUP_CFG);
1622 return changed;