/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"

#include "obstack.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
  {
    /* Set if life info needs to be recomputed for the given BB.  */
    BB_UPDATE_LIFE = 1,
    /* Set if BB is a forwarder block, to avoid too many
       forwarder_block_p calls.  */
    BB_FORWARDER_BLOCK = 2
  };

#define BB_FLAGS(bb) (enum bb_flags) (bb)->aux
#define BB_SET_FLAG(bb, flag) \
  (bb)->aux = (void *) (long) ((enum bb_flags) (bb)->aux | (flag))
#define BB_CLEAR_FLAG(bb, flag) \
  (bb)->aux = (void *) (long) ((enum bb_flags) (bb)->aux & ~(flag))

#define FORWARDER_BLOCK_P(bb) (BB_FLAGS (bb) & BB_FORWARDER_BLOCK)
static bool try_crossjump_to_edge      PARAMS ((int, edge, edge));
static bool try_crossjump_bb           PARAMS ((int, basic_block));
static bool outgoing_edges_match       PARAMS ((int,
                                                basic_block, basic_block));
static int flow_find_cross_jump        PARAMS ((int, basic_block, basic_block,
                                                rtx *, rtx *));
static bool insns_match_p              PARAMS ((int, rtx, rtx));

static bool delete_unreachable_blocks  PARAMS ((void));
static bool label_is_jump_target_p     PARAMS ((rtx, rtx));
static bool tail_recursion_label_p     PARAMS ((rtx));
static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
                                                           basic_block));
static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
                                                         basic_block));
static bool merge_blocks               PARAMS ((edge, basic_block, basic_block,
                                                int));
static bool try_optimize_cfg           PARAMS ((int));
static bool try_simplify_condjump      PARAMS ((basic_block));
static bool try_forward_edges          PARAMS ((int, basic_block));
static void notice_new_block           PARAMS ((basic_block));
static void update_forwarder_flag      PARAMS ((basic_block));
/* Set flags for a newly created block.  */

static void
notice_new_block (bb)
     basic_block bb;
{
  if (!bb)
    return;

  BB_SET_FLAG (bb, BB_UPDATE_LIFE);
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Recompute the forwarder flag after a block has been modified.  */

static void
update_forwarder_flag (bb)
     basic_block bb;
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */

static bool
try_simplify_condjump (cbranch_block)
     basic_block cbranch_block;
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (!cbranch_block->succ
      || !cbranch_block->succ->succ_next
      || cbranch_block->succ->succ_next->succ_next)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = cbranch_block->end;
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (jump_block->pred->pred_next
      || jump_block->index == n_basic_blocks - 1
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = jump_block->succ->dest;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (!can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (rtl_dump_file)
    fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
             INSN_UID (cbranch_insn), INSN_UID (jump_block->end));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
                                                cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
                                                    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  flow_delete_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);

  return true;
}
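
/* For illustration, a hypothetical sketch (not taken from any
   particular testcase) of what try_simplify_condjump does, assuming
   the forwarder block holding the simplejump has a single predecessor:

        if (cond) goto A;               if (!cond) goto B;
        goto B;                 =>      ;; fall through to A
     A: ...                          A: ...

   The conditional branch is inverted to target B, the block containing
   "goto B" is deleted, and execution falls through to A.  */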
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */

static bool
try_forward_edges (mode, b)
     basic_block b;
     int mode;
{
  bool changed = false;
  edge e, next;

  for (e = b->succ; e; e = next)
    {
      basic_block target, first;
      int counter;

      next = e->succ_next;

      /* Skip complex edges because we don't know how to update them.

         Still handle fallthru edges, as we can succeed in forwarding a
         fallthru edge to the same place as the branch edge of a
         conditional branch, turning the conditional branch into an
         unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
        continue;

      target = first = e->dest;
      counter = 0;

      /* Look for the real destination of the jump.
         Avoid looping forever on an infinite empty loop by counting
         up to n_basic_blocks.  */
      while (FORWARDER_BLOCK_P (target)
             && target->succ->dest != EXIT_BLOCK_PTR
             && counter < n_basic_blocks)
        {
          /* Bypass trivial infinite loops.  */
          if (target == target->succ->dest)
            counter = n_basic_blocks;

          /* Avoid killing loop pre-headers, as they are where the loop
             optimizer wants to hoist code to.

             For fallthru forwarders, the LOOP_BEG note must appear between
             the header of the block and the CODE_LABEL of the loop; for
             non-forwarders it must appear before the JUMP_INSN.  */
          if (mode & CLEANUP_PRE_LOOP)
            {
              rtx insn = (target->succ->flags & EDGE_FALLTHRU
                          ? target->head : prev_nonnote_insn (target->end));

              if (GET_CODE (insn) != NOTE)
                insn = NEXT_INSN (insn);

              for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
                   insn = NEXT_INSN (insn))
                if (GET_CODE (insn) == NOTE
                    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
                  break;

              if (GET_CODE (insn) == NOTE)
                break;
            }

          target = target->succ->dest, counter++;
        }

      if (counter >= n_basic_blocks)
        {
          if (rtl_dump_file)
            fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
                     target->index);
        }
      else if (target == first)
        ; /* We didn't do anything.  */
      else
        {
          /* Save the values now, as the edge may get removed.  */
          gcov_type edge_count = e->count;
          int edge_probability = e->probability;

          if (redirect_edge_and_branch (e, target))
            {
              /* We successfully forwarded the edge.  Now update profile
                 data: for each edge we traversed in the chain, remove
                 the original edge's execution count.  */
              int edge_frequency = ((edge_probability * b->frequency
                                     + REG_BR_PROB_BASE / 2)
                                    / REG_BR_PROB_BASE);

              if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
                BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
              BB_SET_FLAG (b, BB_UPDATE_LIFE);

              do
                {
                  first->count -= edge_count;
                  first->succ->count -= edge_count;
                  first->frequency -= edge_frequency;
                  first = first->succ->dest;
                }
              while (first != target);

              changed = true;
            }
          else
            {
              if (rtl_dump_file)
                fprintf (rtl_dump_file, "Forwarding edge %i->%i to %i failed.\n",
                         b->index, e->dest->index, target->index);
            }
        }
    }

  return changed;
}
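
/* An illustrative aside on the edge_frequency computation above (the
   numbers are hypothetical): REG_BR_PROB_BASE is 10000, so an edge
   probability of 2500 (i.e. 25%) leaving a block with frequency 400
   scales, with rounding, to (2500 * 400 + 5000) / 10000 == 100, and
   that amount is subtracted from each forwarder block the edge used
   to pass through.  */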
/* Return true if LABEL is a target of JUMP_INSN.  This applies only
   to non-complex jumps.  That is, direct unconditional, conditional,
   and tablejumps, but not computed jumps or returns.  It also does
   not apply to the fallthru case of a conditional jump.  */

static bool
label_is_jump_target_p (label, jump_insn)
     rtx label, jump_insn;
{
  rtx tmp = JUMP_LABEL (jump_insn);

  if (label == tmp)
    return true;

  if (tmp != NULL_RTX
      && (tmp = NEXT_INSN (tmp)) != NULL_RTX
      && GET_CODE (tmp) == JUMP_INSN
      && (tmp = PATTERN (tmp),
          GET_CODE (tmp) == ADDR_VEC
          || GET_CODE (tmp) == ADDR_DIFF_VEC))
    {
      rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
      int i, veclen = GET_NUM_ELEM (vec);

      for (i = 0; i < veclen; ++i)
        if (XEXP (RTVEC_ELT (vec, i), 0) == label)
          return true;
    }

  return false;
}
/* Return true if LABEL is used for tail recursion.  */

static bool
tail_recursion_label_p (label)
     rtx label;
{
  rtx x;

  for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
    if (label == XEXP (x, 0))
      return true;

  return false;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_predecessor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier;
  int index;

  barrier = next_nonnote_insn (a->end);
  if (GET_CODE (barrier) != BARRIER)
    abort ();
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&a->head, &a->end))
    abort ();

  /* Scramble the insn chain.  */
  if (a->end != PREV_INSN (b->head))
    reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
  BB_SET_FLAG (a, BB_UPDATE_LIFE);

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
               a->index, b->index);
    }

  /* Swap the records for the two blocks around.  Although we are deleting B,
     A is now where B was and we want to compact the BB array from where
     A used to be.  */
  BASIC_BLOCK (a->index) = b;
  BASIC_BLOCK (b->index) = a;
  index = a->index;
  a->index = b->index;
  b->index = index;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (a, b)
     basic_block a, b;
{
  rtx barrier, real_b_end;

  real_b_end = b->end;
  barrier = NEXT_INSN (b->end);

  /* Recognize a jump table following block B.  */
  if (barrier
      && GET_CODE (barrier) == CODE_LABEL
      && NEXT_INSN (barrier)
      && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
      && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
          || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
    {
      /* Temporarily add the table jump insn to B, so that it will also
         be moved to the correct location.  */
      b->end = NEXT_INSN (barrier);
      barrier = NEXT_INSN (b->end);
    }

  /* There had better have been a barrier there.  Delete it.  */
  if (barrier && GET_CODE (barrier) == BARRIER)
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  if (squeeze_notes (&b->head, &b->end))
    abort ();

  /* Scramble the insn chain.  */
  reorder_insns_nobb (b->head, b->end, a->end);

  /* Restore the real end of B.  */
  b->end = real_b_end;

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks_nomove (a, b);
  BB_SET_FLAG (a, BB_UPDATE_LIFE);

  if (rtl_dump_file)
    {
      fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
               b->index, a->index);
    }
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return true iff the attempt succeeded.  */

static bool
merge_blocks (e, b, c, mode)
     edge e;
     basic_block b, c;
     int mode;
{
  /* If C has a tail recursion label, do not merge.  There is no
     edge recorded from the call_placeholder back to this label, as
     that would make optimize_sibling_and_tail_recursive_calls more
     complex for no gain.  */
  if ((mode & CLEANUP_PRE_SIBCALL)
      && GET_CODE (c->head) == CODE_LABEL
      && tail_recursion_label_p (c->head))
    return false;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      /* We need to update liveness in case C already has broken liveness
         or B ends with a conditional jump to the next instruction, which
         will be removed.  */
      if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
          || GET_CODE (b->end) == JUMP_INSN)
        BB_SET_FLAG (b, BB_UPDATE_LIFE);
      merge_blocks_nomove (b, c);
      update_forwarder_flag (b);

      if (rtl_dump_file)
        {
          fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
                   b->index, c->index);
        }

      return true;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;

      /* Avoid overactive code motion, as the forwarder blocks should be
         eliminated by edge redirection instead.  One exception might have
         been if B is a forwarder block and C has no fallthru edge, but
         that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
        return false;

      /* We must make sure to not munge nesting of lexical blocks,
         and loop notes.  This is done by squeezing out all the notes
         and leaving them there to lie.  Not ideal, but functional.  */

      for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;
      c_has_outgoing_fallthru = (tmp_edge != NULL);

      for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
        if (tmp_edge->flags & EDGE_FALLTHRU)
          break;
      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;

      /* Otherwise, we're going to try to move C after B.  If C does
         not have an outgoing fallthru, then it can be moved
         immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
        {
          merge_blocks_move_successor_nojumps (b, c);
          return true;
        }

      /* If B does not have an incoming fallthru, then it can be moved
         immediately before C without introducing or modifying jumps.
         C cannot be the first block, so we do not have to worry about
         accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
        {
          basic_block bb;

          if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
            return false;
          bb = force_nonfallthru (b_fallthru_edge);
          if (bb)
            notice_new_block (bb);
          else
            BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
        }

      merge_blocks_move_predecessor_nojumps (b, c);
      return true;
    }

  return false;
}
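
/* To summarize the strategy of merge_blocks informally (this note is
   the editor's gloss, not from the original sources): merge in place
   when B already falls through to C; otherwise, under CLEANUP_EXPENSIVE,
   either move C up directly after B (possible when C has no outgoing
   fallthru) or move B down directly before C (possible once B's
   incoming fallthru, if any, is converted into an explicit jump).  */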
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (mode, i1, i2)
     int mode;
     rtx i1, i2;
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (GET_CODE (i1) == CALL_INSN
      && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
                       CALL_INSN_FUNCTION_USAGE (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
         death notes must also be compared before it is certain that
         the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
        if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
          SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
          /* If the equivalences are not to a constant, they may
             reference pseudos that no longer exist, so we can't
             use them.  */
          && (! reload_completed
              || (CONSTANT_P (XEXP (equiv1, 0))
                  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
        {
          rtx s1 = single_set (i1);
          rtx s2 = single_set (i2);

          if (s1 != 0 && s2 != 0
              && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
            {
              validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
              validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
              if (! rtx_renumbered_equal_p (p1, p2))
                cancel_changes (0);
              else if (apply_change_group ())
                return true;
            }
        }

      return false;
    }

  return true;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent insns.  Store the first insns of that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */

static int
flow_find_cross_jump (mode, bb1, bb2, f1, f2)
     int mode ATTRIBUTE_UNUSED;
     basic_block bb1, bb2;
     rtx *f1, *f2;
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = bb1->end;
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      /* Count everything except unconditional jumps as insns.  */
      if (!simplejump_p (i1) && !returnjump_p (i1))
        ninsns++;
      i1 = PREV_INSN (i1);
    }

  i2 = bb2->end;
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!active_insn_p (i1) && i1 != bb1->head)
        i1 = PREV_INSN (i1);
      while (!active_insn_p (i2) && i2 != bb2->head)
        i2 = PREV_INSN (i2);

      if (i1 == bb1->head || i2 == bb2->head)
        break;

      if (!insns_match_p (mode, i1, i2))
        break;

      /* Don't begin a cross-jump with a USE or CLOBBER insn.  */
      if (active_insn_p (i1))
        {
          /* If the merged insns have different REG_EQUAL notes, then
             remove them.  */
          rtx equiv1 = find_reg_equal_equiv_note (i1);
          rtx equiv2 = find_reg_equal_equiv_note (i2);

          if (equiv1 && !equiv2)
            remove_note (i1, equiv1);
          else if (!equiv1 && equiv2)
            remove_note (i2, equiv2);
          else if (equiv1 && equiv2
                   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
            {
              remove_note (i1, equiv1);
              remove_note (i2, equiv2);
            }

          afterlast1 = last1, afterlast2 = last2;
          last1 = i1, last2 = i2;
          ninsns++;
        }

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  if (ninsns)
    {
      /* Don't allow the insn after a compare to be shared by
         cross-jumping unless the compare is also shared.  */
      if (reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
        last1 = afterlast1, last2 = afterlast2, ninsns--;
    }
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
        last1 = PREV_INSN (last1);
      if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
        last1 = PREV_INSN (last1);
      while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
        last2 = PREV_INSN (last2);
      if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
        last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
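
/* For illustration, a hypothetical pair of blocks (pseudo-RTL, not
   from any particular testcase):

     BB1: r0 = r1 + r2          BB2: r3 = r1 - r2
          r0 = r0 << 2               r0 = r0 << 2
          [r4] = r0                  [r4] = r0
          jump L                     jump L

   flow_find_cross_jump walks both blocks backwards from the jumps,
   matches the two trailing insns, and returns 2 with *F1 and *F2
   pointing at the "r0 = r0 << 2" insns, the start of the matched
   tails.  */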
/* Return true iff the outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */

static bool
outgoing_edges_match (mode, bb1, bb2)
     int mode;
     basic_block bb1;
     basic_block bb2;
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;

  /* If BB1 has only one successor, we must be looking at an unconditional
     jump.  Which, by the assumption above, means that we only need to check
     that BB2 has one successor.  */
  if (bb1->succ && !bb1->succ->succ_next)
    return (bb2->succ && !bb2->succ->succ_next);

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (bb1->succ
      && bb1->succ->succ_next
      && !bb1->succ->succ_next->succ_next
      && any_condjump_p (bb1->end))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (!bb2->succ
          || !bb2->succ->succ_next
          || bb2->succ->succ_next->succ_next
          || !any_condjump_p (bb2->end))
        return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
         should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
        f1 = f1->dest->succ;
      if (FORWARDER_BLOCK_P (f2->dest))
        f2 = f2->dest->succ;

      /* To simplify use of this function, return false if there are
         unneeded forwarder blocks.  These will get eliminated later
         during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
          || FORWARDER_BLOCK_P (f2->dest)
          || FORWARDER_BLOCK_P (b1->dest)
          || FORWARDER_BLOCK_P (b2->dest))
        return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
        reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
        reverse = true;
      else
        return false;

      set1 = pc_set (bb1->end);
      set2 = pc_set (bb2->end);
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
          != (XEXP (SET_SRC (set2), 1) == pc_rtx))
        reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
        code2 = reversed_comparison_code (cond2, bb2->end);
      else
        code2 = GET_CODE (cond2);
      if (code2 == UNKNOWN)
        return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
                && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
                && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
               || (code1 == swap_condition (code2)
                   && rtx_renumbered_equal_p (XEXP (cond1, 1),
                                              XEXP (cond2, 0))
                   && rtx_renumbered_equal_p (XEXP (cond1, 0),
                                              XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
         we will only have one branch prediction bit to work with.  Thus
         we require the existing branches to have probabilities that are
         roughly similar.  */
      /* ??? We should use bb->frequency to allow merging in infrequently
         executed blocks, but at the moment it is not available when
         cleanup_cfg is run.  */
      if (match && !optimize_size)
        {
          rtx note1, note2;
          int prob1, prob2;

          note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
          note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);

          if (note1 && note2)
            {
              prob1 = INTVAL (XEXP (note1, 0));
              prob2 = INTVAL (XEXP (note2, 0));
              if (reverse)
                prob2 = REG_BR_PROB_BASE - prob2;

              /* Fail if the difference in probabilities is
                 greater than 5%.  */
              if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
                return false;
            }
          else if (note1 || note2)
            return false;
        }

      if (rtl_dump_file && match)
        fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
                 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.
     ??? Currently the tablejumps will never match, as they do have
     different tables.  */
  if (!insns_match_p (mode, bb1->end, bb2->end))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, and find
     possible fallthru and exception handling edges since these need more
     validation.  */
  for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
       e1 = e1->succ_next, e2 = e2->succ_next)
    {
      if (e1->flags & EDGE_EH)
        nehedges1++;
      if (e2->flags & EDGE_EH)
        nehedges2++;
      if (e1->flags & EDGE_FALLTHRU)
        fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
        fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (e1 || e2)
    return false;
  if (nehedges1 != nehedges2)
    return false;
  if ((fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
                        ? fallthru1->dest->succ->dest : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
                        ? fallthru2->dest->succ->dest : fallthru2->dest);

      if (d1 != d2)
        return false;
    }

  /* In case we do have EH edges, ensure we are in the same region.  */
  if (nehedges1)
    {
      rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
      rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);

      if (XEXP (n1, 0) != XEXP (n2, 0))
        return false;
    }

  /* We don't need to match the rest of the edges, as the above checks
     should be enough to ensure that they are equivalent.  */
  return true;
}
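
/* A worked example of the 5% probability check above (the numbers are
   hypothetical): REG_BR_PROB_BASE / 20 == 500, so two conditional
   branches with REG_BR_PROB notes of 7000 and 7600 differ by 600 > 500
   and do not match, while 7000 and 7400 would.  The point is that the
   merged block retains only a single branch prediction.  */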
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */

static bool
try_crossjump_to_edge (mode, e1, e2)
     int mode;
     edge e1, e2;
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to;
  rtx newpos1, newpos2;
  edge s;
  rtx last;
  rtx label;
  rtx note;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (src1->pred
      && !src1->pred->pred_next
      && FORWARDER_BLOCK_P (src1))
    {
      e1 = src1->pred;
      src1 = e1->src;
    }

  if (src2->pred
      && !src2->pred->pred_next
      && FORWARDER_BLOCK_P (src2))
    {
      e2 = src2->pred;
      src2 = e2->src;
    }

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (e1->dest->succ->dest))
    return false;
  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (e2->dest->succ->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cleanup_cfg).  */
  if (!src1->pred || !src2->pred)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
  if (!nmatch)
    return false;

  /* Avoid splitting if possible.  */
  if (newpos2 == src2->head)
    redirect_to = src2;
  else
    {
      if (rtl_dump_file)
        fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
                 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (rtl_dump_file)
    fprintf (rtl_dump_file,
             "Cross jumping from bb %i to bb %i; %i common insns\n",
             src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;

  /* Recompute the frequencies and counts of outgoing edges.  */
  for (s = redirect_to->succ; s; s = s->succ_next)
    {
      edge s2;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
        d = d->succ->dest;

      for (s2 = src1->succ; ; s2 = s2->succ_next)
        {
          basic_block d2 = s2->dest;
          if (FORWARDER_BLOCK_P (d2))
            d2 = d2->succ->dest;
          if (d == d2)
            break;
        }

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
         that there is no more than one in the chain, so we can't run
         into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
        {
          s->dest->succ->count += s2->count;
          s->dest->count += s2->count;
          s->dest->frequency += EDGE_FREQUENCY (s);
        }

      if (FORWARDER_BLOCK_P (s2->dest))
        {
          s2->dest->succ->count -= s2->count;
          s2->dest->count -= s2->count;
          s2->dest->frequency -= EDGE_FREQUENCY (s);
        }

      if (!redirect_to->frequency && !src1->frequency)
        s->probability = (s->probability + s2->probability) / 2;
      else
        s->probability
          = ((s->probability * redirect_to->frequency
              + s2->probability * src1->frequency)
             / (redirect_to->frequency + src1->frequency));
    }

  note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
  if (note)
    XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (GET_CODE (newpos1) == CODE_LABEL)
    newpos1 = NEXT_INSN (newpos1);
  if (GET_CODE (newpos1) == NOTE)
    newpos1 = NEXT_INSN (newpos1);
  last = src1->end;

  /* Emit the jump insn.  */
  label = block_label (redirect_to);
  emit_jump_insn_after (gen_jump (label), src1->end);
  JUMP_LABEL (src1->end) = label;
  LABEL_NUSES (label)++;

  /* Delete the now unreachable instructions.  */
  delete_insn_chain (newpos1, last);

  /* Make sure there is a barrier after the new jump.  */
  last = next_nonnote_insn (src1->end);
  if (!last || GET_CODE (last) != BARRIER)
    emit_barrier_after (src1->end);

  /* Update the CFG.  */
  while (src1->succ)
    remove_edge (src1->succ);
  make_single_succ_edge (src1, redirect_to, 0);

  BB_SET_FLAG (src1, BB_UPDATE_LIFE);
  update_forwarder_flag (src1);

  return true;
}
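
/* For illustration, a hypothetical cross-jump (pseudo-RTL, not from
   any particular testcase).  Given two predecessors of L ending in
   the same two-insn tail,

     SRC1: a; x; y; goto L          SRC1: a; goto L2
     SRC2: b; x; y;            =>   SRC2: b;
     L:    ...                      L2:   x; y;   (split off SRC2)
                                          ;; falls through to L
                                    L:    ...

   SRC2 is split before its copy of the matched tail, SRC1's copy is
   deleted, and SRC1 is given an unconditional jump to the split
   block.  */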
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */

static bool
try_crossjump_bb (mode, bb)
     int mode;
     basic_block bb;
{
  edge e, e2, nexte2, nexte, fallthru;
  bool changed;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (!bb->pred || !bb->pred->pred_next)
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
    if (fallthru->flags & EDGE_FALLTHRU)
      break;

  changed = false;
  for (e = bb->pred; e; e = nexte)
    {
      nexte = e->pred_next;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
        {
          /* Don't combine the fallthru edge into anything else.
             If there is a match, we'll do it the other way around.  */
          if (e == fallthru)
            continue;

          if (try_crossjump_to_edge (mode, e, fallthru))
            {
              changed = true;
              nexte = bb->pred;
              continue;
            }
        }

      /* Non-obvious work limiting check: Recognize that we're going
         to call try_crossjump_bb on every basic block.  So if we have
         two blocks with lots of outgoing edges (a switch) and they
         share lots of common destinations, then we would do the
         cross-jump check once for each common destination.

         Now, if the blocks actually are cross-jump candidates, then
         all of their destinations will be shared.  Which means that
         we only need to check them for cross-jump candidacy once.  We
         can eliminate redundant checks of crossjump(A,B) by arbitrarily
         choosing to do the check from the block for which the edge
         in question is the first successor of A.  */
      if (e->src->succ != e)
        continue;

      for (e2 = bb->pred; e2; e2 = nexte2)
        {
          nexte2 = e2->pred_next;

          if (e2 == e)
            continue;

          /* We've already checked the fallthru edge above.  */
          if (e2 == fallthru)
            continue;

          /* The "first successor" check above only prevents multiple
             checks of crossjump(A,B).  In order to prevent redundant
             checks of crossjump(B,A), require that A be the block
             with the lowest index.  */
          if (e->src->index > e2->src->index)
            continue;

          if (try_crossjump_to_edge (mode, e, e2))
            {
              changed = true;
              nexte = bb->pred;
              break;
            }
        }
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions, etc.  Return nonzero if changes were made.  */

static bool
try_optimize_cfg (mode)
     int mode;
{
  int i;
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  sbitmap blocks;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  for (i = 0; i < n_basic_blocks; i++)
    update_forwarder_flag (BASIC_BLOCK (i));

  /* Attempt to merge blocks as made possible by edge removal.  If a block
     has only one successor, and the successor has only one predecessor,
     they may be combined.  */
  do
    {
      changed = false;
      iterations++;

      if (rtl_dump_file)
        fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
                 iterations);

      for (i = 0; i < n_basic_blocks;)
        {
          basic_block c, b = BASIC_BLOCK (i);
          edge s;
          bool changed_here = false;

          /* Delete trivially dead basic blocks.  */
          while (b->pred == NULL)
            {
              c = BASIC_BLOCK (b->index - 1);
              if (rtl_dump_file)
                fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);

              flow_delete_block (b);
              changed = true;
              b = c;
            }

          /* Remove code labels no longer used.  Don't do this before
             CALL_PLACEHOLDER is removed, as some branches may be hidden
             within.  */
          if (b->pred->pred_next == NULL
              && (b->pred->flags & EDGE_FALLTHRU)
              && !(b->pred->flags & EDGE_COMPLEX)
              && GET_CODE (b->head) == CODE_LABEL
              && (!(mode & CLEANUP_PRE_SIBCALL)
                  || !tail_recursion_label_p (b->head))
              /* If the previous block ends with a branch to this block,
                 we can't delete the label.  Normally this is a condjump
                 that is yet to be simplified, but if CASE_DROPS_THRU,
                 this can be a tablejump with some element going to the
                 same place as the default (fallthru).  */
              && (b->pred->src == ENTRY_BLOCK_PTR
                  || GET_CODE (b->pred->src->end) != JUMP_INSN
                  || ! label_is_jump_target_p (b->head, b->pred->src->end)))
            {
              rtx label = b->head;

              b->head = NEXT_INSN (b->head);
              delete_insn_chain (label, label);
              if (rtl_dump_file)
                fprintf (rtl_dump_file, "Deleted label in block %i.\n",
                         b->index);
            }

          /* If we fall through an empty block, we can remove it.  */
          if (b->pred->pred_next == NULL
              && (b->pred->flags & EDGE_FALLTHRU)
              && GET_CODE (b->head) != CODE_LABEL
              && FORWARDER_BLOCK_P (b)
              /* Note that forwarder_block_p true ensures that there
                 is a successor for this block.  */
              && (b->succ->flags & EDGE_FALLTHRU)
              && n_basic_blocks > 1)
            {
              if (rtl_dump_file)
                fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
                         b->index);

              c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
              redirect_edge_succ_nodup (b->pred, b->succ->dest);
              flow_delete_block (b);
              changed = true;
              b = c;
            }

          /* Merge blocks.  Loop because chains of blocks might be
             combinable.  */
          while ((s = b->succ) != NULL
                 && s->succ_next == NULL
                 && !(s->flags & EDGE_COMPLEX)
                 && (c = s->dest) != EXIT_BLOCK_PTR
                 && c->pred->pred_next == NULL
                 /* If the jump insn has side effects,
                    we can't kill the edge.  */
                 && (GET_CODE (b->end) != JUMP_INSN
                     || onlyjump_p (b->end))
                 && merge_blocks (s, b, c, mode))
            changed_here = true;

          /* Simplify branch over branch.  */
          if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
            {
              BB_SET_FLAG (b, BB_UPDATE_LIFE);
              changed_here = true;
            }

          /* If B has a single outgoing edge, but uses a non-trivial jump
             instruction without side-effects, we can either delete the
             jump entirely, or replace it with a simple unconditional jump.
             Use redirect_edge_and_branch to do the dirty work.  */
          if (b->succ
              && ! b->succ->succ_next
              && b->succ->dest != EXIT_BLOCK_PTR
              && onlyjump_p (b->end)
              && redirect_edge_and_branch (b->succ, b->succ->dest))
            {
              BB_SET_FLAG (b, BB_UPDATE_LIFE);
              update_forwarder_flag (b);
              changed_here = true;
            }

          /* Simplify branch to branch.  */
          if (try_forward_edges (mode, b))
            changed_here = true;

          /* Look for shared code between blocks.  */
          if ((mode & CLEANUP_CROSSJUMP)
              && try_crossjump_bb (mode, b))
            changed_here = true;

          /* Don't get confused by the index shift caused by deleting
             blocks.  */
          if (!changed_here)
            i = b->index + 1;
          else
            changed = true;
        }

      if ((mode & CLEANUP_CROSSJUMP)
          && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
        changed = true;

#ifdef ENABLE_CHECKING
      if (changed)
        verify_flow_info ();
#endif

      changed_overall |= changed;
    }
  while (changed);

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_edges ();

  if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
    {
      bool found = 0;

      blocks = sbitmap_alloc (n_basic_blocks);
      sbitmap_zero (blocks);
      for (i = 0; i < n_basic_blocks; i++)
        if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
          {
            found = 1;
            SET_BIT (blocks, i);
          }

      if (found)
        update_life_info (blocks, UPDATE_LIFE_GLOBAL,
                          PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
                          | PROP_KILL_DEAD_CODE);
      sbitmap_free (blocks);
    }

  for (i = 0; i < n_basic_blocks; i++)
    BASIC_BLOCK (i)->aux = NULL;

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

static bool
delete_unreachable_blocks ()
{
  int i;
  bool changed = false;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  Count down so that we
     don't interfere with the block renumbering that happens in
     flow_delete_block.  */
  for (i = n_basic_blocks - 1; i >= 0; --i)
    {
      basic_block b = BASIC_BLOCK (i);

      if (!(b->flags & BB_REACHABLE))
        flow_delete_block (b), changed = true;
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (mode)
     int mode;
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  changed = delete_unreachable_blocks ();
  if (try_optimize_cfg (mode))
    delete_unreachable_blocks (), changed = true;

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  free_EXPR_LIST_list (&tail_recursion_label_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
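
/* Usage sketch (illustrative; the actual callers live elsewhere in the
   compiler): a pass typically requests the cleanups it can tolerate by
   OR-ing CLEANUP_* flags together, e.g.

        cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   enables the block-motion transformations in merge_blocks and the
   tail merging in try_crossjump_bb, while cleanup_cfg (0) performs
   only the cheap simplifications.  */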