gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer.  The main entry point is
23 cleanup_cfg.  The following optimizations are performed:
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor.  Simplification of the branch instruction is performed by the
28 underlying infrastructure, so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging.  */
34 #include "config.h"
35 #include "system.h"
36 #include "rtl.h"
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
39 #include "timevar.h"
40 #include "output.h"
41 #include "insn-config.h"
42 #include "flags.h"
43 #include "recog.h"
44 #include "toplev.h"
45 #include "cselib.h"
47 #include "obstack.h"
49 /* cleanup_cfg maintains the following flags for each basic block.  */
51 enum bb_flags
53 /* Set if life info needs to be recomputed for the given BB.  */
54 BB_UPDATE_LIFE = 1,
55 /* Set if BB is a forwarder block, to avoid repeated
56 forwarder_block_p calls.  */
57 BB_FORWARDER_BLOCK = 2
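/* The flags are kept in each basic block's otherwise unused AUX field; the macros below cast to and from that pointer.  */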
60 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
61 #define BB_SET_FLAG(BB, FLAG) \
62 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
63 #define BB_CLEAR_FLAG(BB, FLAG) \
64 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
66 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
68 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
69 static bool try_crossjump_bb PARAMS ((int, basic_block));
70 static bool outgoing_edges_match PARAMS ((int,
71 basic_block, basic_block));
72 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
73 rtx *, rtx *));
74 static bool insns_match_p PARAMS ((int, rtx, rtx));
76 static bool delete_unreachable_blocks PARAMS ((void));
77 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
78 static bool tail_recursion_label_p PARAMS ((rtx));
79 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
80 basic_block));
81 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
82 basic_block));
83 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
84 int));
85 static bool try_optimize_cfg PARAMS ((int));
86 static bool try_simplify_condjump PARAMS ((basic_block));
87 static bool try_forward_edges PARAMS ((int, basic_block));
88 static edge thread_jump PARAMS ((int, edge, basic_block));
89 static bool mark_effect PARAMS ((rtx, bitmap));
90 static void notice_new_block PARAMS ((basic_block));
91 static void update_forwarder_flag PARAMS ((basic_block));
93 /* Set flags for newly created block. */
95 static void
96 notice_new_block (bb)
97 basic_block bb;
99 if (!bb)
100 return;
102 BB_SET_FLAG (bb, BB_UPDATE_LIFE);
103 if (forwarder_block_p (bb))
104 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
107 /* Recompute forwarder flag after block has been modified. */
109 static void
110 update_forwarder_flag (bb)
111 basic_block bb;
113 if (forwarder_block_p (bb))
114 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
115 else
116 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
119 /* Simplify a conditional jump around an unconditional jump.
120 Return true if something changed. */
122 static bool
123 try_simplify_condjump (cbranch_block)
124 basic_block cbranch_block;
126 basic_block jump_block, jump_dest_block, cbranch_dest_block;
127 edge cbranch_jump_edge, cbranch_fallthru_edge;
128 rtx cbranch_insn;
130 /* Verify that there are exactly two successors. */
131 if (!cbranch_block->succ
132 || !cbranch_block->succ->succ_next
133 || cbranch_block->succ->succ_next->succ_next)
134 return false;
136 /* Verify that we've got a normal conditional branch at the end
137 of the block. */
138 cbranch_insn = cbranch_block->end;
139 if (!any_condjump_p (cbranch_insn))
140 return false;
142 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
143 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
145 /* The next block must not have multiple predecessors, must not
146 be the last block in the function, and must contain just the
147 unconditional jump. */
148 jump_block = cbranch_fallthru_edge->dest;
149 if (jump_block->pred->pred_next
150 || jump_block->index == n_basic_blocks - 1
151 || !FORWARDER_BLOCK_P (jump_block))
152 return false;
153 jump_dest_block = jump_block->succ->dest;
155 /* The conditional branch must target the block after the
156 unconditional branch. */
157 cbranch_dest_block = cbranch_jump_edge->dest;
159 if (!can_fallthru (jump_block, cbranch_dest_block))
160 return false;
162 /* Invert the conditional branch. */
163 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
164 return false;
166 if (rtl_dump_file)
167 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
168 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
170 /* Success. Update the CFG to match. Note that after this point
171 the edge variable names appear backwards; the redirection is done
172 this way to preserve edge profile data. */
173 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
174 cbranch_dest_block);
175 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
176 jump_dest_block);
177 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
178 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
180 /* Delete the block with the unconditional jump, and clean up the mess. */
181 flow_delete_block (jump_block);
182 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
184 return true;
187 /* Attempt to prove that the operation is a NOOP using CSElib, or mark
188 the effect on the register.  Used by jump threading.  */
190 static bool
191 mark_effect (exp, nonequal)
192 rtx exp;
193 regset nonequal;
195 switch (GET_CODE (exp))
197 /* In case we do clobber the register, mark it as equal, as we know the
198 value is dead so it doesn't have to match.  */
199 case CLOBBER:
200 if (REG_P (XEXP (exp, 0)))
201 CLEAR_REGNO_REG_SET (nonequal, REGNO (XEXP (exp, 0)));
202 return false;
204 case SET:
205 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
206 return false;
207 if (GET_CODE (SET_DEST (exp)) != REG)
208 return true;
209 SET_REGNO_REG_SET (nonequal, REGNO (SET_DEST (exp)));
210 return false;
212 default:
213 return false;
216 /* Attempt to prove that the basic block B will have no side effects and
217 always continues along the same edge if reached via E.  Return that edge
218 if it exists, NULL otherwise.  */
220 static edge
221 thread_jump (mode, e, b)
222 int mode;
223 edge e;
224 basic_block b;
226 rtx set1, set2, cond1, cond2, insn;
227 enum rtx_code code1, code2, reversed_code2;
228 bool reverse1 = false;
229 int i;
230 regset nonequal;
231 bool failed = false;
233 /* At the moment we handle only conditional jumps, but later we may want
234 want to extend this code to tablejumps and others. */
235 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
236 return NULL;
237 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
238 return NULL;
240 /* Second branch must end with onlyjump, as we will eliminate the jump. */
241 if (!any_condjump_p (e->src->end) || !any_condjump_p (b->end)
242 || !onlyjump_p (b->end))
243 return NULL;
245 set1 = pc_set (e->src->end);
246 set2 = pc_set (b->end);
247 if (((e->flags & EDGE_FALLTHRU) != 0)
248 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
249 reverse1 = true;
251 cond1 = XEXP (SET_SRC (set1), 0);
252 cond2 = XEXP (SET_SRC (set2), 0);
253 if (reverse1)
254 code1 = reversed_comparison_code (cond1, b->end);
255 else
256 code1 = GET_CODE (cond1);
258 code2 = GET_CODE (cond2);
259 reversed_code2 = reversed_comparison_code (cond2, b->end);
261 if (!comparison_dominates_p (code1, code2)
262 && !comparison_dominates_p (code1, reversed_code2))
263 return NULL;
265 /* Ensure that the comparison operands are equivalent.
266 ??? This is far too pessimistic.  We should allow swapped operands,
267 different CCmodes, or for example interval comparisons that
268 dominate even when the operands are not equivalent.  */
269 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
270 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
271 return NULL;
273 /* Short circuit cases where block B contains some side effects, as we can't
274 safely bypass it. */
275 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
276 insn = NEXT_INSN (insn))
277 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
278 return NULL;
280 cselib_init ();
282 /* First process all values computed in the source basic block. */
283 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
284 insn = NEXT_INSN (insn))
285 if (INSN_P (insn))
286 cselib_process_insn (insn);
288 nonequal = BITMAP_XMALLOC();
289 CLEAR_REG_SET (nonequal);
291 /* Now assume that we've continued by the edge E to B and continue
292 processing as if it were the same basic block.
293 Our goal is to prove that the whole block is a NOOP.  */
295 for (insn = NEXT_INSN (b->head); insn != b->end && !failed;
296 insn = NEXT_INSN (insn))
298 if (INSN_P (insn))
300 rtx pat = PATTERN (insn);
302 if (GET_CODE (pat) == PARALLEL)
304 for (i = 0; i < XVECLEN (pat, 0); i++)
305 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
307 else
308 failed |= mark_effect (pat, nonequal);
311 cselib_process_insn (insn);
314 /* Later we should clear nonequal of dead registers.  So far we don't
315 have liveness information in cleanup_cfg.  */
316 if (failed)
317 goto failed_exit;
319 /* In case liveness information is available, we need to prove equivalence
320 only of the live values. */
321 if (mode & CLEANUP_UPDATE_LIFE)
322 AND_REG_SET (nonequal, b->global_live_at_end);
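/* If any register is still marked as possibly unequal, we failed to prove that B is a no-op along E; give up.  */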
324 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
326 BITMAP_XFREE (nonequal);
327 cselib_finish ();
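/* The outcome of B's comparison is now known whenever B is entered via E, so return the edge B will always take.  */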
328 if ((comparison_dominates_p (code1, code2) != 0)
329 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
330 return BRANCH_EDGE (b);
331 else
332 return FALLTHRU_EDGE (b);
334 failed_exit:
335 BITMAP_XFREE (nonequal);
336 cselib_finish ();
337 return NULL;
340 /* Attempt to forward edges leaving basic block B.
341 Return true if successful. */
343 static bool
344 try_forward_edges (mode, b)
345 basic_block b;
346 int mode;
348 bool changed = false;
349 edge e, next, threaded_edge;
351 for (e = b->succ; e; e = next)
353 basic_block target, first;
354 int counter;
355 bool threaded = false;
357 next = e->succ_next;
359 /* Skip complex edges because we don't know how to update them.
361 Still handle fallthru edges, as we may succeed in forwarding the
362 fallthru edge to the same place as the branch edge of a conditional
363 branch and turn the conditional branch into an unconditional one.  */
364 if (e->flags & EDGE_COMPLEX)
365 continue;
367 target = first = e->dest;
368 counter = 0;
370 while (counter < n_basic_blocks)
372 basic_block new_target = NULL;
373 bool new_target_threaded = false;
375 if (FORWARDER_BLOCK_P (target)
376 && target->succ->dest != EXIT_BLOCK_PTR)
378 /* Bypass trivial infinite loops. */
379 if (target == target->succ->dest)
380 counter = n_basic_blocks;
381 new_target = target->succ->dest;
384 /* Allow threading over at most one edge at a time to simplify updating
385 of probabilities. */
386 else if ((mode & CLEANUP_THREADING) && !threaded)
388 threaded_edge = thread_jump (mode, e, target);
389 if (threaded_edge)
391 new_target = threaded_edge->dest;
392 new_target_threaded = true;
396 if (!new_target)
397 break;
399 /* Avoid killing loop pre-headers, as they are where the loop
400 optimizer wants to hoist code.
402 For fallthru forwarders, the LOOP_BEG note must appear between
403 the header of the block and the CODE_LABEL of the loop; for
404 non-forwarders it must appear before the JUMP_INSN.  */
405 if (mode & CLEANUP_PRE_LOOP)
407 rtx insn = (target->succ->flags & EDGE_FALLTHRU
408 ? target->head : prev_nonnote_insn (target->end));
410 if (GET_CODE (insn) != NOTE)
411 insn = NEXT_INSN (insn);
413 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
414 insn = NEXT_INSN (insn))
415 if (GET_CODE (insn) == NOTE
416 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
417 break;
419 if (GET_CODE (insn) == NOTE)
420 break;
423 counter++;
424 target = new_target;
425 threaded |= new_target_threaded;
428 if (counter >= n_basic_blocks)
430 if (rtl_dump_file)
431 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
432 target->index);
434 else if (target == first)
435 ; /* We didn't do anything. */
436 else
438 /* Save the values now, as the edge may get removed. */
439 gcov_type edge_count = e->count;
440 int edge_probability = e->probability;
441 int edge_frequency;
443 /* Don't force if target is exit block. */
444 if (threaded && target != EXIT_BLOCK_PTR)
446 notice_new_block (redirect_edge_and_branch_force (e, target));
447 if (rtl_dump_file)
448 fprintf (rtl_dump_file, "Conditionals threaded.\n");
450 else if (!redirect_edge_and_branch (e, target))
452 if (rtl_dump_file)
453 fprintf (rtl_dump_file,
454 "Forwarding edge %i->%i to %i failed.\n",
455 b->index, e->dest->index, target->index);
456 continue;
459 /* We successfully forwarded the edge. Now update profile
460 data: for each edge we traversed in the chain, remove
461 the original edge's execution count. */
462 edge_frequency = ((edge_probability * b->frequency
463 + REG_BR_PROB_BASE / 2)
464 / REG_BR_PROB_BASE);
466 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
467 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
468 BB_SET_FLAG (b, BB_UPDATE_LIFE);
472 edge t;
474 first->count -= edge_count;
475 first->succ->count -= edge_count;
476 first->frequency -= edge_frequency;
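/* Forwarder blocks have a single successor; a block with more than one successor here is the conditional we threaded through, so follow the threaded edge.  */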
477 if (first->succ->succ_next)
478 t = threaded_edge;
479 else
480 t = first->succ;
482 first = t->dest;
484 while (first != target);
486 changed = true;
490 return changed;
493 /* Return true if LABEL is a target of JUMP_INSN. This applies only
494 to non-complex jumps. That is, direct unconditional, conditional,
495 and tablejumps, but not computed jumps or returns. It also does
496 not apply to the fallthru case of a conditional jump. */
498 static bool
499 label_is_jump_target_p (label, jump_insn)
500 rtx label, jump_insn;
502 rtx tmp = JUMP_LABEL (jump_insn);
504 if (label == tmp)
505 return true;
507 if (tmp != NULL_RTX
508 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
509 && GET_CODE (tmp) == JUMP_INSN
510 && (tmp = PATTERN (tmp),
511 GET_CODE (tmp) == ADDR_VEC
512 || GET_CODE (tmp) == ADDR_DIFF_VEC))
514 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
515 int i, veclen = GET_NUM_ELEM (vec);
517 for (i = 0; i < veclen; ++i)
518 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
519 return true;
522 return false;
525 /* Return true if LABEL is used for tail recursion. */
527 static bool
528 tail_recursion_label_p (label)
529 rtx label;
531 rtx x;
533 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
534 if (label == XEXP (x, 0))
535 return true;
537 return false;
540 /* Blocks A and B are to be merged into a single block. A has no incoming
541 fallthru edge, so it can be moved before B without adding or modifying
542 any jumps (aside from the jump from A to B). */
544 static void
545 merge_blocks_move_predecessor_nojumps (a, b)
546 basic_block a, b;
548 rtx barrier;
549 int index;
551 barrier = next_nonnote_insn (a->end);
552 if (GET_CODE (barrier) != BARRIER)
553 abort ();
554 delete_insn (barrier);
556 /* Move block and loop notes out of the chain so that we do not
557 disturb their order.
559 ??? A better solution would be to squeeze out all the non-nested notes
560 and adjust the block trees appropriately. Even better would be to have
561 a tighter connection between block trees and rtl so that this is not
562 necessary. */
563 if (squeeze_notes (&a->head, &a->end))
564 abort ();
566 /* Scramble the insn chain. */
567 if (a->end != PREV_INSN (b->head))
568 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
569 BB_SET_FLAG (a, BB_UPDATE_LIFE);
571 if (rtl_dump_file)
572 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
573 a->index, b->index);
575 /* Swap the records for the two blocks around. Although we are deleting B,
576 A is now where B was and we want to compact the BB array from where
577 A used to be. */
578 BASIC_BLOCK (a->index) = b;
579 BASIC_BLOCK (b->index) = a;
580 index = a->index;
581 a->index = b->index;
582 b->index = index;
584 /* Now blocks A and B are contiguous. Merge them. */
585 merge_blocks_nomove (a, b);
588 /* Blocks A and B are to be merged into a single block. B has no outgoing
589 fallthru edge, so it can be moved after A without adding or modifying
590 any jumps (aside from the jump from A to B). */
592 static void
593 merge_blocks_move_successor_nojumps (a, b)
594 basic_block a, b;
596 rtx barrier, real_b_end;
598 real_b_end = b->end;
599 barrier = NEXT_INSN (b->end);
601 /* Recognize a jump table following block B. */
602 if (barrier
603 && GET_CODE (barrier) == CODE_LABEL
604 && NEXT_INSN (barrier)
605 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
606 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
607 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
609 /* Temporarily add the table jump insn to b, so that it will also
610 be moved to the correct location. */
611 b->end = NEXT_INSN (barrier);
612 barrier = NEXT_INSN (b->end);
615 /* There had better have been a barrier there. Delete it. */
616 if (barrier && GET_CODE (barrier) == BARRIER)
617 delete_insn (barrier);
619 /* Move block and loop notes out of the chain so that we do not
620 disturb their order.
622 ??? A better solution would be to squeeze out all the non-nested notes
623 and adjust the block trees appropriately. Even better would be to have
624 a tighter connection between block trees and rtl so that this is not
625 necessary. */
626 if (squeeze_notes (&b->head, &b->end))
627 abort ();
629 /* Scramble the insn chain. */
630 reorder_insns_nobb (b->head, b->end, a->end);
632 /* Restore the real end of b. */
633 b->end = real_b_end;
635 /* Now blocks A and B are contiguous. Merge them. */
636 merge_blocks_nomove (a, b);
637 BB_SET_FLAG (a, BB_UPDATE_LIFE);
639 if (rtl_dump_file)
640 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
641 b->index, a->index);
644 /* Attempt to merge basic blocks that are potentially non-adjacent.
645 Return true iff the attempt succeeded. */
647 static bool
648 merge_blocks (e, b, c, mode)
649 edge e;
650 basic_block b, c;
651 int mode;
653 /* If C has a tail recursion label, do not merge. There is no
654 edge recorded from the call_placeholder back to this label, as
655 that would make optimize_sibling_and_tail_recursive_calls more
656 complex for no gain. */
657 if ((mode & CLEANUP_PRE_SIBCALL)
658 && GET_CODE (c->head) == CODE_LABEL
659 && tail_recursion_label_p (c->head))
660 return false;
662 /* If B has a fallthru edge to C, no need to move anything. */
663 if (e->flags & EDGE_FALLTHRU)
665 /* We need to update liveness in case C already has broken liveness
666 or B ends with a conditional jump to the next instruction, which
667 will be removed.  */
668 if ((BB_FLAGS (c) & BB_UPDATE_LIFE)
669 || GET_CODE (b->end) == JUMP_INSN)
670 BB_SET_FLAG (b, BB_UPDATE_LIFE);
671 merge_blocks_nomove (b, c);
672 update_forwarder_flag (b);
674 if (rtl_dump_file)
675 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
676 b->index, c->index);
678 return true;
681 /* Otherwise we will need to move code around. Do that only if expensive
682 transformations are allowed. */
683 else if (mode & CLEANUP_EXPENSIVE)
685 edge tmp_edge, b_fallthru_edge;
686 bool c_has_outgoing_fallthru;
687 bool b_has_incoming_fallthru;
689 /* Avoid overactive code motion, as the forwarder blocks should be
690 eliminated by edge redirection instead. One exception might have
691 been if B is a forwarder block and C has no fallthru edge, but
692 that should be cleaned up by bb-reorder instead. */
693 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
694 return false;
696 /* We must make sure to not munge nesting of lexical blocks,
697 and loop notes. This is done by squeezing out all the notes
698 and leaving them there to lie. Not ideal, but functional. */
700 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
701 if (tmp_edge->flags & EDGE_FALLTHRU)
702 break;
704 c_has_outgoing_fallthru = (tmp_edge != NULL);
706 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
707 if (tmp_edge->flags & EDGE_FALLTHRU)
708 break;
710 b_has_incoming_fallthru = (tmp_edge != NULL);
711 b_fallthru_edge = tmp_edge;
713 /* Otherwise, we're going to try to move C after B. If C does
714 not have an outgoing fallthru, then it can be moved
715 immediately after B without introducing or modifying jumps. */
716 if (! c_has_outgoing_fallthru)
718 merge_blocks_move_successor_nojumps (b, c);
719 return true;
722 /* If B does not have an incoming fallthru, then it can be moved
723 immediately before C without introducing or modifying jumps.
724 C cannot be the first block, so we do not have to worry about
725 accessing a non-existent block. */
727 if (b_has_incoming_fallthru)
729 basic_block bb;
731 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
732 return false;
733 bb = force_nonfallthru (b_fallthru_edge);
734 if (bb)
735 notice_new_block (bb);
736 else
737 BB_SET_FLAG (b_fallthru_edge->src, BB_UPDATE_LIFE);
740 merge_blocks_move_predecessor_nojumps (b, c);
741 return true;
744 return false;
748 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
750 static bool
751 insns_match_p (mode, i1, i2)
752 int mode ATTRIBUTE_UNUSED;
753 rtx i1, i2;
755 rtx p1, p2;
757 /* Verify that I1 and I2 are equivalent. */
758 if (GET_CODE (i1) != GET_CODE (i2))
759 return false;
761 p1 = PATTERN (i1);
762 p2 = PATTERN (i2);
764 if (GET_CODE (p1) != GET_CODE (p2))
765 return false;
767 /* If this is a CALL_INSN, compare register usage information.
768 If we don't check this on stack register machines, the two
769 CALL_INSNs might be merged leaving reg-stack.c with mismatching
770 numbers of stack registers in the same basic block.
771 If we don't check this on machines with delay slots, a delay slot may
772 be filled that clobbers a parameter expected by the subroutine.
774 ??? We take the simple route for now and assume that if they're
775 equal, they were constructed identically. */
777 if (GET_CODE (i1) == CALL_INSN
778 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
779 CALL_INSN_FUNCTION_USAGE (i2)))
780 return false;
782 #ifdef STACK_REGS
783 /* If we are cleaning up after register stack conversion (the
784 CLEANUP_POST_REGSTACK mode flag is set), check whether the insn
785 mentions any stack-like regs.  */
787 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
789 /* If register stack conversion has already been done, then
790 death notes must also be compared before it is certain that
791 the two instruction streams match. */
793 rtx note;
794 HARD_REG_SET i1_regset, i2_regset;
796 CLEAR_HARD_REG_SET (i1_regset);
797 CLEAR_HARD_REG_SET (i2_regset);
799 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
800 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
801 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
803 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
804 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
805 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
807 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
809 return false;
811 done:
814 #endif
816 if (reload_completed
817 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
819 /* The following code helps take care of G++ cleanups. */
820 rtx equiv1 = find_reg_equal_equiv_note (i1);
821 rtx equiv2 = find_reg_equal_equiv_note (i2);
823 if (equiv1 && equiv2
824 /* If the equivalences are not to a constant, they may
825 reference pseudos that no longer exist, so we can't
826 use them. */
827 && (! reload_completed
828 || (CONSTANT_P (XEXP (equiv1, 0))
829 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
831 rtx s1 = single_set (i1);
832 rtx s2 = single_set (i2);
833 if (s1 != 0 && s2 != 0
834 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
836 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
837 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
838 if (! rtx_renumbered_equal_p (p1, p2))
839 cancel_changes (0);
840 else if (apply_change_group ())
841 return true;
845 return false;
848 return true;
851 /* Look through the insns at the end of BB1 and BB2 and find the longest
852 sequence of insns that are equivalent.  Store the first insns of that
853 sequence in *F1 and *F2 and return the sequence length.
855 To simplify callers of this function, if the blocks match exactly,
856 store the heads of the blocks in *F1 and *F2.  */
858 static int
859 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
860 int mode ATTRIBUTE_UNUSED;
861 basic_block bb1, bb2;
862 rtx *f1, *f2;
864 rtx i1, i2, last1, last2, afterlast1, afterlast2;
865 int ninsns = 0;
867 /* Skip simple jumps at the end of the blocks. Complex jumps still
868 need to be compared for equivalence, which we'll do below. */
870 i1 = bb1->end;
871 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
872 if (onlyjump_p (i1)
873 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
875 last1 = i1;
876 i1 = PREV_INSN (i1);
879 i2 = bb2->end;
880 if (onlyjump_p (i2)
881 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
883 last2 = i2;
884 /* Count everything except for unconditional jump as insn. */
885 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
886 ninsns++;
887 i2 = PREV_INSN (i2);
890 while (true)
892 /* Ignore notes. */
893 while (!active_insn_p (i1) && i1 != bb1->head)
894 i1 = PREV_INSN (i1);
896 while (!active_insn_p (i2) && i2 != bb2->head)
897 i2 = PREV_INSN (i2);
899 if (i1 == bb1->head || i2 == bb2->head)
900 break;
902 if (!insns_match_p (mode, i1, i2))
903 break;
905 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
906 if (active_insn_p (i1))
908 /* If the merged insns have different REG_EQUAL notes, then
909 remove them. */
910 rtx equiv1 = find_reg_equal_equiv_note (i1);
911 rtx equiv2 = find_reg_equal_equiv_note (i2);
913 if (equiv1 && !equiv2)
914 remove_note (i1, equiv1);
915 else if (!equiv1 && equiv2)
916 remove_note (i2, equiv2);
917 else if (equiv1 && equiv2
918 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
920 remove_note (i1, equiv1);
921 remove_note (i2, equiv2);
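/* Remember the last matched pair and the pair before it, so the cc0 check below can back off by one insn if necessary.  */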
924 afterlast1 = last1, afterlast2 = last2;
925 last1 = i1, last2 = i2;
926 ninsns++;
929 i1 = PREV_INSN (i1);
930 i2 = PREV_INSN (i2);
933 #ifdef HAVE_cc0
934 /* Don't allow the insn after a compare to be shared by
935 cross-jumping unless the compare is also shared. */
936 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
937 last1 = afterlast1, last2 = afterlast2, ninsns--;
938 #endif
940 /* Include preceding notes and labels in the cross-jump. One,
941 this may bring us to the head of the blocks as requested above.
942 Two, it keeps line number notes as closely matched as possible.  */
943 if (ninsns)
945 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
946 last1 = PREV_INSN (last1);
948 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
949 last1 = PREV_INSN (last1);
951 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
952 last2 = PREV_INSN (last2);
954 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
955 last2 = PREV_INSN (last2);
957 *f1 = last1;
958 *f2 = last2;
961 return ninsns;
964 /* Return true iff outgoing edges of BB1 and BB2 match, together with
965 the branch instruction. This means that if we commonize the control
966 flow before the end of the basic block, the semantics remain unchanged.
968 We may assume that there exists one edge with a common destination. */
970 static bool
971 outgoing_edges_match (mode, bb1, bb2)
972 int mode;
973 basic_block bb1;
974 basic_block bb2;
976 int nehedges1 = 0, nehedges2 = 0;
977 edge fallthru1 = 0, fallthru2 = 0;
978 edge e1, e2;
980 /* If BB1 has only one successor, we may be looking at either an
981 unconditional jump, or a fake edge to exit. */
982 if (bb1->succ && !bb1->succ->succ_next
983 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
984 return (bb2->succ && !bb2->succ->succ_next
985 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
987 /* Match conditional jumps - this may get tricky when fallthru and branch
988 edges are crossed. */
989 if (bb1->succ
990 && bb1->succ->succ_next
991 && !bb1->succ->succ_next->succ_next
992 && any_condjump_p (bb1->end)
993 && onlyjump_p (bb1->end))
995 edge b1, f1, b2, f2;
996 bool reverse, match;
997 rtx set1, set2, cond1, cond2;
998 enum rtx_code code1, code2;
1000 if (!bb2->succ
1001 || !bb2->succ->succ_next
1002 || bb2->succ->succ_next->succ_next
1003 || !any_condjump_p (bb2->end)
1004 || !onlyjump_p (bb2->end))
1005 return false;
1007 b1 = BRANCH_EDGE (bb1);
1008 b2 = BRANCH_EDGE (bb2);
1009 f1 = FALLTHRU_EDGE (bb1);
1010 f2 = FALLTHRU_EDGE (bb2);
1012 /* Get around possible forwarders on fallthru edges. Other cases
1013 should be optimized out already. */
1014 if (FORWARDER_BLOCK_P (f1->dest))
1015 f1 = f1->dest->succ;
1017 if (FORWARDER_BLOCK_P (f2->dest))
1018 f2 = f2->dest->succ;
1020 /* To simplify use of this function, return false if there are
1021 unneeded forwarder blocks. These will get eliminated later
1022 during cleanup_cfg. */
1023 if (FORWARDER_BLOCK_P (f1->dest)
1024 || FORWARDER_BLOCK_P (f2->dest)
1025 || FORWARDER_BLOCK_P (b1->dest)
1026 || FORWARDER_BLOCK_P (b2->dest))
1027 return false;
1029 if (f1->dest == f2->dest && b1->dest == b2->dest)
1030 reverse = false;
1031 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1032 reverse = true;
1033 else
1034 return false;
1036 set1 = pc_set (bb1->end);
1037 set2 = pc_set (bb2->end);
1038 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1039 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1040 reverse = !reverse;
1042 cond1 = XEXP (SET_SRC (set1), 0);
1043 cond2 = XEXP (SET_SRC (set2), 0);
1044 code1 = GET_CODE (cond1);
1045 if (reverse)
1046 code2 = reversed_comparison_code (cond2, bb2->end);
1047 else
1048 code2 = GET_CODE (cond2);
1050 if (code2 == UNKNOWN)
1051 return false;
1053 /* Verify codes and operands match. */
1054 match = ((code1 == code2
1055 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1056 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1057 || (code1 == swap_condition (code2)
1058 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1059 XEXP (cond2, 0))
1060 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1061 XEXP (cond2, 1))));
1063 /* If we return true, we will join the blocks. Which means that
1064 we will only have one branch prediction bit to work with. Thus
1065 we require the existing branches to have probabilities that are
1066 roughly similar. */
1067 /* ??? We should use bb->frequency to allow merging in infrequently
1068 executed blocks, but at the moment it is not available when
1069 cleanup_cfg is run. */
1070 if (match && !optimize_size)
1072 rtx note1, note2;
1073 int prob1, prob2;
1075 note1 = find_reg_note (bb1->end, REG_BR_PROB, 0);
1076 note2 = find_reg_note (bb2->end, REG_BR_PROB, 0);
1078 if (note1 && note2)
1080 prob1 = INTVAL (XEXP (note1, 0));
1081 prob2 = INTVAL (XEXP (note2, 0));
1082 if (reverse)
1083 prob2 = REG_BR_PROB_BASE - prob2;
1085 /* Fail if the difference in probabilities is
1086 greater than 5%. */
1087 if (abs (prob1 - prob2) > REG_BR_PROB_BASE / 20)
1088 return false;
1091 else if (note1 || note2)
1092 return false;
1095 if (rtl_dump_file && match)
1096 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1097 bb1->index, bb2->index);
1099 return match;
1102 /* Generic case - we are seeing a computed jump, table jump or trapping
1103 instruction. */
1105 /* First ensure that the instructions match. There may be many outgoing
1106 edges so this test is generally cheaper.
1107 ??? Currently the tablejumps will never match, as they do have
1108 different tables. */
1109 if (!insns_match_p (mode, bb1->end, bb2->end))
1110 return false;
1112 /* Search the outgoing edges, ensure that the counts do match, find possible
1113 fallthru and exception handling edges since these need more
1114 validation. */
1115 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1116 e1 = e1->succ_next, e2 = e2->succ_next)
1118 if (e1->flags & EDGE_EH)
1119 nehedges1++;
1121 if (e2->flags & EDGE_EH)
1122 nehedges2++;
1124 if (e1->flags & EDGE_FALLTHRU)
1125 fallthru1 = e1;
1126 if (e2->flags & EDGE_FALLTHRU)
1127 fallthru2 = e2;
1130 /* If number of edges of various types does not match, fail. */
1131 if (e1 || e2
1132 || nehedges1 != nehedges2
1133 || (fallthru1 != 0) != (fallthru2 != 0))
1134 return false;
1136 /* fallthru edges must be forwarded to the same destination. */
1137 if (fallthru1)
1139 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1140 ? fallthru1->dest->succ->dest: fallthru1->dest);
1141 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1142 ? fallthru2->dest->succ->dest: fallthru2->dest);
1144 if (d1 != d2)
1145 return false;
1148 /* In case we do have EH edges, ensure we are in the same region. */
1149 if (nehedges1)
1151 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1152 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1154 if (XEXP (n1, 0) != XEXP (n2, 0))
1155 return false;
1158 /* We don't need to match the rest of the edges, as the above checks should
1159 be enough to ensure that they are equivalent.  */
1160 return true;
1163 /* E1 and E2 are edges with the same destination block. Search their
1164 predecessors for common code. If found, redirect control flow from
1165 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1167 static bool
1168 try_crossjump_to_edge (mode, e1, e2)
1169 int mode;
1170 edge e1, e2;
1172 int nmatch;
1173 basic_block src1 = e1->src, src2 = e2->src;
1174 basic_block redirect_to;
1175 rtx newpos1, newpos2;
1176 edge s;
1177 rtx last;
1178 rtx label;
1179 rtx note;
1181 /* Search backward through forwarder blocks. We don't need to worry
1182 about multiple entry or chained forwarders, as they will be optimized
1183 away. We do this to look past the unconditional jump following a
1184 conditional jump that is required due to the current CFG shape. */
1185 if (src1->pred
1186 && !src1->pred->pred_next
1187 && FORWARDER_BLOCK_P (src1))
1188 e1 = src1->pred, src1 = e1->src;
1190 if (src2->pred
1191 && !src2->pred->pred_next
1192 && FORWARDER_BLOCK_P (src2))
1193 e2 = src2->pred, src2 = e2->src;
1195 /* Nothing to do if we reach ENTRY, or a common source block. */
1196 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1197 return false;
1198 if (src1 == src2)
1199 return false;
1201 /* Seeing more than one forwarder block would confuse us later... */
1202 if (FORWARDER_BLOCK_P (e1->dest)
1203 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1204 return false;
1206 if (FORWARDER_BLOCK_P (e2->dest)
1207 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1208 return false;
1210 /* Likewise with dead code (possibly newly created by the other
1211 optimizations of cleanup_cfg).  */
1212 if (!src1->pred || !src2->pred)
1213 return false;
1215 /* Look for the common insn sequence, part the first ... */
1216 if (!outgoing_edges_match (mode, src1, src2))
1217 return false;
1219 /* ... and part the second. */
1220 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1221 if (!nmatch)
1222 return false;
1224 /* Avoid splitting if possible. */
1225 if (newpos2 == src2->head)
1226 redirect_to = src2;
1227 else
1229 if (rtl_dump_file)
1230 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1231 src2->index, nmatch);
1232 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1235 if (rtl_dump_file)
1236 fprintf (rtl_dump_file,
1237 "Cross jumping from bb %i to bb %i; %i common insns\n",
1238 src1->index, src2->index, nmatch);
1240 redirect_to->count += src1->count;
1241 redirect_to->frequency += src1->frequency;
1243 /* Recompute the frequencies and counts of outgoing edges. */
1244 for (s = redirect_to->succ; s; s = s->succ_next)
1246 edge s2;
1247 basic_block d = s->dest;
1249 if (FORWARDER_BLOCK_P (d))
1250 d = d->succ->dest;
1252 for (s2 = src1->succ; ; s2 = s2->succ_next)
1254 basic_block d2 = s2->dest;
1255 if (FORWARDER_BLOCK_P (d2))
1256 d2 = d2->succ->dest;
1257 if (d == d2)
1258 break;
1261 s->count += s2->count;
1263 /* Take care to update possible forwarder blocks. We verified
1264 that there is no more than one in the chain, so we can't run
1265 into an infinite loop.  */
1266 if (FORWARDER_BLOCK_P (s->dest))
1268 s->dest->succ->count += s2->count;
1269 s->dest->count += s2->count;
1270 s->dest->frequency += EDGE_FREQUENCY (s);
1273 if (FORWARDER_BLOCK_P (s2->dest))
1275 s2->dest->succ->count -= s2->count;
1276 s2->dest->count -= s2->count;
1277 s2->dest->frequency -= EDGE_FREQUENCY (s);
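/* Combine the outgoing probabilities, weighting each by its block's frequency; fall back to a simple average when both frequencies are zero.  */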
1280 if (!redirect_to->frequency && !src1->frequency)
1281 s->probability = (s->probability + s2->probability) / 2;
1282 else
1283 s->probability
1284 = ((s->probability * redirect_to->frequency +
1285 s2->probability * src1->frequency)
1286 / (redirect_to->frequency + src1->frequency));
1289 note = find_reg_note (redirect_to->end, REG_BR_PROB, 0);
1290 if (note)
1291 XEXP (note, 0) = GEN_INT (BRANCH_EDGE (redirect_to)->probability);
1293 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1295 /* Skip possible basic block header. */
1296 if (GET_CODE (newpos1) == CODE_LABEL)
1297 newpos1 = NEXT_INSN (newpos1);
1299 if (GET_CODE (newpos1) == NOTE)
1300 newpos1 = NEXT_INSN (newpos1);
1301 last = src1->end;
1303 /* Emit the jump insn. */
1304 label = block_label (redirect_to);
1305 emit_jump_insn_after (gen_jump (label), src1->end);
1306 JUMP_LABEL (src1->end) = label;
1307 LABEL_NUSES (label)++;
1309 /* Delete the now unreachable instructions. */
1310 delete_insn_chain (newpos1, last);
1312 /* Make sure there is a barrier after the new jump. */
1313 last = next_nonnote_insn (src1->end);
1314 if (!last || GET_CODE (last) != BARRIER)
1315 emit_barrier_after (src1->end);
1317 /* Update CFG. */
1318 while (src1->succ)
1319 remove_edge (src1->succ);
1320 make_single_succ_edge (src1, redirect_to, 0);
1322 BB_SET_FLAG (src1, BB_UPDATE_LIFE);
1323 update_forwarder_flag (src1);
1325 return true;
1328 /* Search the predecessors of BB for common insn sequences. When found,
1329 share code between them by redirecting control flow. Return true if
1330 any changes made. */
1332 static bool
1333 try_crossjump_bb (mode, bb)
1334 int mode;
1335 basic_block bb;
1337 edge e, e2, nexte2, nexte, fallthru;
1338 bool changed;
1340 /* Nothing to do if there are not at least two incoming edges.  */
1341 if (!bb->pred || !bb->pred->pred_next)
1342 return false;
1344 /* It is always cheapest to redirect a block that ends in a branch to
1345 a block that falls through into BB, as that adds no branches to the
1346 program. We'll try that combination first. */
1347 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next)
1348 if (fallthru->flags & EDGE_FALLTHRU)
1349 break;
1351 changed = false;
1352 for (e = bb->pred; e; e = nexte)
1354 nexte = e->pred_next;
1356 /* As noted above, first try with the fallthru predecessor. */
1357 if (fallthru)
1359 /* Don't combine the fallthru edge into anything else.
1360 If there is a match, we'll do it the other way around. */
1361 if (e == fallthru)
1362 continue;
1364 if (try_crossjump_to_edge (mode, e, fallthru))
1366 changed = true;
1367 nexte = bb->pred;
1368 continue;
1372 /* Non-obvious work limiting check: Recognize that we're going
1373 to call try_crossjump_bb on every basic block. So if we have
1374 two blocks with lots of outgoing edges (a switch) and they
1375 share lots of common destinations, then we would do the
1376 cross-jump check once for each common destination.
1378 Now, if the blocks actually are cross-jump candidates, then
1379 all of their destinations will be shared. Which means that
1380 we only need check them for cross-jump candidacy once. We
1381 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1382 choosing to do the check from the block for which the edge
1383 in question is the first successor of A. */
1384 if (e->src->succ != e)
1385 continue;
1387 for (e2 = bb->pred; e2; e2 = nexte2)
1389 nexte2 = e2->pred_next;
1391 if (e2 == e)
1392 continue;
1394 /* We've already checked the fallthru edge above. */
1395 if (e2 == fallthru)
1396 continue;
1398 /* The "first successor" check above only prevents multiple
1399 checks of crossjump(A,B). In order to prevent redundant
1400 checks of crossjump(B,A), require that A be the block
1401 with the lowest index. */
1402 if (e->src->index > e2->src->index)
1403 continue;
1405 if (try_crossjump_to_edge (mode, e, e2))
1407 changed = true;
1408 nexte = bb->pred;
1409 break;
1414 return changed;
1417 /* Do simple CFG optimizations - basic block merging, simplifying of jump
1418 instructions etc. Return nonzero if changes were made. */
1420 static bool
1421 try_optimize_cfg (mode)
1422 int mode;
1424 int i;
1425 bool changed_overall = false;
1426 bool changed;
1427 int iterations = 0;
1428 sbitmap blocks;
1430 if (mode & CLEANUP_CROSSJUMP)
1431 add_noreturn_fake_exit_edges ();
1433 for (i = 0; i < n_basic_blocks; i++)
1434 update_forwarder_flag (BASIC_BLOCK (i));
1436 /* Attempt to merge blocks as made possible by edge removal. If a block
1437 has only one successor, and the successor has only one predecessor,
1438 they may be combined. */
1441 changed = false;
1442 iterations++;
1444 if (rtl_dump_file)
1445 fprintf (rtl_dump_file, "\n\ntry_optimize_cfg iteration %i\n\n",
1446 iterations);
1448 for (i = 0; i < n_basic_blocks;)
1450 basic_block c, b = BASIC_BLOCK (i);
1451 edge s;
1452 bool changed_here = false;
1454 /* Delete trivially dead basic blocks. */
1455 while (b->pred == NULL)
1457 c = BASIC_BLOCK (b->index - 1);
1458 if (rtl_dump_file)
1459 fprintf (rtl_dump_file, "Deleting block %i.\n", b->index);
1461 flow_delete_block (b);
1462 changed = true;
1463 b = c;
1466 /* Remove code labels no longer used. Don't do this before
1467 CALL_PLACEHOLDER is removed, as some branches may be hidden
1468 within. */
1469 if (b->pred->pred_next == NULL
1470 && (b->pred->flags & EDGE_FALLTHRU)
1471 && !(b->pred->flags & EDGE_COMPLEX)
1472 && GET_CODE (b->head) == CODE_LABEL
1473 && (!(mode & CLEANUP_PRE_SIBCALL)
1474 || !tail_recursion_label_p (b->head))
1475 /* If the previous block ends with a branch to this block,
1476 we can't delete the label. Normally this is a condjump
1477 that is yet to be simplified, but if CASE_DROPS_THRU,
1478 this can be a tablejump with some element going to the
1479 same place as the default (fallthru). */
1480 && (b->pred->src == ENTRY_BLOCK_PTR
1481 || GET_CODE (b->pred->src->end) != JUMP_INSN
1482 || ! label_is_jump_target_p (b->head, b->pred->src->end)))
1484 rtx label = b->head;
1486 b->head = NEXT_INSN (b->head);
1487 delete_insn_chain (label, label);
1488 if (rtl_dump_file)
1489 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1490 b->index);
1493 /* If we fall through an empty block, we can remove it. */
1494 if (b->pred->pred_next == NULL
1495 && (b->pred->flags & EDGE_FALLTHRU)
1496 && GET_CODE (b->head) != CODE_LABEL
1497 && FORWARDER_BLOCK_P (b)
1498 /* Note that forwarder_block_p true ensures that there
1499 is a successor for this block. */
1500 && (b->succ->flags & EDGE_FALLTHRU)
1501 && n_basic_blocks > 1)
1503 if (rtl_dump_file)
1504 fprintf (rtl_dump_file, "Deleting fallthru block %i.\n",
1505 b->index);
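/* Choose the block from which to continue scanning: the previous block, or block 1 when B is block 0.  */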
1507 c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
1508 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1509 flow_delete_block (b);
1510 changed = true;
1511 b = c;
1514 /* Merge blocks. Loop because chains of blocks might be
1515 combinable.  */
1516 while ((s = b->succ) != NULL
1517 && s->succ_next == NULL
1518 && !(s->flags & EDGE_COMPLEX)
1519 && (c = s->dest) != EXIT_BLOCK_PTR
1520 && c->pred->pred_next == NULL
1521 /* If the jump insn has side effects,
1522 we can't kill the edge. */
1523 && (GET_CODE (b->end) != JUMP_INSN
1524 || onlyjump_p (b->end))
1525 && merge_blocks (s, b, c, mode))
1526 changed_here = true;
1528 /* Simplify branch over branch. */
1529 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1531 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1532 changed_here = true;
1535 /* If B has a single outgoing edge, but uses a non-trivial jump
1536 instruction without side-effects, we can either delete the
1537 jump entirely, or replace it with a simple unconditional jump.
1538 Use redirect_edge_and_branch to do the dirty work. */
1539 if (b->succ
1540 && ! b->succ->succ_next
1541 && b->succ->dest != EXIT_BLOCK_PTR
1542 && onlyjump_p (b->end)
1543 && redirect_edge_and_branch (b->succ, b->succ->dest))
1545 BB_SET_FLAG (b, BB_UPDATE_LIFE);
1546 update_forwarder_flag (b);
1547 changed_here = true;
1550 /* Simplify branch to branch. */
1551 if (try_forward_edges (mode, b))
1552 changed_here = true;
1554 /* Look for shared code between blocks. */
1555 if ((mode & CLEANUP_CROSSJUMP)
1556 && try_crossjump_bb (mode, b))
1557 changed_here = true;
1559 /* Don't get confused by the index shift caused by deleting
1560 blocks. */
1561 if (!changed_here)
1562 i = b->index + 1;
1563 else
1564 changed = true;
1567 if ((mode & CLEANUP_CROSSJUMP)
1568 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1569 changed = true;
1571 #ifdef ENABLE_CHECKING
1572 if (changed)
1573 verify_flow_info ();
1574 #endif
1576 changed_overall |= changed;
1578 while (changed);
1580 if (mode & CLEANUP_CROSSJUMP)
1581 remove_fake_edges ();
1583 if ((mode & CLEANUP_UPDATE_LIFE) && changed_overall)
1585 bool found = 0;
1587 blocks = sbitmap_alloc (n_basic_blocks);
1588 sbitmap_zero (blocks);
1589 for (i = 0; i < n_basic_blocks; i++)
1590 if (BB_FLAGS (BASIC_BLOCK (i)) & BB_UPDATE_LIFE)
1592 found = 1;
1593 SET_BIT (blocks, i);
1596 if (found)
1597 update_life_info (blocks, UPDATE_LIFE_GLOBAL,
1598 PROP_DEATH_NOTES | PROP_SCAN_DEAD_CODE
1599 | PROP_KILL_DEAD_CODE);
1600 sbitmap_free (blocks);
1603 for (i = 0; i < n_basic_blocks; i++)
1604 BASIC_BLOCK (i)->aux = NULL;
1606 return changed_overall;
1609 /* Delete all unreachable basic blocks. */
1611 static bool
1612 delete_unreachable_blocks ()
1614 int i;
1615 bool changed = false;
1617 find_unreachable_blocks ();
1619 /* Delete all unreachable basic blocks. Count down so that we
1620 don't interfere with the block renumbering that happens in
1621 flow_delete_block. */
1623 for (i = n_basic_blocks - 1; i >= 0; --i)
1625 basic_block b = BASIC_BLOCK (i);
1627 if (!(b->flags & BB_REACHABLE))
1628 flow_delete_block (b), changed = true;
1631 if (changed)
1632 tidy_fallthru_edges ();
1633 return changed;
1636 /* Tidy the CFG by deleting unreachable code and whatnot. */
1638 bool
1639 cleanup_cfg (mode)
1640 int mode;
1642 bool changed = false;
1644 timevar_push (TV_CLEANUP_CFG);
1645 changed = delete_unreachable_blocks ();
1646 if (try_optimize_cfg (mode))
1647 delete_unreachable_blocks (), changed = true;
1649 /* Kill the data we won't maintain. */
1650 free_EXPR_LIST_list (&label_value_list);
1651 free_EXPR_LIST_list (&tail_recursion_label_list);
1652 timevar_pop (TV_CLEANUP_CFG);
1654 return changed;