1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer.  The main entry point is
23 cleanup_cfg.  The following optimizations are performed:
25 - Unreachable block removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor.  Simplification of the branch instruction is performed by
28 the underlying infrastructure so the branch can be converted to a
29 simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging.  */
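/* Roughly, cleanup_cfg () below first deletes unreachable blocks and then
   repeatedly calls try_optimize_cfg (), which applies the transformations
   listed above to each basic block, until no further changes are made.  */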
34 #include "config.h"
35 #include "system.h"
36 #include "rtl.h"
37 #include "hard-reg-set.h"
38 #include "basic-block.h"
39 #include "timevar.h"
40 #include "output.h"
41 #include "insn-config.h"
42 #include "flags.h"
43 #include "recog.h"
44 #include "toplev.h"
45 #include "cselib.h"
46 #include "tm_p.h"
47 #include "target.h"
49 /* cleanup_cfg maintains the following flags for each basic block.  */
51 enum bb_flags
53 /* Set if BB is a forwarder block, to avoid too many
54 forwarder_block_p calls.  */
55 BB_FORWARDER_BLOCK = 1,
56 BB_NONTHREADABLE_BLOCK = 2
59 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
60 #define BB_SET_FLAG(BB, FLAG) \
61 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
62 #define BB_CLEAR_FLAG(BB, FLAG) \
63 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
65 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
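/* These flags live in each basic block's otherwise unused aux field; they
   are dropped again via clear_aux_for_blocks () at the end of
   try_optimize_cfg ().  */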
67 static bool try_crossjump_to_edge PARAMS ((int, edge, edge));
68 static bool try_crossjump_bb PARAMS ((int, basic_block));
69 static bool outgoing_edges_match PARAMS ((int,
70 basic_block, basic_block));
71 static int flow_find_cross_jump PARAMS ((int, basic_block, basic_block,
72 rtx *, rtx *));
73 static bool insns_match_p PARAMS ((int, rtx, rtx));
75 static bool label_is_jump_target_p PARAMS ((rtx, rtx));
76 static bool tail_recursion_label_p PARAMS ((rtx));
77 static void merge_blocks_move_predecessor_nojumps PARAMS ((basic_block,
78 basic_block));
79 static void merge_blocks_move_successor_nojumps PARAMS ((basic_block,
80 basic_block));
81 static bool merge_blocks PARAMS ((edge,basic_block,basic_block,
82 int));
83 static bool try_optimize_cfg PARAMS ((int));
84 static bool try_simplify_condjump PARAMS ((basic_block));
85 static bool try_forward_edges PARAMS ((int, basic_block));
86 static edge thread_jump PARAMS ((int, edge, basic_block));
87 static bool mark_effect PARAMS ((rtx, bitmap));
88 static void notice_new_block PARAMS ((basic_block));
89 static void update_forwarder_flag PARAMS ((basic_block));
90 static int mentions_nonequal_regs PARAMS ((rtx *, void *));
92 /* Set flags for a newly created block.  */
94 static void
95 notice_new_block (bb)
96 basic_block bb;
98 if (!bb)
99 return;
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
105 /* Recompute the forwarder flag after the block has been modified.  */
107 static void
108 update_forwarder_flag (bb)
109 basic_block bb;
111 if (forwarder_block_p (bb))
112 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
113 else
114 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
117 /* Simplify a conditional jump around an unconditional jump.
118 Return true if something changed. */
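/* Schematically, the shape handled here is

      if (cond) goto L2;                 if (!cond) goto L1;
      goto L1;                    =>   L2:
    L2:                                  ...
      ...

   i.e. the conditional branch is inverted so that it targets L1 directly,
   after which the block holding the unconditional jump can be deleted.  */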
120 static bool
121 try_simplify_condjump (cbranch_block)
122 basic_block cbranch_block;
124 basic_block jump_block, jump_dest_block, cbranch_dest_block;
125 edge cbranch_jump_edge, cbranch_fallthru_edge;
126 rtx cbranch_insn;
128 /* Verify that there are exactly two successors. */
129 if (!cbranch_block->succ
130 || !cbranch_block->succ->succ_next
131 || cbranch_block->succ->succ_next->succ_next)
132 return false;
134 /* Verify that we've got a normal conditional branch at the end
135 of the block. */
136 cbranch_insn = cbranch_block->end;
137 if (!any_condjump_p (cbranch_insn))
138 return false;
140 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
141 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
143 /* The next block must not have multiple predecessors, must not
144 be the last block in the function, and must contain just the
145 unconditional jump. */
146 jump_block = cbranch_fallthru_edge->dest;
147 if (jump_block->pred->pred_next
148 || jump_block->next_bb == EXIT_BLOCK_PTR
149 || !FORWARDER_BLOCK_P (jump_block))
150 return false;
151 jump_dest_block = jump_block->succ->dest;
153 /* The conditional branch must target the block after the
154 unconditional branch. */
155 cbranch_dest_block = cbranch_jump_edge->dest;
157 if (!can_fallthru (jump_block, cbranch_dest_block))
158 return false;
160 /* Invert the conditional branch. */
161 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
162 return false;
164 if (rtl_dump_file)
165 fprintf (rtl_dump_file, "Simplifying condjump %i around jump %i\n",
166 INSN_UID (cbranch_insn), INSN_UID (jump_block->end));
168 /* Success. Update the CFG to match. Note that after this point
169 the edge variable names appear backwards; the redirection is done
170 this way to preserve edge profile data. */
171 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
172 cbranch_dest_block);
173 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
174 jump_dest_block);
175 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
176 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
177 update_br_prob_note (cbranch_block);
179 /* Delete the block with the unconditional jump, and clean up the mess. */
180 flow_delete_block (jump_block);
181 tidy_fallthru_edge (cbranch_jump_edge, cbranch_block, cbranch_dest_block);
183 return true;
186 /* Attempt to prove that the operation is a NOOP using CSElib; otherwise mark
187 the effect on the register.  Used by jump threading.  */
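/* For example, a SET whose destination is already cselib-equivalent to its
   source is a no-op; a SET of a register puts that register (and any extra
   hard registers it spans) into NONEQUAL; and a SET of anything else except
   the pc makes the caller give up (the return value feeds its FAILED flag).  */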
189 static bool
190 mark_effect (exp, nonequal)
191 rtx exp;
192 regset nonequal;
194 int regno;
195 rtx dest;
196 switch (GET_CODE (exp))
198 /* If we clobber the register, mark it as equal, as we know the
199 value is dead and so does not have to match.  */
200 case CLOBBER:
201 if (REG_P (XEXP (exp, 0)))
203 dest = XEXP (exp, 0);
204 regno = REGNO (dest);
205 CLEAR_REGNO_REG_SET (nonequal, regno);
206 if (regno < FIRST_PSEUDO_REGISTER)
208 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
209 while (--n > 0)
210 CLEAR_REGNO_REG_SET (nonequal, regno + n);
213 return false;
215 case SET:
216 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
217 return false;
218 dest = SET_DEST (exp);
219 if (dest == pc_rtx)
220 return false;
221 if (!REG_P (dest))
222 return true;
223 regno = REGNO (dest);
224 SET_REGNO_REG_SET (nonequal, regno);
225 if (regno < FIRST_PSEUDO_REGISTER)
227 int n = HARD_REGNO_NREGS (regno, GET_MODE (dest));
228 while (--n > 0)
229 SET_REGNO_REG_SET (nonequal, regno + n);
231 return false;
233 default:
234 return false;
238 /* Return nonzero if X is a register set in regset DATA.
239 Called via for_each_rtx. */
240 static int
241 mentions_nonequal_regs (x, data)
242 rtx *x;
243 void *data;
245 regset nonequal = (regset) data;
246 if (REG_P (*x))
248 int regno;
250 regno = REGNO (*x);
251 if (REGNO_REG_SET_P (nonequal, regno))
252 return 1;
253 if (regno < FIRST_PSEUDO_REGISTER)
255 int n = HARD_REGNO_NREGS (regno, GET_MODE (*x));
256 while (--n > 0)
257 if (REGNO_REG_SET_P (nonequal, regno + n))
258 return 1;
261 return 0;
263 /* Attempt to prove that basic block B will have no side effects and will
264 always continue via the same edge if reached via E.  Return that edge
265 if it exists, NULL otherwise.  */
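/* Schematically: if E->src ends in "if (x == 0) ..." and B ends in a test
   whose outcome is dominated by that condition (e.g. another "x == 0" test),
   and B computes nothing else of consequence along the way, then B's branch
   direction is already known when entering via E, and E can be threaded
   straight to the corresponding successor of B.  */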
267 static edge
268 thread_jump (mode, e, b)
269 int mode;
270 edge e;
271 basic_block b;
273 rtx set1, set2, cond1, cond2, insn;
274 enum rtx_code code1, code2, reversed_code2;
275 bool reverse1 = false;
276 int i;
277 regset nonequal;
278 bool failed = false;
280 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
281 return NULL;
283 /* At the moment we handle only conditional jumps, but later we may
284 want to extend this code to tablejumps and others.  */
285 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
286 return NULL;
287 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
289 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
290 return NULL;
293 /* The second branch must end with an onlyjump, as we will eliminate the jump.  */
294 if (!any_condjump_p (e->src->end))
295 return NULL;
297 if (!any_condjump_p (b->end) || !onlyjump_p (b->end))
299 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
300 return NULL;
303 set1 = pc_set (e->src->end);
304 set2 = pc_set (b->end);
305 if (((e->flags & EDGE_FALLTHRU) != 0)
306 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
307 reverse1 = true;
309 cond1 = XEXP (SET_SRC (set1), 0);
310 cond2 = XEXP (SET_SRC (set2), 0);
311 if (reverse1)
312 code1 = reversed_comparison_code (cond1, e->src->end);
313 else
314 code1 = GET_CODE (cond1);
316 code2 = GET_CODE (cond2);
317 reversed_code2 = reversed_comparison_code (cond2, b->end);
319 if (!comparison_dominates_p (code1, code2)
320 && !comparison_dominates_p (code1, reversed_code2))
321 return NULL;
323 /* Ensure that the comparison operators are equivalent.
324 ??? This is far too pessimistic.  We should allow swapped operands,
325 different CCmodes, or for example interval comparisons that
326 dominate even when the operands are not equivalent.  */
327 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
328 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
329 return NULL;
331 /* Short circuit cases where block B contains some side effects, as we can't
332 safely bypass it. */
333 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end);
334 insn = NEXT_INSN (insn))
335 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
337 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
338 return NULL;
341 cselib_init ();
343 /* First process all values computed in the source basic block. */
344 for (insn = NEXT_INSN (e->src->head); insn != NEXT_INSN (e->src->end);
345 insn = NEXT_INSN (insn))
346 if (INSN_P (insn))
347 cselib_process_insn (insn);
349 nonequal = BITMAP_XMALLOC();
350 CLEAR_REG_SET (nonequal);
352 /* Now assume that we've continued along edge E to B and continue
353 processing as if it were the same basic block.
354 Our goal is to prove that the whole block is a NOOP.  */
356 for (insn = NEXT_INSN (b->head); insn != NEXT_INSN (b->end) && !failed;
357 insn = NEXT_INSN (insn))
359 if (INSN_P (insn))
361 rtx pat = PATTERN (insn);
363 if (GET_CODE (pat) == PARALLEL)
365 for (i = 0; i < XVECLEN (pat, 0); i++)
366 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
368 else
369 failed |= mark_effect (pat, nonequal);
372 cselib_process_insn (insn);
375 /* Later we should clear nonequal of dead registers.  So far we don't
376 have liveness information in cfg_cleanup.  */
377 if (failed)
379 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
380 goto failed_exit;
383 /* cond2 must not mention any register whose value is not known to be
384 equal to its value in the former block.  */
385 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
386 goto failed_exit;
388 /* If liveness information is available, we need to prove equivalence
389 only for the live values.  */
390 if (mode & CLEANUP_UPDATE_LIFE)
391 AND_REG_SET (nonequal, b->global_live_at_end);
393 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
395 BITMAP_XFREE (nonequal);
396 cselib_finish ();
397 if ((comparison_dominates_p (code1, code2) != 0)
398 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
399 return BRANCH_EDGE (b);
400 else
401 return FALLTHRU_EDGE (b);
403 failed_exit:
404 BITMAP_XFREE (nonequal);
405 cselib_finish ();
406 return NULL;
409 /* Attempt to forward edges leaving basic block B.
410 Return true if successful. */
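/* A chain such as  B -> F1 -> F2 -> T, where F1 and F2 are forwarder blocks
   containing nothing but an unconditional jump, is collapsed by redirecting
   B's outgoing edge straight to T.  With CLEANUP_THREADING the chain may
   additionally be extended through blocks whose conditional branch outcome
   is already known, using thread_jump () above.  */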
412 static bool
413 try_forward_edges (mode, b)
414 basic_block b;
415 int mode;
417 bool changed = false;
418 edge e, next, *threaded_edges = NULL;
420 for (e = b->succ; e; e = next)
422 basic_block target, first;
423 int counter;
424 bool threaded = false;
425 int nthreaded_edges = 0;
427 next = e->succ_next;
429 /* Skip complex edges because we don't know how to update them.
431 Still handle fallthru edges, as we may succeed in forwarding the fallthru
432 edge to the same place as the branch edge of a conditional branch
433 and turn the conditional branch into an unconditional branch.  */
434 if (e->flags & EDGE_COMPLEX)
435 continue;
437 target = first = e->dest;
438 counter = 0;
440 while (counter < n_basic_blocks)
442 basic_block new_target = NULL;
443 bool new_target_threaded = false;
445 if (FORWARDER_BLOCK_P (target)
446 && target->succ->dest != EXIT_BLOCK_PTR)
448 /* Bypass trivial infinite loops. */
449 if (target == target->succ->dest)
450 counter = n_basic_blocks;
451 new_target = target->succ->dest;
454 /* Allow threading over at most one edge at a time to simplify updating
455 of probabilities.  */
456 else if (mode & CLEANUP_THREADING)
458 edge t = thread_jump (mode, e, target);
459 if (t)
461 if (!threaded_edges)
462 threaded_edges = xmalloc (sizeof (*threaded_edges)
463 * n_basic_blocks);
464 else
466 int i;
468 /* Detect an infinite loop across blocks not
469 including the start block. */
470 for (i = 0; i < nthreaded_edges; ++i)
471 if (threaded_edges[i] == t)
472 break;
473 if (i < nthreaded_edges)
475 counter = n_basic_blocks;
476 break;
480 /* Detect an infinite loop across the start block. */
481 if (t->dest == b)
482 break;
484 if (nthreaded_edges >= n_basic_blocks)
485 abort ();
486 threaded_edges[nthreaded_edges++] = t;
488 new_target = t->dest;
489 new_target_threaded = true;
493 if (!new_target)
494 break;
496 /* Avoid killing loop pre-headers, as they are the places the loop
497 optimizer wants to hoist code to.
499 For fallthru forwarders, the LOOP_BEG note must appear between
500 the header of the block and the CODE_LABEL of the loop; for non-forwarders
501 it must appear before the JUMP_INSN.  */
502 if (mode & CLEANUP_PRE_LOOP)
504 rtx insn = (target->succ->flags & EDGE_FALLTHRU
505 ? target->head : prev_nonnote_insn (target->end));
507 if (GET_CODE (insn) != NOTE)
508 insn = NEXT_INSN (insn);
510 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
511 insn = NEXT_INSN (insn))
512 if (GET_CODE (insn) == NOTE
513 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
514 break;
516 if (GET_CODE (insn) == NOTE)
517 break;
519 /* Do not clean up branches to just past the end of a loop
520 at this time; it can mess up the loop optimizer's
521 recognition of some patterns. */
523 insn = PREV_INSN (target->head);
524 if (insn && GET_CODE (insn) == NOTE
525 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
526 break;
529 counter++;
530 target = new_target;
531 threaded |= new_target_threaded;
534 if (counter >= n_basic_blocks)
536 if (rtl_dump_file)
537 fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
538 target->index);
540 else if (target == first)
541 ; /* We didn't do anything. */
542 else
544 /* Save the values now, as the edge may get removed. */
545 gcov_type edge_count = e->count;
546 int edge_probability = e->probability;
547 int edge_frequency;
548 int n = 0;
550 /* Don't force if target is exit block. */
551 if (threaded && target != EXIT_BLOCK_PTR)
553 notice_new_block (redirect_edge_and_branch_force (e, target));
554 if (rtl_dump_file)
555 fprintf (rtl_dump_file, "Conditionals threaded.\n");
557 else if (!redirect_edge_and_branch (e, target))
559 if (rtl_dump_file)
560 fprintf (rtl_dump_file,
561 "Forwarding edge %i->%i to %i failed.\n",
562 b->index, e->dest->index, target->index);
563 continue;
566 /* We successfully forwarded the edge. Now update profile
567 data: for each edge we traversed in the chain, remove
568 the original edge's execution count. */
569 edge_frequency = ((edge_probability * b->frequency
570 + REG_BR_PROB_BASE / 2)
571 / REG_BR_PROB_BASE);
573 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
574 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
578 edge t;
580 first->count -= edge_count;
581 if (first->count < 0)
582 first->count = 0;
583 first->frequency -= edge_frequency;
584 if (first->frequency < 0)
585 first->frequency = 0;
586 if (first->succ->succ_next)
588 edge e;
589 int prob;
590 if (n >= nthreaded_edges)
591 abort ();
592 t = threaded_edges [n++];
593 if (t->src != first)
594 abort ();
595 if (first->frequency)
596 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
597 else
598 prob = 0;
599 if (prob > t->probability)
600 prob = t->probability;
601 t->probability -= prob;
602 prob = REG_BR_PROB_BASE - prob;
603 if (prob <= 0)
605 first->succ->probability = REG_BR_PROB_BASE;
606 first->succ->succ_next->probability = 0;
608 else
609 for (e = first->succ; e; e = e->succ_next)
610 e->probability = ((e->probability * REG_BR_PROB_BASE)
611 / (double) prob);
612 update_br_prob_note (first);
614 else
616 /* It is possible that as a result of
617 threading we've removed the edge, as it is
618 threaded to the fallthru edge.  Avoid
619 getting out of sync.  */
620 if (n < nthreaded_edges
621 && first == threaded_edges [n]->src)
622 n++;
623 t = first->succ;
626 t->count -= edge_count;
627 if (t->count < 0)
628 t->count = 0;
629 first = t->dest;
631 while (first != target);
633 changed = true;
637 if (threaded_edges)
638 free (threaded_edges);
639 return changed;
642 /* Return true if LABEL is a target of JUMP_INSN. This applies only
643 to non-complex jumps. That is, direct unconditional, conditional,
644 and tablejumps, but not computed jumps or returns. It also does
645 not apply to the fallthru case of a conditional jump. */
647 static bool
648 label_is_jump_target_p (label, jump_insn)
649 rtx label, jump_insn;
651 rtx tmp = JUMP_LABEL (jump_insn);
653 if (label == tmp)
654 return true;
656 if (tmp != NULL_RTX
657 && (tmp = NEXT_INSN (tmp)) != NULL_RTX
658 && GET_CODE (tmp) == JUMP_INSN
659 && (tmp = PATTERN (tmp),
660 GET_CODE (tmp) == ADDR_VEC
661 || GET_CODE (tmp) == ADDR_DIFF_VEC))
663 rtvec vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);
664 int i, veclen = GET_NUM_ELEM (vec);
666 for (i = 0; i < veclen; ++i)
667 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
668 return true;
671 return false;
674 /* Return true if LABEL is used for tail recursion. */
676 static bool
677 tail_recursion_label_p (label)
678 rtx label;
680 rtx x;
682 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
683 if (label == XEXP (x, 0))
684 return true;
686 return false;
689 /* Blocks A and B are to be merged into a single block. A has no incoming
690 fallthru edge, so it can be moved before B without adding or modifying
691 any jumps (aside from the jump from A to B). */
693 static void
694 merge_blocks_move_predecessor_nojumps (a, b)
695 basic_block a, b;
697 rtx barrier;
699 barrier = next_nonnote_insn (a->end);
700 if (GET_CODE (barrier) != BARRIER)
701 abort ();
702 delete_insn (barrier);
704 /* Move block and loop notes out of the chain so that we do not
705 disturb their order.
707 ??? A better solution would be to squeeze out all the non-nested notes
708 and adjust the block trees appropriately. Even better would be to have
709 a tighter connection between block trees and rtl so that this is not
710 necessary. */
711 if (squeeze_notes (&a->head, &a->end))
712 abort ();
714 /* Scramble the insn chain. */
715 if (a->end != PREV_INSN (b->head))
716 reorder_insns_nobb (a->head, a->end, PREV_INSN (b->head));
717 a->flags |= BB_DIRTY;
719 if (rtl_dump_file)
720 fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
721 a->index, b->index);
723 /* Swap the records for the two blocks around. */
725 unlink_block (a);
726 link_block (a, b->prev_bb);
728 /* Now blocks A and B are contiguous. Merge them. */
729 merge_blocks_nomove (a, b);
732 /* Blocks A and B are to be merged into a single block. B has no outgoing
733 fallthru edge, so it can be moved after A without adding or modifying
734 any jumps (aside from the jump from A to B). */
736 static void
737 merge_blocks_move_successor_nojumps (a, b)
738 basic_block a, b;
740 rtx barrier, real_b_end;
742 real_b_end = b->end;
743 barrier = NEXT_INSN (b->end);
745 /* Recognize a jump table following block B. */
746 if (barrier
747 && GET_CODE (barrier) == CODE_LABEL
748 && NEXT_INSN (barrier)
749 && GET_CODE (NEXT_INSN (barrier)) == JUMP_INSN
750 && (GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_VEC
751 || GET_CODE (PATTERN (NEXT_INSN (barrier))) == ADDR_DIFF_VEC))
753 /* Temporarily add the table jump insn to b, so that it will also
754 be moved to the correct location. */
755 b->end = NEXT_INSN (barrier);
756 barrier = NEXT_INSN (b->end);
759 /* There had better have been a barrier there. Delete it. */
760 if (barrier && GET_CODE (barrier) == BARRIER)
761 delete_insn (barrier);
763 /* Move block and loop notes out of the chain so that we do not
764 disturb their order.
766 ??? A better solution would be to squeeze out all the non-nested notes
767 and adjust the block trees appropriately. Even better would be to have
768 a tighter connection between block trees and rtl so that this is not
769 necessary. */
770 if (squeeze_notes (&b->head, &b->end))
771 abort ();
773 /* Scramble the insn chain. */
774 reorder_insns_nobb (b->head, b->end, a->end);
776 /* Restore the real end of b. */
777 b->end = real_b_end;
779 if (rtl_dump_file)
780 fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
781 b->index, a->index);
783 /* Now blocks A and B are contiguous. Merge them. */
784 merge_blocks_nomove (a, b);
787 /* Attempt to merge basic blocks that are potentially non-adjacent.
788 Return true iff the attempt succeeded. */
790 static bool
791 merge_blocks (e, b, c, mode)
792 edge e;
793 basic_block b, c;
794 int mode;
796 /* If C has a tail recursion label, do not merge. There is no
797 edge recorded from the call_placeholder back to this label, as
798 that would make optimize_sibling_and_tail_recursive_calls more
799 complex for no gain. */
800 if ((mode & CLEANUP_PRE_SIBCALL)
801 && GET_CODE (c->head) == CODE_LABEL
802 && tail_recursion_label_p (c->head))
803 return false;
805 /* If B has a fallthru edge to C, no need to move anything. */
806 if (e->flags & EDGE_FALLTHRU)
808 int b_index = b->index, c_index = c->index;
809 merge_blocks_nomove (b, c);
810 update_forwarder_flag (b);
812 if (rtl_dump_file)
813 fprintf (rtl_dump_file, "Merged %d and %d without moving.\n",
814 b_index, c_index);
816 return true;
819 /* Otherwise we will need to move code around. Do that only if expensive
820 transformations are allowed. */
821 else if (mode & CLEANUP_EXPENSIVE)
823 edge tmp_edge, b_fallthru_edge;
824 bool c_has_outgoing_fallthru;
825 bool b_has_incoming_fallthru;
827 /* Avoid overactive code motion, as the forwarder blocks should be
828 eliminated by edge redirection instead.  One exception might be the
829 case where B is a forwarder block and C has no fallthru edge, but
830 that should be cleaned up by bb-reorder instead.  */
831 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
832 return false;
834 /* We must make sure not to munge the nesting of lexical blocks
835 and loop notes.  This is done by squeezing out all the notes
836 and leaving them where they lie.  Not ideal, but functional.  */
838 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
839 if (tmp_edge->flags & EDGE_FALLTHRU)
840 break;
842 c_has_outgoing_fallthru = (tmp_edge != NULL);
844 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
845 if (tmp_edge->flags & EDGE_FALLTHRU)
846 break;
848 b_has_incoming_fallthru = (tmp_edge != NULL);
849 b_fallthru_edge = tmp_edge;
851 /* Otherwise, we're going to try to move C after B. If C does
852 not have an outgoing fallthru, then it can be moved
853 immediately after B without introducing or modifying jumps. */
854 if (! c_has_outgoing_fallthru)
856 merge_blocks_move_successor_nojumps (b, c);
857 return true;
860 /* If B does not have an incoming fallthru, then it can be moved
861 immediately before C without introducing or modifying jumps.
862 C cannot be the first block, so we do not have to worry about
863 accessing a non-existent block. */
865 if (b_has_incoming_fallthru)
867 basic_block bb;
869 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
870 return false;
871 bb = force_nonfallthru (b_fallthru_edge);
872 if (bb)
873 notice_new_block (bb);
876 merge_blocks_move_predecessor_nojumps (b, c);
877 return true;
880 return false;
884 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
886 static bool
887 insns_match_p (mode, i1, i2)
888 int mode ATTRIBUTE_UNUSED;
889 rtx i1, i2;
891 rtx p1, p2;
893 /* Verify that I1 and I2 are equivalent. */
894 if (GET_CODE (i1) != GET_CODE (i2))
895 return false;
897 p1 = PATTERN (i1);
898 p2 = PATTERN (i2);
900 if (GET_CODE (p1) != GET_CODE (p2))
901 return false;
903 /* If this is a CALL_INSN, compare register usage information.
904 If we don't check this on stack register machines, the two
905 CALL_INSNs might be merged leaving reg-stack.c with mismatching
906 numbers of stack registers in the same basic block.
907 If we don't check this on machines with delay slots, a delay slot may
908 be filled that clobbers a parameter expected by the subroutine.
910 ??? We take the simple route for now and assume that if they're
911 equal, they were constructed identically. */
913 if (GET_CODE (i1) == CALL_INSN
914 && !rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
915 CALL_INSN_FUNCTION_USAGE (i2)))
916 return false;
918 #ifdef STACK_REGS
919 /* If cross_jump_death_matters is not 0, the insn's mode
920 indicates whether or not the insn contains any stack-like
921 regs. */
923 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
925 /* If register stack conversion has already been done, then
926 death notes must also be compared before it is certain that
927 the two instruction streams match. */
929 rtx note;
930 HARD_REG_SET i1_regset, i2_regset;
932 CLEAR_HARD_REG_SET (i1_regset);
933 CLEAR_HARD_REG_SET (i2_regset);
935 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
936 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
937 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
939 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
940 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
941 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
943 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
945 return false;
947 done:
950 #endif
952 if (reload_completed
953 ? ! rtx_renumbered_equal_p (p1, p2) : ! rtx_equal_p (p1, p2))
955 /* The following code helps take care of G++ cleanups. */
956 rtx equiv1 = find_reg_equal_equiv_note (i1);
957 rtx equiv2 = find_reg_equal_equiv_note (i2);
959 if (equiv1 && equiv2
960 /* If the equivalences are not to a constant, they may
961 reference pseudos that no longer exist, so we can't
962 use them. */
963 && (! reload_completed
964 || (CONSTANT_P (XEXP (equiv1, 0))
965 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
967 rtx s1 = single_set (i1);
968 rtx s2 = single_set (i2);
969 if (s1 != 0 && s2 != 0
970 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
972 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
973 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
974 if (! rtx_renumbered_equal_p (p1, p2))
975 cancel_changes (0);
976 else if (apply_change_group ())
977 return true;
981 return false;
984 return true;
987 /* Look through the insns at the end of BB1 and BB2 and find the longest
988 sequence of equivalent insns.  Store the first insns of that sequence
989 in *F1 and *F2 and return the sequence length.
991 To simplify callers of this function, if the blocks match exactly,
992 store the head of the blocks in *F1 and *F2. */
994 static int
995 flow_find_cross_jump (mode, bb1, bb2, f1, f2)
996 int mode ATTRIBUTE_UNUSED;
997 basic_block bb1, bb2;
998 rtx *f1, *f2;
1000 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1001 int ninsns = 0;
1003 /* Skip simple jumps at the end of the blocks. Complex jumps still
1004 need to be compared for equivalence, which we'll do below. */
1006 i1 = bb1->end;
1007 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1008 if (onlyjump_p (i1)
1009 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1011 last1 = i1;
1012 i1 = PREV_INSN (i1);
1015 i2 = bb2->end;
1016 if (onlyjump_p (i2)
1017 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1019 last2 = i2;
1020 /* Count everything except an unconditional jump as an insn.  */
1021 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1022 ninsns++;
1023 i2 = PREV_INSN (i2);
1026 while (true)
1028 /* Ignore notes. */
1029 while (!active_insn_p (i1) && i1 != bb1->head)
1030 i1 = PREV_INSN (i1);
1032 while (!active_insn_p (i2) && i2 != bb2->head)
1033 i2 = PREV_INSN (i2);
1035 if (i1 == bb1->head || i2 == bb2->head)
1036 break;
1038 if (!insns_match_p (mode, i1, i2))
1039 break;
1041 /* Don't begin a cross-jump with a USE or CLOBBER insn. */
1042 if (active_insn_p (i1))
1044 /* If the merged insns have different REG_EQUAL notes, then
1045 remove them. */
1046 rtx equiv1 = find_reg_equal_equiv_note (i1);
1047 rtx equiv2 = find_reg_equal_equiv_note (i2);
1049 if (equiv1 && !equiv2)
1050 remove_note (i1, equiv1);
1051 else if (!equiv1 && equiv2)
1052 remove_note (i2, equiv2);
1053 else if (equiv1 && equiv2
1054 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1056 remove_note (i1, equiv1);
1057 remove_note (i2, equiv2);
1060 afterlast1 = last1, afterlast2 = last2;
1061 last1 = i1, last2 = i2;
1062 ninsns++;
1065 i1 = PREV_INSN (i1);
1066 i2 = PREV_INSN (i2);
1069 #ifdef HAVE_cc0
1070 /* Don't allow the insn after a compare to be shared by
1071 cross-jumping unless the compare is also shared. */
1072 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1073 last1 = afterlast1, last2 = afterlast2, ninsns--;
1074 #endif
1076 /* Include preceding notes and labels in the cross-jump. One,
1077 this may bring us to the head of the blocks as requested above.
1078 Two, it keeps line number notes as matched as may be. */
1079 if (ninsns)
1081 while (last1 != bb1->head && !active_insn_p (PREV_INSN (last1)))
1082 last1 = PREV_INSN (last1);
1084 if (last1 != bb1->head && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1085 last1 = PREV_INSN (last1);
1087 while (last2 != bb2->head && !active_insn_p (PREV_INSN (last2)))
1088 last2 = PREV_INSN (last2);
1090 if (last2 != bb2->head && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1091 last2 = PREV_INSN (last2);
1093 *f1 = last1;
1094 *f2 = last2;
1097 return ninsns;
1100 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1101 the branch instruction.  This means that if we commonize the control
1102 flow before the end of the basic block, the semantics remain unchanged.
1104 We may assume that there exists one edge with a common destination.  */
1106 static bool
1107 outgoing_edges_match (mode, bb1, bb2)
1108 int mode;
1109 basic_block bb1;
1110 basic_block bb2;
1112 int nehedges1 = 0, nehedges2 = 0;
1113 edge fallthru1 = 0, fallthru2 = 0;
1114 edge e1, e2;
1116 /* If BB1 has only one successor, we may be looking at either an
1117 unconditional jump, or a fake edge to exit. */
1118 if (bb1->succ && !bb1->succ->succ_next
1119 && !(bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)))
1120 return (bb2->succ && !bb2->succ->succ_next
1121 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0);
1123 /* Match conditional jumps - this may get tricky when fallthru and branch
1124 edges are crossed. */
1125 if (bb1->succ
1126 && bb1->succ->succ_next
1127 && !bb1->succ->succ_next->succ_next
1128 && any_condjump_p (bb1->end)
1129 && onlyjump_p (bb1->end))
1131 edge b1, f1, b2, f2;
1132 bool reverse, match;
1133 rtx set1, set2, cond1, cond2;
1134 enum rtx_code code1, code2;
1136 if (!bb2->succ
1137 || !bb2->succ->succ_next
1138 || bb2->succ->succ_next->succ_next
1139 || !any_condjump_p (bb2->end)
1140 || !onlyjump_p (bb2->end))
1141 return false;
1143 /* Do not crossjump across loop boundaries. This is a temporary
1144 workaround for the common scenario in which crossjumping results
1145 in killing the duplicated loop condition, making bb-reorder rotate
1146 the loop incorrectly, leaving an extra unconditional jump inside
1147 the loop.
1149 This check should go away once bb-reorder knows how to duplicate
1150 code in this case or rotate the loops to avoid this scenario. */
1151 if (bb1->loop_depth != bb2->loop_depth)
1152 return false;
1154 b1 = BRANCH_EDGE (bb1);
1155 b2 = BRANCH_EDGE (bb2);
1156 f1 = FALLTHRU_EDGE (bb1);
1157 f2 = FALLTHRU_EDGE (bb2);
1159 /* Get around possible forwarders on fallthru edges. Other cases
1160 should be optimized out already. */
1161 if (FORWARDER_BLOCK_P (f1->dest))
1162 f1 = f1->dest->succ;
1164 if (FORWARDER_BLOCK_P (f2->dest))
1165 f2 = f2->dest->succ;
1167 /* To simplify use of this function, return false if there are
1168 unneeded forwarder blocks. These will get eliminated later
1169 during cleanup_cfg. */
1170 if (FORWARDER_BLOCK_P (f1->dest)
1171 || FORWARDER_BLOCK_P (f2->dest)
1172 || FORWARDER_BLOCK_P (b1->dest)
1173 || FORWARDER_BLOCK_P (b2->dest))
1174 return false;
1176 if (f1->dest == f2->dest && b1->dest == b2->dest)
1177 reverse = false;
1178 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1179 reverse = true;
1180 else
1181 return false;
1183 set1 = pc_set (bb1->end);
1184 set2 = pc_set (bb2->end);
1185 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1186 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1187 reverse = !reverse;
1189 cond1 = XEXP (SET_SRC (set1), 0);
1190 cond2 = XEXP (SET_SRC (set2), 0);
1191 code1 = GET_CODE (cond1);
1192 if (reverse)
1193 code2 = reversed_comparison_code (cond2, bb2->end);
1194 else
1195 code2 = GET_CODE (cond2);
1197 if (code2 == UNKNOWN)
1198 return false;
1200 /* Verify codes and operands match. */
1201 match = ((code1 == code2
1202 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1203 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1204 || (code1 == swap_condition (code2)
1205 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1206 XEXP (cond2, 0))
1207 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1208 XEXP (cond2, 1))));
1210 /* If we return true, we will join the blocks, which means that
1211 we will only have one branch prediction bit to work with.  Thus
1212 we require the existing branches to have probabilities that are
1213 roughly similar.  */
1214 if (match
1215 && !optimize_size
1216 && maybe_hot_bb_p (bb1)
1217 && maybe_hot_bb_p (bb2))
1219 int prob2;
1221 if (b1->dest == b2->dest)
1222 prob2 = b2->probability;
1223 else
1224 /* Do not use f2 probability as f2 may be forwarded. */
1225 prob2 = REG_BR_PROB_BASE - b2->probability;
1227 /* Fail if the difference in probabilities is greater than 50%.
1228 This rules out two well-predicted branches with opposite
1229 outcomes. */
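/* For example, assuming REG_BR_PROB_BASE of 10000, probabilities of 9000
   and 500 differ by 8500 > 5000, so two such branches would not be
   considered for merging.  */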
1230 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1232 if (rtl_dump_file)
1233 fprintf (rtl_dump_file,
1234 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1235 bb1->index, bb2->index, b1->probability, prob2);
1237 return false;
1241 if (rtl_dump_file && match)
1242 fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
1243 bb1->index, bb2->index);
1245 return match;
1248 /* Generic case - we are seeing a computed jump, table jump or trapping
1249 instruction.  */
1251 /* First ensure that the instructions match. There may be many outgoing
1252 edges so this test is generally cheaper.
1253 ??? Currently tablejumps will never match, as they have
1254 different tables.  */
1255 if (!insns_match_p (mode, bb1->end, bb2->end))
1256 return false;
1258 /* Search the outgoing edges, ensure that the counts match, and find possible
1259 fallthru and exception handling edges, since these need more
1260 validation.  */
1261 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1262 e1 = e1->succ_next, e2 = e2->succ_next)
1264 if (e1->flags & EDGE_EH)
1265 nehedges1++;
1267 if (e2->flags & EDGE_EH)
1268 nehedges2++;
1270 if (e1->flags & EDGE_FALLTHRU)
1271 fallthru1 = e1;
1272 if (e2->flags & EDGE_FALLTHRU)
1273 fallthru2 = e2;
1276 /* If the numbers of edges of the various types do not match, fail.  */
1277 if (e1 || e2
1278 || nehedges1 != nehedges2
1279 || (fallthru1 != 0) != (fallthru2 != 0))
1280 return false;
1282 /* Fallthru edges must be forwarded to the same destination.  */
1283 if (fallthru1)
1285 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1286 ? fallthru1->dest->succ->dest: fallthru1->dest);
1287 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1288 ? fallthru2->dest->succ->dest: fallthru2->dest);
1290 if (d1 != d2)
1291 return false;
1294 /* In case we do have EH edges, ensure we are in the same region. */
1295 if (nehedges1)
1297 rtx n1 = find_reg_note (bb1->end, REG_EH_REGION, 0);
1298 rtx n2 = find_reg_note (bb2->end, REG_EH_REGION, 0);
1300 if (XEXP (n1, 0) != XEXP (n2, 0))
1301 return false;
1304 /* We don't need to match the rest of the edges, as the above checks should be
1305 enough to ensure that they are equivalent.  */
1306 return true;
1309 /* E1 and E2 are edges with the same destination block. Search their
1310 predecessors for common code. If found, redirect control flow from
1311 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
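/* For example, when both predecessors end with the same insn sequence,
   E2->SRC is (if necessary) split so that the shared tail becomes a block of
   its own, E1->SRC is split just before its copy of that sequence and
   redirected to the shared block, and the now-unreachable duplicate copy is
   deleted.  */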
1313 static bool
1314 try_crossjump_to_edge (mode, e1, e2)
1315 int mode;
1316 edge e1, e2;
1318 int nmatch;
1319 basic_block src1 = e1->src, src2 = e2->src;
1320 basic_block redirect_to, redirect_from, to_remove;
1321 rtx newpos1, newpos2;
1322 edge s;
1324 /* Search backward through forwarder blocks. We don't need to worry
1325 about multiple entry or chained forwarders, as they will be optimized
1326 away. We do this to look past the unconditional jump following a
1327 conditional jump that is required due to the current CFG shape. */
1328 if (src1->pred
1329 && !src1->pred->pred_next
1330 && FORWARDER_BLOCK_P (src1))
1331 e1 = src1->pred, src1 = e1->src;
1333 if (src2->pred
1334 && !src2->pred->pred_next
1335 && FORWARDER_BLOCK_P (src2))
1336 e2 = src2->pred, src2 = e2->src;
1338 /* Nothing to do if we reach ENTRY, or a common source block. */
1339 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1340 return false;
1341 if (src1 == src2)
1342 return false;
1344 /* Seeing more than one forwarder block would confuse us later...  */
1345 if (FORWARDER_BLOCK_P (e1->dest)
1346 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1347 return false;
1349 if (FORWARDER_BLOCK_P (e2->dest)
1350 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1351 return false;
1353 /* Likewise with dead code (possibly newly created by the other optimizations
1354 of cfg_cleanup). */
1355 if (!src1->pred || !src2->pred)
1356 return false;
1358 /* Look for the common insn sequence, part the first ... */
1359 if (!outgoing_edges_match (mode, src1, src2))
1360 return false;
1362 /* ... and part the second. */
1363 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1364 if (!nmatch)
1365 return false;
1367 /* Avoid splitting if possible. */
1368 if (newpos2 == src2->head)
1369 redirect_to = src2;
1370 else
1372 if (rtl_dump_file)
1373 fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
1374 src2->index, nmatch);
1375 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1378 if (rtl_dump_file)
1379 fprintf (rtl_dump_file,
1380 "Cross jumping from bb %i to bb %i; %i common insns\n",
1381 src1->index, src2->index, nmatch);
1383 redirect_to->count += src1->count;
1384 redirect_to->frequency += src1->frequency;
1385 /* We may have some registers visible through the block.  */
1386 redirect_to->flags |= BB_DIRTY;
1388 /* Recompute the frequencies and counts of outgoing edges. */
1389 for (s = redirect_to->succ; s; s = s->succ_next)
1391 edge s2;
1392 basic_block d = s->dest;
1394 if (FORWARDER_BLOCK_P (d))
1395 d = d->succ->dest;
1397 for (s2 = src1->succ; ; s2 = s2->succ_next)
1399 basic_block d2 = s2->dest;
1400 if (FORWARDER_BLOCK_P (d2))
1401 d2 = d2->succ->dest;
1402 if (d == d2)
1403 break;
1406 s->count += s2->count;
1408 /* Take care to update possible forwarder blocks.  We verified
1409 that there is no more than one in the chain, so we can't run
1410 into an infinite loop.  */
1411 if (FORWARDER_BLOCK_P (s->dest))
1413 s->dest->succ->count += s2->count;
1414 s->dest->count += s2->count;
1415 s->dest->frequency += EDGE_FREQUENCY (s);
1418 if (FORWARDER_BLOCK_P (s2->dest))
1420 s2->dest->succ->count -= s2->count;
1421 if (s2->dest->succ->count < 0)
1422 s2->dest->succ->count = 0;
1423 s2->dest->count -= s2->count;
1424 s2->dest->frequency -= EDGE_FREQUENCY (s);
1425 if (s2->dest->frequency < 0)
1426 s2->dest->frequency = 0;
1427 if (s2->dest->count < 0)
1428 s2->dest->count = 0;
1431 if (!redirect_to->frequency && !src1->frequency)
1432 s->probability = (s->probability + s2->probability) / 2;
1433 else
1434 s->probability
1435 = ((s->probability * redirect_to->frequency +
1436 s2->probability * src1->frequency)
1437 / (redirect_to->frequency + src1->frequency));
1440 update_br_prob_note (redirect_to);
1442 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1444 /* Skip possible basic block header. */
1445 if (GET_CODE (newpos1) == CODE_LABEL)
1446 newpos1 = NEXT_INSN (newpos1);
1448 if (GET_CODE (newpos1) == NOTE)
1449 newpos1 = NEXT_INSN (newpos1);
1451 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1452 to_remove = redirect_from->succ->dest;
1454 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1455 flow_delete_block (to_remove);
1457 update_forwarder_flag (redirect_from);
1459 return true;
1462 /* Search the predecessors of BB for common insn sequences. When found,
1463 share code between them by redirecting control flow. Return true if
1464 any changes were made.  */
1466 static bool
1467 try_crossjump_bb (mode, bb)
1468 int mode;
1469 basic_block bb;
1471 edge e, e2, nexte2, nexte, fallthru;
1472 bool changed;
1473 int n = 0;
1475 /* Nothing to do if there are not at least two incoming edges.  */
1476 if (!bb->pred || !bb->pred->pred_next)
1477 return false;
1479 /* It is always cheapest to redirect a block that ends in a branch to
1480 a block that falls through into BB, as that adds no branches to the
1481 program. We'll try that combination first. */
1482 for (fallthru = bb->pred; fallthru; fallthru = fallthru->pred_next, n++)
1484 if (fallthru->flags & EDGE_FALLTHRU)
1485 break;
1486 if (n > 100)
1487 return false;
1490 changed = false;
1491 for (e = bb->pred; e; e = nexte)
1493 nexte = e->pred_next;
1495 /* As noted above, first try with the fallthru predecessor. */
1496 if (fallthru)
1498 /* Don't combine the fallthru edge into anything else.
1499 If there is a match, we'll do it the other way around. */
1500 if (e == fallthru)
1501 continue;
1503 if (try_crossjump_to_edge (mode, e, fallthru))
1505 changed = true;
1506 nexte = bb->pred;
1507 continue;
1511 /* Non-obvious work limiting check: Recognize that we're going
1512 to call try_crossjump_bb on every basic block. So if we have
1513 two blocks with lots of outgoing edges (a switch) and they
1514 share lots of common destinations, then we would do the
1515 cross-jump check once for each common destination.
1517 Now, if the blocks actually are cross-jump candidates, then
1518 all of their destinations will be shared. Which means that
1519 we only need check them for cross-jump candidacy once. We
1520 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1521 choosing to do the check from the block for which the edge
1522 in question is the first successor of A. */
1523 if (e->src->succ != e)
1524 continue;
1526 for (e2 = bb->pred; e2; e2 = nexte2)
1528 nexte2 = e2->pred_next;
1530 if (e2 == e)
1531 continue;
1533 /* We've already checked the fallthru edge above. */
1534 if (e2 == fallthru)
1535 continue;
1537 /* The "first successor" check above only prevents multiple
1538 checks of crossjump(A,B). In order to prevent redundant
1539 checks of crossjump(B,A), require that A be the block
1540 with the lowest index. */
1541 if (e->src->index > e2->src->index)
1542 continue;
1544 if (try_crossjump_to_edge (mode, e, e2))
1546 changed = true;
1547 nexte = bb->pred;
1548 break;
1553 return changed;
1556 /* Do simple CFG optimizations - basic block merging, simplification of jump
1557 instructions, etc.  Return nonzero if changes were made.  */
1559 static bool
1560 try_optimize_cfg (mode)
1561 int mode;
1563 bool changed_overall = false;
1564 bool changed;
1565 int iterations = 0;
1566 basic_block bb, b;
1568 if (mode & CLEANUP_CROSSJUMP)
1569 add_noreturn_fake_exit_edges ();
1571 FOR_EACH_BB (bb)
1572 update_forwarder_flag (bb);
1574 if (mode & CLEANUP_UPDATE_LIFE)
1575 clear_bb_flags ();
1577 if (! (* targetm.cannot_modify_jumps_p) ())
1579 /* Attempt to merge blocks as made possible by edge removal. If
1580 a block has only one successor, and the successor has only
1581 one predecessor, they may be combined. */
1584 changed = false;
1585 iterations++;
1587 if (rtl_dump_file)
1588 fprintf (rtl_dump_file,
1589 "\n\ntry_optimize_cfg iteration %i\n\n",
1590 iterations);
1592 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1594 basic_block c;
1595 edge s;
1596 bool changed_here = false;
1598 /* Delete trivially dead basic blocks. */
1599 while (b->pred == NULL)
1601 c = b->prev_bb;
1602 if (rtl_dump_file)
1603 fprintf (rtl_dump_file, "Deleting block %i.\n",
1604 b->index);
1606 flow_delete_block (b);
1607 changed = true;
1608 b = c;
1611 /* Remove code labels no longer used. Don't do this
1612 before CALL_PLACEHOLDER is removed, as some branches
1613 may be hidden within. */
1614 if (b->pred->pred_next == NULL
1615 && (b->pred->flags & EDGE_FALLTHRU)
1616 && !(b->pred->flags & EDGE_COMPLEX)
1617 && GET_CODE (b->head) == CODE_LABEL
1618 && (!(mode & CLEANUP_PRE_SIBCALL)
1619 || !tail_recursion_label_p (b->head))
1620 /* If the previous block ends with a branch to this
1621 block, we can't delete the label. Normally this
1622 is a condjump that is yet to be simplified, but
1623 if CASE_DROPS_THRU, this can be a tablejump with
1624 some element going to the same place as the
1625 default (fallthru). */
1626 && (b->pred->src == ENTRY_BLOCK_PTR
1627 || GET_CODE (b->pred->src->end) != JUMP_INSN
1628 || ! label_is_jump_target_p (b->head,
1629 b->pred->src->end)))
1631 rtx label = b->head;
1633 b->head = NEXT_INSN (b->head);
1634 delete_insn_chain (label, label);
1635 if (rtl_dump_file)
1636 fprintf (rtl_dump_file, "Deleted label in block %i.\n",
1637 b->index);
1640 /* If we fall through an empty block, we can remove it. */
1641 if (b->pred->pred_next == NULL
1642 && (b->pred->flags & EDGE_FALLTHRU)
1643 && GET_CODE (b->head) != CODE_LABEL
1644 && FORWARDER_BLOCK_P (b)
1645 /* Note that forwarder_block_p true ensures that
1646 there is a successor for this block. */
1647 && (b->succ->flags & EDGE_FALLTHRU)
1648 && n_basic_blocks > 1)
1650 if (rtl_dump_file)
1651 fprintf (rtl_dump_file,
1652 "Deleting fallthru block %i.\n",
1653 b->index);
1655 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1656 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1657 flow_delete_block (b);
1658 changed = true;
1659 b = c;
1662 /* Merge blocks. Loop because chains of blocks might be
1663 combinable.  */
1664 while ((s = b->succ) != NULL
1665 && s->succ_next == NULL
1666 && !(s->flags & EDGE_COMPLEX)
1667 && (c = s->dest) != EXIT_BLOCK_PTR
1668 && c->pred->pred_next == NULL
1669 && b != c
1670 /* If the jump insn has side effects,
1671 we can't kill the edge. */
1672 && (GET_CODE (b->end) != JUMP_INSN
1673 || simplejump_p (b->end))
1674 && merge_blocks (s, b, c, mode))
1675 changed_here = true;
1677 /* Simplify branch over branch. */
1678 if ((mode & CLEANUP_EXPENSIVE) && try_simplify_condjump (b))
1679 changed_here = true;
1681 /* If B has a single outgoing edge, but uses a
1682 non-trivial jump instruction without side-effects, we
1683 can either delete the jump entirely, or replace it
1684 with a simple unconditional jump. Use
1685 redirect_edge_and_branch to do the dirty work. */
1686 if (b->succ
1687 && ! b->succ->succ_next
1688 && b->succ->dest != EXIT_BLOCK_PTR
1689 && onlyjump_p (b->end)
1690 && redirect_edge_and_branch (b->succ, b->succ->dest))
1692 update_forwarder_flag (b);
1693 changed_here = true;
1696 /* Simplify branch to branch. */
1697 if (try_forward_edges (mode, b))
1698 changed_here = true;
1700 /* Look for shared code between blocks. */
1701 if ((mode & CLEANUP_CROSSJUMP)
1702 && try_crossjump_bb (mode, b))
1703 changed_here = true;
1705 /* Don't get confused by the index shift caused by
1706 deleting blocks. */
1707 if (!changed_here)
1708 b = b->next_bb;
1709 else
1710 changed = true;
1713 if ((mode & CLEANUP_CROSSJUMP)
1714 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1715 changed = true;
1717 #ifdef ENABLE_CHECKING
1718 if (changed)
1719 verify_flow_info ();
1720 #endif
1722 changed_overall |= changed;
1724 while (changed);
1727 if (mode & CLEANUP_CROSSJUMP)
1728 remove_fake_edges ();
1730 clear_aux_for_blocks ();
1732 return changed_overall;
1735 /* Delete all unreachable basic blocks. */
1737 bool
1738 delete_unreachable_blocks ()
1740 bool changed = false;
1741 basic_block b, next_bb;
1743 find_unreachable_blocks ();
1745 /* Delete all unreachable basic blocks. */
1747 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
1749 next_bb = b->next_bb;
1751 if (!(b->flags & BB_REACHABLE))
1753 flow_delete_block (b);
1754 changed = true;
1758 if (changed)
1759 tidy_fallthru_edges ();
1760 return changed;
1763 /* Tidy the CFG by deleting unreachable code and whatnot. */
1765 bool
1766 cleanup_cfg (mode)
1767 int mode;
1769 bool changed = false;
1771 timevar_push (TV_CLEANUP_CFG);
1772 if (delete_unreachable_blocks ())
1774 changed = true;
1775 /* We've possibly created trivially dead code.  Clean it up right
1776 now to introduce more opportunities for try_optimize_cfg.  */
1777 if (!(mode & (CLEANUP_NO_INSN_DEL
1778 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
1779 && !reload_completed)
1780 delete_trivially_dead_insns (get_insns(), max_reg_num ());
1783 compact_blocks ();
1785 while (try_optimize_cfg (mode))
1787 delete_unreachable_blocks (), changed = true;
1788 if (mode & CLEANUP_UPDATE_LIFE)
1790 /* Cleaning up the CFG introduces more opportunities for dead code
1791 removal, which in turn may introduce more opportunities for
1792 cleaning up the CFG.  */
1793 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
1794 PROP_DEATH_NOTES
1795 | PROP_SCAN_DEAD_CODE
1796 | PROP_KILL_DEAD_CODE
1797 | PROP_LOG_LINKS))
1798 break;
1800 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
1801 && !reload_completed)
1803 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
1804 break;
1806 else
1807 break;
1808 delete_dead_jumptables ();
1811 /* Kill the data we won't maintain. */
1812 free_EXPR_LIST_list (&label_value_list);
1813 timevar_pop (TV_CLEANUP_CFG);
1815 return changed;