[official-gcc.git] / gcc / cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer. The main entry point is
23 cleanup_cfg. The following optimizations are performed:
25 - Unreachable blocks removal
26 - Edge forwarding (an edge to a forwarder block is forwarded to its
27 successor. Simplification of the branch instruction is performed by the
28 underlying infrastructure so the branch can be converted to a simplejump
29 or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "expr.h"
55 /* cleanup_cfg maintains the following flags for each basic block. */
57 enum bb_flags
59 /* Set if BB is a forwarder block, to avoid too many
60 forwarder_block_p calls. */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
73 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
81 static bool tail_recursion_label_p (rtx);
82 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
83 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
84 static bool try_optimize_cfg (int);
85 static bool try_simplify_condjump (basic_block);
86 static bool try_forward_edges (int, basic_block);
87 static edge thread_jump (int, edge, basic_block);
88 static bool mark_effect (rtx, bitmap);
89 static void notice_new_block (basic_block);
90 static void update_forwarder_flag (basic_block);
91 static int mentions_nonequal_regs (rtx *, void *);
92 static void merge_memattrs (rtx, rtx);
94 /* Set flags for a newly created block. */
96 static void
97 notice_new_block (basic_block bb)
99 if (!bb)
100 return;
102 if (forwarder_block_p (bb))
103 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
106 /* Recompute the forwarder flag after the block has been modified. */
108 static void
109 update_forwarder_flag (basic_block bb)
111 if (forwarder_block_p (bb))
112 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
113 else
114 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
117 /* Simplify a conditional jump around an unconditional jump.
118 Return true if something changed. */
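/* For example, a conditional jump around a simplejump

       if (cond) goto L2;               if (!cond) goto L1;
       goto L1;                 ==>   L2:
     L2:                                 ...

   is handled by inverting the condition, retargeting the conditional
   branch at L1, and deleting the block that held the bare jump.  */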
120 static bool
121 try_simplify_condjump (basic_block cbranch_block)
123 basic_block jump_block, jump_dest_block, cbranch_dest_block;
124 edge cbranch_jump_edge, cbranch_fallthru_edge;
125 rtx cbranch_insn;
126 rtx insn, next;
127 rtx end;
129 /* Verify that there are exactly two successors. */
130 if (!cbranch_block->succ
131 || !cbranch_block->succ->succ_next
132 || cbranch_block->succ->succ_next->succ_next)
133 return false;
135 /* Verify that we've got a normal conditional branch at the end
136 of the block. */
137 cbranch_insn = BB_END (cbranch_block);
138 if (!any_condjump_p (cbranch_insn))
139 return false;
141 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
142 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
144 /* The next block must not have multiple predecessors, must not
145 be the last block in the function, and must contain just the
146 unconditional jump. */
147 jump_block = cbranch_fallthru_edge->dest;
148 if (jump_block->pred->pred_next
149 || jump_block->next_bb == EXIT_BLOCK_PTR
150 || !FORWARDER_BLOCK_P (jump_block))
151 return false;
152 jump_dest_block = jump_block->succ->dest;
154 /* If we are partitioning hot/cold basic blocks, we don't want to
155 mess up unconditional or indirect jumps that cross between hot
156 and cold sections. */
158 if (flag_reorder_blocks_and_partition
159 && (jump_block->partition != jump_dest_block->partition
160 || cbranch_jump_edge->crossing_edge))
161 return false;
163 /* The conditional branch must target the block after the
164 unconditional branch. */
165 cbranch_dest_block = cbranch_jump_edge->dest;
167 if (!can_fallthru (jump_block, cbranch_dest_block))
168 return false;
170 /* Invert the conditional branch. */
171 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
172 return false;
174 if (dump_file)
175 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
176 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
178 /* Success. Update the CFG to match. Note that after this point
179 the edge variable names appear backwards; the redirection is done
180 this way to preserve edge profile data. */
181 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
182 cbranch_dest_block);
183 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
184 jump_dest_block);
185 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
186 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
187 update_br_prob_note (cbranch_block);
189 end = BB_END (jump_block);
190 /* Deleting a block may produce an unreachable-code warning even when we are
191 not deleting anything live. Suppress it by moving all the line number
192 notes out of the block. */
193 for (insn = BB_HEAD (jump_block); insn != NEXT_INSN (BB_END (jump_block));
194 insn = next)
196 next = NEXT_INSN (insn);
197 if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
199 if (insn == BB_END (jump_block))
201 BB_END (jump_block) = PREV_INSN (insn);
202 if (insn == end)
203 break;
205 reorder_insns_nobb (insn, insn, end);
206 end = insn;
209 /* Delete the block with the unconditional jump, and clean up the mess. */
210 delete_basic_block (jump_block);
211 tidy_fallthru_edge (cbranch_jump_edge);
213 return true;
216 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
217 effect on a register. Used by jump threading. */
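/* For example, a SET of a register to a value not provably equal to its
   previous contents marks that register (and, for a hard register, each
   of its words) in NONEQUAL; a CLOBBER clears it, since the clobbered
   value is dead and need not match; a SET whose destination is neither
   a register nor the PC makes the function report failure.  */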
219 static bool
220 mark_effect (rtx exp, regset nonequal)
222 int regno;
223 rtx dest;
224 switch (GET_CODE (exp))
226 /* In case we do clobber the register, mark it as equal, as we know the
227 value is dead so it doesn't have to match. */
228 case CLOBBER:
229 if (REG_P (XEXP (exp, 0)))
231 dest = XEXP (exp, 0);
232 regno = REGNO (dest);
233 CLEAR_REGNO_REG_SET (nonequal, regno);
234 if (regno < FIRST_PSEUDO_REGISTER)
236 int n = hard_regno_nregs[regno][GET_MODE (dest)];
237 while (--n > 0)
238 CLEAR_REGNO_REG_SET (nonequal, regno + n);
241 return false;
243 case SET:
244 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
245 return false;
246 dest = SET_DEST (exp);
247 if (dest == pc_rtx)
248 return false;
249 if (!REG_P (dest))
250 return true;
251 regno = REGNO (dest);
252 SET_REGNO_REG_SET (nonequal, regno);
253 if (regno < FIRST_PSEUDO_REGISTER)
255 int n = hard_regno_nregs[regno][GET_MODE (dest)];
256 while (--n > 0)
257 SET_REGNO_REG_SET (nonequal, regno + n);
259 return false;
261 default:
262 return false;
266 /* Return nonzero if X is a register set in regset DATA.
267 Called via for_each_rtx. */
268 static int
269 mentions_nonequal_regs (rtx *x, void *data)
271 regset nonequal = (regset) data;
272 if (REG_P (*x))
274 int regno;
276 regno = REGNO (*x);
277 if (REGNO_REG_SET_P (nonequal, regno))
278 return 1;
279 if (regno < FIRST_PSEUDO_REGISTER)
281 int n = hard_regno_nregs[regno][GET_MODE (*x)];
282 while (--n > 0)
283 if (REGNO_REG_SET_P (nonequal, regno + n))
284 return 1;
287 return 0;
289 /* Attempt to prove that the basic block B will have no side effects and
290 will always continue along the same edge if reached via E. Return that edge
291 if it exists, NULL otherwise. */
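/* For example, if the edge E is taken only when "x == 0" and B contains
   no side effects and ends in "if (x != 0) goto L;", then when B is
   entered via E the branch is never taken, so the caller can thread the
   jump straight to the fallthru successor of B.  */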
293 static edge
294 thread_jump (int mode, edge e, basic_block b)
296 rtx set1, set2, cond1, cond2, insn;
297 enum rtx_code code1, code2, reversed_code2;
298 bool reverse1 = false;
299 int i;
300 regset nonequal;
301 bool failed = false;
303 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
304 return NULL;
306 /* At the moment, we handle only conditional jumps, but later we may
307 want to extend this code to tablejumps and others. */
308 if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
309 return NULL;
310 if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
312 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
313 return NULL;
316 /* The second branch must end with an onlyjump, as we will eliminate the jump. */
317 if (!any_condjump_p (BB_END (e->src)))
318 return NULL;
320 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
322 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
323 return NULL;
326 set1 = pc_set (BB_END (e->src));
327 set2 = pc_set (BB_END (b));
328 if (((e->flags & EDGE_FALLTHRU) != 0)
329 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
330 reverse1 = true;
332 cond1 = XEXP (SET_SRC (set1), 0);
333 cond2 = XEXP (SET_SRC (set2), 0);
334 if (reverse1)
335 code1 = reversed_comparison_code (cond1, BB_END (e->src));
336 else
337 code1 = GET_CODE (cond1);
339 code2 = GET_CODE (cond2);
340 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
342 if (!comparison_dominates_p (code1, code2)
343 && !comparison_dominates_p (code1, reversed_code2))
344 return NULL;
346 /* Ensure that the comparison operators are equivalent.
347 ??? This is far too pessimistic. We should allow swapped operands,
348 different CCmodes, or, for example, interval comparisons that
349 dominate even when the operands are not equivalent. */
350 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
351 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
352 return NULL;
354 /* Short circuit cases where block B contains some side effects, as we can't
355 safely bypass it. */
356 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
357 insn = NEXT_INSN (insn))
358 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
360 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
361 return NULL;
364 cselib_init (false);
366 /* First process all values computed in the source basic block. */
367 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
368 insn = NEXT_INSN (insn))
369 if (INSN_P (insn))
370 cselib_process_insn (insn);
372 nonequal = BITMAP_XMALLOC();
373 CLEAR_REG_SET (nonequal);
375 /* Now assume that we have continued along the edge E to B and continue
376 processing as if it were the same basic block.
377 Our goal is to prove that the whole block is a NOOP. */
379 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
380 insn = NEXT_INSN (insn))
382 if (INSN_P (insn))
384 rtx pat = PATTERN (insn);
386 if (GET_CODE (pat) == PARALLEL)
388 for (i = 0; i < XVECLEN (pat, 0); i++)
389 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
391 else
392 failed |= mark_effect (pat, nonequal);
395 cselib_process_insn (insn);
398 /* Later we should clear the dead registers from nonequal. So far we don't
399 have life information in cfg_cleanup. */
400 if (failed)
402 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
403 goto failed_exit;
406 /* cond2 must not mention any register whose value is not known to be
407 equal to its value in the former block. */
408 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
409 goto failed_exit;
411 /* In case liveness information is available, we need to prove equivalence
412 only of the live values. */
413 if (mode & CLEANUP_UPDATE_LIFE)
414 AND_REG_SET (nonequal, b->global_live_at_end);
416 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
418 BITMAP_XFREE (nonequal);
419 cselib_finish ();
420 if ((comparison_dominates_p (code1, code2) != 0)
421 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
422 return BRANCH_EDGE (b);
423 else
424 return FALLTHRU_EDGE (b);
426 failed_exit:
427 BITMAP_XFREE (nonequal);
428 cselib_finish ();
429 return NULL;
432 /* Attempt to forward edges leaving basic block B.
433 Return true if successful. */
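/* For example, a chain B -> F1 -> F2 -> D in which F1 and F2 are
   forwarder blocks (blocks containing nothing but an unconditional
   jump) is shortened by redirecting the edge leaving B straight to D;
   with CLEANUP_THREADING the walk may also step over conditional jumps
   that thread_jump above has proven to go only one way.  */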
435 static bool
436 try_forward_edges (int mode, basic_block b)
438 bool changed = false;
439 edge e, next, *threaded_edges = NULL;
441 /* If we are partitioning hot/cold basic blocks, we don't want to
442 mess up unconditional or indirect jumps that cross between hot
443 and cold sections. */
445 if (flag_reorder_blocks_and_partition
446 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
447 return false;
449 for (e = b->succ; e; e = next)
451 basic_block target, first;
452 int counter;
453 bool threaded = false;
454 int nthreaded_edges = 0;
455 bool may_thread = first_pass | (b->flags & BB_DIRTY);
457 next = e->succ_next;
459 /* Skip complex edges because we don't know how to update them.
461 Still handle fallthru edges, as we may succeed in forwarding the fallthru
462 edge to the same place as the branch edge of a conditional branch
463 and turn the conditional branch into an unconditional branch. */
464 if (e->flags & EDGE_COMPLEX)
465 continue;
467 target = first = e->dest;
468 counter = 0;
470 while (counter < n_basic_blocks)
472 basic_block new_target = NULL;
473 bool new_target_threaded = false;
474 may_thread |= target->flags & BB_DIRTY;
476 if (FORWARDER_BLOCK_P (target)
477 && target->succ->dest != EXIT_BLOCK_PTR)
479 /* Bypass trivial infinite loops. */
480 if (target == target->succ->dest)
481 counter = n_basic_blocks;
482 new_target = target->succ->dest;
485 /* Allow threading over only one edge at a time, to simplify updating
486 of probabilities. */
487 else if ((mode & CLEANUP_THREADING) && may_thread)
489 edge t = thread_jump (mode, e, target);
490 if (t)
492 if (!threaded_edges)
493 threaded_edges = xmalloc (sizeof (*threaded_edges)
494 * n_basic_blocks);
495 else
497 int i;
499 /* Detect an infinite loop across blocks not
500 including the start block. */
501 for (i = 0; i < nthreaded_edges; ++i)
502 if (threaded_edges[i] == t)
503 break;
504 if (i < nthreaded_edges)
506 counter = n_basic_blocks;
507 break;
511 /* Detect an infinite loop across the start block. */
512 if (t->dest == b)
513 break;
515 if (nthreaded_edges >= n_basic_blocks)
516 abort ();
517 threaded_edges[nthreaded_edges++] = t;
519 new_target = t->dest;
520 new_target_threaded = true;
524 if (!new_target)
525 break;
527 /* Avoid killing loop pre-headers, as they are the place the loop
528 optimizer wants to hoist code to.
530 For fallthru forwarders, the LOOP_BEG note must appear between
531 the header of the block and the CODE_LABEL of the loop; for non-forwarders
532 it must appear before the JUMP_INSN. */
533 if ((mode & CLEANUP_PRE_LOOP) && optimize)
535 rtx insn = (target->succ->flags & EDGE_FALLTHRU
536 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
538 if (GET_CODE (insn) != NOTE)
539 insn = NEXT_INSN (insn);
541 for (; insn && GET_CODE (insn) != CODE_LABEL && !INSN_P (insn);
542 insn = NEXT_INSN (insn))
543 if (GET_CODE (insn) == NOTE
544 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
545 break;
547 if (GET_CODE (insn) == NOTE)
548 break;
550 /* Do not clean up branches to just past the end of a loop
551 at this time; it can mess up the loop optimizer's
552 recognition of some patterns. */
554 insn = PREV_INSN (BB_HEAD (target));
555 if (insn && GET_CODE (insn) == NOTE
556 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
557 break;
560 counter++;
561 target = new_target;
562 threaded |= new_target_threaded;
565 if (counter >= n_basic_blocks)
567 if (dump_file)
568 fprintf (dump_file, "Infinite loop in BB %i.\n",
569 target->index);
571 else if (target == first)
572 ; /* We didn't do anything. */
573 else
575 /* Save the values now, as the edge may get removed. */
576 gcov_type edge_count = e->count;
577 int edge_probability = e->probability;
578 int edge_frequency;
579 int n = 0;
581 /* Don't force if target is exit block. */
582 if (threaded && target != EXIT_BLOCK_PTR)
584 notice_new_block (redirect_edge_and_branch_force (e, target));
585 if (dump_file)
586 fprintf (dump_file, "Conditionals threaded.\n");
588 else if (!redirect_edge_and_branch (e, target))
590 if (dump_file)
591 fprintf (dump_file,
592 "Forwarding edge %i->%i to %i failed.\n",
593 b->index, e->dest->index, target->index);
594 continue;
597 /* We successfully forwarded the edge. Now update profile
598 data: for each edge we traversed in the chain, remove
599 the original edge's execution count. */
600 edge_frequency = ((edge_probability * b->frequency
601 + REG_BR_PROB_BASE / 2)
602 / REG_BR_PROB_BASE);
604 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
605 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
609 edge t;
611 first->count -= edge_count;
612 if (first->count < 0)
613 first->count = 0;
614 first->frequency -= edge_frequency;
615 if (first->frequency < 0)
616 first->frequency = 0;
617 if (first->succ->succ_next)
619 edge e;
620 int prob;
621 if (n >= nthreaded_edges)
622 abort ();
623 t = threaded_edges [n++];
624 if (t->src != first)
625 abort ();
626 if (first->frequency)
627 prob = edge_frequency * REG_BR_PROB_BASE / first->frequency;
628 else
629 prob = 0;
630 if (prob > t->probability)
631 prob = t->probability;
632 t->probability -= prob;
633 prob = REG_BR_PROB_BASE - prob;
634 if (prob <= 0)
636 first->succ->probability = REG_BR_PROB_BASE;
637 first->succ->succ_next->probability = 0;
639 else
640 for (e = first->succ; e; e = e->succ_next)
641 e->probability = ((e->probability * REG_BR_PROB_BASE)
642 / (double) prob);
643 update_br_prob_note (first);
645 else
647 /* It is possible that as a result of
648 threading we've removed the edge, as it was
649 threaded to the fallthru edge. Avoid
650 getting out of sync. */
651 if (n < nthreaded_edges
652 && first == threaded_edges [n]->src)
653 n++;
654 t = first->succ;
657 t->count -= edge_count;
658 if (t->count < 0)
659 t->count = 0;
660 first = t->dest;
662 while (first != target);
664 changed = true;
668 if (threaded_edges)
669 free (threaded_edges);
670 return changed;
673 /* Return true if LABEL is used for tail recursion. */
675 static bool
676 tail_recursion_label_p (rtx label)
678 rtx x;
680 for (x = tail_recursion_label_list; x; x = XEXP (x, 1))
681 if (label == XEXP (x, 0))
682 return true;
684 return false;
687 /* Blocks A and B are to be merged into a single block. A has no incoming
688 fallthru edge, so it can be moved before B without adding or modifying
689 any jumps (aside from the jump from A to B). */
691 static void
692 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
694 rtx barrier;
696 /* If we are partitioning hot/cold basic blocks, we don't want to
697 mess up unconditional or indirect jumps that cross between hot
698 and cold sections. */
700 if (flag_reorder_blocks_and_partition
701 && (a->partition != b->partition
702 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
703 return;
705 barrier = next_nonnote_insn (BB_END (a));
706 if (GET_CODE (barrier) != BARRIER)
707 abort ();
708 delete_insn (barrier);
710 /* Move block and loop notes out of the chain so that we do not
711 disturb their order.
713 ??? A better solution would be to squeeze out all the non-nested notes
714 and adjust the block trees appropriately. Even better would be to have
715 a tighter connection between block trees and rtl so that this is not
716 necessary. */
717 if (squeeze_notes (&BB_HEAD (a), &BB_END (a)))
718 abort ();
720 /* Scramble the insn chain. */
721 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
722 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
723 a->flags |= BB_DIRTY;
725 if (dump_file)
726 fprintf (dump_file, "Moved block %d before %d and merged.\n",
727 a->index, b->index);
729 /* Swap the records for the two blocks around. */
731 unlink_block (a);
732 link_block (a, b->prev_bb);
734 /* Now blocks A and B are contiguous. Merge them. */
735 merge_blocks (a, b);
738 /* Blocks A and B are to be merged into a single block. B has no outgoing
739 fallthru edge, so it can be moved after A without adding or modifying
740 any jumps (aside from the jump from A to B). */
742 static void
743 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
745 rtx barrier, real_b_end;
746 rtx label, table;
748 /* If we are partitioning hot/cold basic blocks, we don't want to
749 mess up unconditional or indirect jumps that cross between hot
750 and cold sections. */
752 if (flag_reorder_blocks_and_partition
753 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
754 || a->partition != b->partition))
755 return;
757 real_b_end = BB_END (b);
759 /* If there is a jump table following block B, temporarily add the jump table
760 to block B so that it will also be moved to the correct location. */
761 if (tablejump_p (BB_END (b), &label, &table)
762 && prev_active_insn (label) == BB_END (b))
764 BB_END (b) = table;
767 /* There had better have been a barrier there. Delete it. */
768 barrier = NEXT_INSN (BB_END (b));
769 if (barrier && GET_CODE (barrier) == BARRIER)
770 delete_insn (barrier);
772 /* Move block and loop notes out of the chain so that we do not
773 disturb their order.
775 ??? A better solution would be to squeeze out all the non-nested notes
776 and adjust the block trees appropriately. Even better would be to have
777 a tighter connection between block trees and rtl so that this is not
778 necessary. */
779 if (squeeze_notes (&BB_HEAD (b), &BB_END (b)))
780 abort ();
782 /* Scramble the insn chain. */
783 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
785 /* Restore the real end of b. */
786 BB_END (b) = real_b_end;
788 if (dump_file)
789 fprintf (dump_file, "Moved block %d after %d and merged.\n",
790 b->index, a->index);
792 /* Now blocks A and B are contiguous. Merge them. */
793 merge_blocks (a, b);
796 /* Attempt to merge basic blocks that are potentially non-adjacent.
797 Return NULL iff the attempt failed, otherwise return the basic block
798 where cleanup_cfg should continue. Because the merging commonly
799 moves a basic block away or introduces another optimization
800 possibility, return the basic block just before B so cleanup_cfg doesn't
801 need to iterate.
803 It may be a good idea to return the basic block before C in the case
804 C has been moved after B and originally appeared earlier in the
805 insn sequence, but we have no information available about the
806 relative ordering of these two. Hopefully it is not too common. */
808 static basic_block
809 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
811 basic_block next;
812 /* If C has a tail recursion label, do not merge. There is no
813 edge recorded from the call_placeholder back to this label, as
814 that would make optimize_sibling_and_tail_recursive_calls more
815 complex for no gain. */
816 if ((mode & CLEANUP_PRE_SIBCALL)
817 && GET_CODE (BB_HEAD (c)) == CODE_LABEL
818 && tail_recursion_label_p (BB_HEAD (c)))
819 return NULL;
821 /* If we are partitioning hot/cold basic blocks, we don't want to
822 mess up unconditional or indirect jumps that cross between hot
823 and cold sections. */
825 if (flag_reorder_blocks_and_partition
826 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
827 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
828 || b->partition != c->partition))
829 return NULL;
833 /* If B has a fallthru edge to C, no need to move anything. */
834 if (e->flags & EDGE_FALLTHRU)
836 int b_index = b->index, c_index = c->index;
837 merge_blocks (b, c);
838 update_forwarder_flag (b);
840 if (dump_file)
841 fprintf (dump_file, "Merged %d and %d without moving.\n",
842 b_index, c_index);
844 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
847 /* Otherwise we will need to move code around. Do that only if expensive
848 transformations are allowed. */
849 else if (mode & CLEANUP_EXPENSIVE)
851 edge tmp_edge, b_fallthru_edge;
852 bool c_has_outgoing_fallthru;
853 bool b_has_incoming_fallthru;
855 /* Avoid overactive code motion, as the forwarder blocks should be
856 eliminated by edge redirection instead. One exception might have
857 been if B is a forwarder block and C has no fallthru edge, but
858 that should be cleaned up by bb-reorder instead. */
859 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
860 return NULL;
862 /* We must make sure not to munge the nesting of lexical blocks
863 and loop notes. This is done by squeezing out all the notes
864 and leaving them there to lie. Not ideal, but functional. */
866 for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
867 if (tmp_edge->flags & EDGE_FALLTHRU)
868 break;
870 c_has_outgoing_fallthru = (tmp_edge != NULL);
872 for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
873 if (tmp_edge->flags & EDGE_FALLTHRU)
874 break;
876 b_has_incoming_fallthru = (tmp_edge != NULL);
877 b_fallthru_edge = tmp_edge;
878 next = b->prev_bb;
879 if (next == c)
880 next = next->prev_bb;
882 /* Otherwise, we're going to try to move C after B. If C does
883 not have an outgoing fallthru, then it can be moved
884 immediately after B without introducing or modifying jumps. */
885 if (! c_has_outgoing_fallthru)
887 merge_blocks_move_successor_nojumps (b, c);
888 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
891 /* If B does not have an incoming fallthru, then it can be moved
892 immediately before C without introducing or modifying jumps.
893 C cannot be the first block, so we do not have to worry about
894 accessing a non-existent block. */
896 if (b_has_incoming_fallthru)
898 basic_block bb;
900 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
901 return NULL;
902 bb = force_nonfallthru (b_fallthru_edge);
903 if (bb)
904 notice_new_block (bb);
907 merge_blocks_move_predecessor_nojumps (b, c);
908 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
911 return NULL;
915 /* Remove the memory attributes of the MEM expressions within X and Y
916 wherever they are not equal. */
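/* For example, if the two MEMs have different alias sets, both are reset
   to alias set 0; if their MEM_EXPRs differ, the exprs and offsets are
   cleared on both; the size is widened to the larger and the alignment
   narrowed to the smaller of the two, so that the insns can safely be
   treated as equivalent by the crossjumping code.  */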
918 void
919 merge_memattrs (rtx x, rtx y)
921 int i;
922 int j;
923 enum rtx_code code;
924 const char *fmt;
926 if (x == y)
927 return;
928 if (x == 0 || y == 0)
929 return;
931 code = GET_CODE (x);
933 if (code != GET_CODE (y))
934 return;
936 if (GET_MODE (x) != GET_MODE (y))
937 return;
939 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
941 if (! MEM_ATTRS (x))
942 MEM_ATTRS (y) = 0;
943 else if (! MEM_ATTRS (y))
944 MEM_ATTRS (x) = 0;
945 else
947 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
949 set_mem_alias_set (x, 0);
950 set_mem_alias_set (y, 0);
953 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
955 set_mem_expr (x, 0);
956 set_mem_expr (y, 0);
957 set_mem_offset (x, 0);
958 set_mem_offset (y, 0);
960 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
962 set_mem_offset (x, 0);
963 set_mem_offset (y, 0);
966 set_mem_size (x, MAX (MEM_SIZE (x), MEM_SIZE (y)));
967 set_mem_size (y, MEM_SIZE (x));
969 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
970 set_mem_align (y, MEM_ALIGN (x));
974 fmt = GET_RTX_FORMAT (code);
975 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
977 switch (fmt[i])
979 case 'E':
980 /* Two vectors must have the same length. */
981 if (XVECLEN (x, i) != XVECLEN (y, i))
982 return;
984 for (j = 0; j < XVECLEN (x, i); j++)
985 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
987 break;
989 case 'e':
990 merge_memattrs (XEXP (x, i), XEXP (y, i));
993 return;
997 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
999 static bool
1000 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
1002 rtx p1, p2;
1004 /* Verify that I1 and I2 are equivalent. */
1005 if (GET_CODE (i1) != GET_CODE (i2))
1006 return false;
1008 p1 = PATTERN (i1);
1009 p2 = PATTERN (i2);
1011 if (GET_CODE (p1) != GET_CODE (p2))
1012 return false;
1014 /* If this is a CALL_INSN, compare register usage information.
1015 If we don't check this on stack register machines, the two
1016 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1017 numbers of stack registers in the same basic block.
1018 If we don't check this on machines with delay slots, a delay slot may
1019 be filled that clobbers a parameter expected by the subroutine.
1021 ??? We take the simple route for now and assume that if they're
1022 equal, they were constructed identically. */
1024 if (GET_CODE (i1) == CALL_INSN
1025 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1026 CALL_INSN_FUNCTION_USAGE (i2))
1027 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1028 return false;
1030 #ifdef STACK_REGS
1031 /* If cross_jump_death_matters is not 0, the insn's mode
1032 indicates whether or not the insn contains any stack-like
1033 regs. */
1035 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1037 /* If register stack conversion has already been done, then
1038 death notes must also be compared before it is certain that
1039 the two instruction streams match. */
1041 rtx note;
1042 HARD_REG_SET i1_regset, i2_regset;
1044 CLEAR_HARD_REG_SET (i1_regset);
1045 CLEAR_HARD_REG_SET (i2_regset);
1047 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1048 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1049 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1051 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1052 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1053 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1055 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1057 return false;
1059 done:
1062 #endif
1064 if (reload_completed
1065 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1066 return true;
1068 /* Do not do EQUIV substitution after reload. First, we're undoing the
1069 work of reload_cse. Second, we may be undoing the work of the post-
1070 reload splitting pass. */
1071 /* ??? Possibly add a new phase switch variable that can be used by
1072 targets to disallow the troublesome insns after splitting. */
1073 if (!reload_completed)
1075 /* The following code helps take care of G++ cleanups. */
1076 rtx equiv1 = find_reg_equal_equiv_note (i1);
1077 rtx equiv2 = find_reg_equal_equiv_note (i2);
1079 if (equiv1 && equiv2
1080 /* If the equivalences are not to a constant, they may
1081 reference pseudos that no longer exist, so we can't
1082 use them. */
1083 && (! reload_completed
1084 || (CONSTANT_P (XEXP (equiv1, 0))
1085 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1087 rtx s1 = single_set (i1);
1088 rtx s2 = single_set (i2);
1089 if (s1 != 0 && s2 != 0
1090 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1092 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1093 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1094 if (! rtx_renumbered_equal_p (p1, p2))
1095 cancel_changes (0);
1096 else if (apply_change_group ())
1097 return true;
1102 return false;
1105 /* Look through the insns at the end of BB1 and BB2 and find the longest
1106 sequence that is equivalent. Store the first insns of that sequence
1107 in *F1 and *F2 and return the sequence length.
1109 To simplify callers of this function, if the blocks match exactly,
1110 store the heads of the blocks in *F1 and *F2. */
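/* For example, if BB1 and BB2 both end with an equivalent
   "add; store; conditional jump" tail, the loop below walks backwards
   from the final jumps over matching insn pairs, merging their memory
   attributes as it goes, and reports where the common tail begins in
   each block.  */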
1112 static int
1113 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1114 basic_block bb2, rtx *f1, rtx *f2)
1116 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1117 int ninsns = 0;
1119 /* Skip simple jumps at the end of the blocks. Complex jumps still
1120 need to be compared for equivalence, which we'll do below. */
1122 i1 = BB_END (bb1);
1123 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1124 if (onlyjump_p (i1)
1125 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1127 last1 = i1;
1128 i1 = PREV_INSN (i1);
1131 i2 = BB_END (bb2);
1132 if (onlyjump_p (i2)
1133 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1135 last2 = i2;
1136 /* Count everything except an unconditional jump as an insn. */
1137 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1138 ninsns++;
1139 i2 = PREV_INSN (i2);
1142 while (true)
1144 /* Ignore notes. */
1145 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1146 i1 = PREV_INSN (i1);
1148 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1149 i2 = PREV_INSN (i2);
1151 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1152 break;
1154 if (!insns_match_p (mode, i1, i2))
1155 break;
1157 merge_memattrs (i1, i2);
1159 /* Don't begin a cross-jump with a NOTE insn. */
1160 if (INSN_P (i1))
1162 /* If the merged insns have different REG_EQUAL notes, then
1163 remove them. */
1164 rtx equiv1 = find_reg_equal_equiv_note (i1);
1165 rtx equiv2 = find_reg_equal_equiv_note (i2);
1167 if (equiv1 && !equiv2)
1168 remove_note (i1, equiv1);
1169 else if (!equiv1 && equiv2)
1170 remove_note (i2, equiv2);
1171 else if (equiv1 && equiv2
1172 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1174 remove_note (i1, equiv1);
1175 remove_note (i2, equiv2);
1178 afterlast1 = last1, afterlast2 = last2;
1179 last1 = i1, last2 = i2;
1180 ninsns++;
1183 i1 = PREV_INSN (i1);
1184 i2 = PREV_INSN (i2);
1187 #ifdef HAVE_cc0
1188 /* Don't allow the insn after a compare to be shared by
1189 cross-jumping unless the compare is also shared. */
1190 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1191 last1 = afterlast1, last2 = afterlast2, ninsns--;
1192 #endif
1194 /* Include preceding notes and labels in the cross-jump. One,
1195 this may bring us to the head of the blocks as requested above.
1196 Two, it keeps line number notes as matched as may be. */
1197 if (ninsns)
1199 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1200 last1 = PREV_INSN (last1);
1202 if (last1 != BB_HEAD (bb1) && GET_CODE (PREV_INSN (last1)) == CODE_LABEL)
1203 last1 = PREV_INSN (last1);
1205 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1206 last2 = PREV_INSN (last2);
1208 if (last2 != BB_HEAD (bb2) && GET_CODE (PREV_INSN (last2)) == CODE_LABEL)
1209 last2 = PREV_INSN (last2);
1211 *f1 = last1;
1212 *f2 = last2;
1215 return ninsns;
1218 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1219 the branch instruction. This means that if we commonize the control
1220 flow before the end of the basic blocks, the semantics remain unchanged.
1222 We may assume that there exists one edge with a common destination. */
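/* For example, two blocks that each end with "if (a == b) goto L;" and
   whose branch and fallthru destinations pair up (possibly through
   forwarder blocks) are considered to match, provided their branch
   probabilities are not wildly different.  */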
1224 static bool
1225 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1227 int nehedges1 = 0, nehedges2 = 0;
1228 edge fallthru1 = 0, fallthru2 = 0;
1229 edge e1, e2;
1231 /* If BB1 has only one successor, we may be looking at either an
1232 unconditional jump, or a fake edge to exit. */
1233 if (bb1->succ && !bb1->succ->succ_next
1234 && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1235 && (GET_CODE (BB_END (bb1)) != JUMP_INSN || simplejump_p (BB_END (bb1))))
1236 return (bb2->succ && !bb2->succ->succ_next
1237 && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1238 && (GET_CODE (BB_END (bb2)) != JUMP_INSN || simplejump_p (BB_END (bb2))));
1240 /* Match conditional jumps - this may get tricky when fallthru and branch
1241 edges are crossed. */
1242 if (bb1->succ
1243 && bb1->succ->succ_next
1244 && !bb1->succ->succ_next->succ_next
1245 && any_condjump_p (BB_END (bb1))
1246 && onlyjump_p (BB_END (bb1)))
1248 edge b1, f1, b2, f2;
1249 bool reverse, match;
1250 rtx set1, set2, cond1, cond2;
1251 enum rtx_code code1, code2;
1253 if (!bb2->succ
1254 || !bb2->succ->succ_next
1255 || bb2->succ->succ_next->succ_next
1256 || !any_condjump_p (BB_END (bb2))
1257 || !onlyjump_p (BB_END (bb2)))
1258 return false;
1260 b1 = BRANCH_EDGE (bb1);
1261 b2 = BRANCH_EDGE (bb2);
1262 f1 = FALLTHRU_EDGE (bb1);
1263 f2 = FALLTHRU_EDGE (bb2);
1265 /* Get around possible forwarders on fallthru edges. Other cases
1266 should be optimized out already. */
1267 if (FORWARDER_BLOCK_P (f1->dest))
1268 f1 = f1->dest->succ;
1270 if (FORWARDER_BLOCK_P (f2->dest))
1271 f2 = f2->dest->succ;
1273 /* To simplify use of this function, return false if there are
1274 unneeded forwarder blocks. These will get eliminated later
1275 during cleanup_cfg. */
1276 if (FORWARDER_BLOCK_P (f1->dest)
1277 || FORWARDER_BLOCK_P (f2->dest)
1278 || FORWARDER_BLOCK_P (b1->dest)
1279 || FORWARDER_BLOCK_P (b2->dest))
1280 return false;
1282 if (f1->dest == f2->dest && b1->dest == b2->dest)
1283 reverse = false;
1284 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1285 reverse = true;
1286 else
1287 return false;
1289 set1 = pc_set (BB_END (bb1));
1290 set2 = pc_set (BB_END (bb2));
1291 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1292 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1293 reverse = !reverse;
1295 cond1 = XEXP (SET_SRC (set1), 0);
1296 cond2 = XEXP (SET_SRC (set2), 0);
1297 code1 = GET_CODE (cond1);
1298 if (reverse)
1299 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1300 else
1301 code2 = GET_CODE (cond2);
1303 if (code2 == UNKNOWN)
1304 return false;
1306 /* Verify codes and operands match. */
1307 match = ((code1 == code2
1308 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1309 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1310 || (code1 == swap_condition (code2)
1311 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1312 XEXP (cond2, 0))
1313 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1314 XEXP (cond2, 1))));
1316 /* If we return true, we will join the blocks, which means that
1317 we will have only one branch prediction bit to work with. Thus
1318 we require the existing branches to have probabilities that are
1319 roughly similar. */
1320 if (match
1321 && !optimize_size
1322 && maybe_hot_bb_p (bb1)
1323 && maybe_hot_bb_p (bb2))
1325 int prob2;
1327 if (b1->dest == b2->dest)
1328 prob2 = b2->probability;
1329 else
1330 /* Do not use f2 probability as f2 may be forwarded. */
1331 prob2 = REG_BR_PROB_BASE - b2->probability;
1333 /* Fail if the difference in probabilities is greater than 50%.
1334 This rules out two well-predicted branches with opposite
1335 outcomes. */
1336 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1338 if (dump_file)
1339 fprintf (dump_file,
1340 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
1341 bb1->index, bb2->index, b1->probability, prob2);
1343 return false;
1347 if (dump_file && match)
1348 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1349 bb1->index, bb2->index);
1351 return match;
1354 /* Generic case - we are seeing a computed jump, table jump or trapping
1355 instruction. */
1357 #ifndef CASE_DROPS_THROUGH
1358 /* Check whether there are tablejumps at the end of BB1 and BB2.
1359 Return true if they are identical. */
1361 rtx label1, label2;
1362 rtx table1, table2;
1364 if (tablejump_p (BB_END (bb1), &label1, &table1)
1365 && tablejump_p (BB_END (bb2), &label2, &table2)
1366 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1368 /* The labels should never be the same rtx. If they really are the same,
1369 the jump tables are the same too. So disable crossjumping of blocks BB1
1370 and BB2 because when deleting the common insns at the end of BB1
1371 by delete_block () the jump table would be deleted too. */
1372 /* If LABEL2 is referenced in BB1->END, do not do anything
1373 because we would lose information when replacing
1374 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1375 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1377 /* Set IDENTICAL to true when the tables are identical. */
1378 bool identical = false;
1379 rtx p1, p2;
1381 p1 = PATTERN (table1);
1382 p2 = PATTERN (table2);
1383 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1385 identical = true;
1387 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1388 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1389 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1390 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1392 int i;
1394 identical = true;
1395 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1396 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1397 identical = false;
1400 if (identical)
1402 replace_label_data rr;
1403 bool match;
1405 /* Temporarily replace references to LABEL1 with LABEL2
1406 in BB1->END so that we can compare the instructions. */
1407 rr.r1 = label1;
1408 rr.r2 = label2;
1409 rr.update_label_nuses = false;
1410 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1412 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1413 if (dump_file && match)
1414 fprintf (dump_file,
1415 "Tablejumps in bb %i and %i match.\n",
1416 bb1->index, bb2->index);
1418 /* Set the original label in BB1->END because when deleting
1419 a block whose end is a tablejump, the tablejump referenced
1420 from the instruction is deleted too. */
1421 rr.r1 = label2;
1422 rr.r2 = label1;
1423 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1425 return match;
1428 return false;
1431 #endif
1433 /* First ensure that the instructions match. There may be many outgoing
1434 edges so this test is generally cheaper. */
1435 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1436 return false;
1438 /* Search the outgoing edges, ensure that the counts do match, and find possible
1439 fallthru and exception handling edges, since these need more
1440 validation. */
1441 for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
1442 e1 = e1->succ_next, e2 = e2->succ_next)
1444 if (e1->flags & EDGE_EH)
1445 nehedges1++;
1447 if (e2->flags & EDGE_EH)
1448 nehedges2++;
1450 if (e1->flags & EDGE_FALLTHRU)
1451 fallthru1 = e1;
1452 if (e2->flags & EDGE_FALLTHRU)
1453 fallthru2 = e2;
1456 /* If the number of edges of the various types does not match, fail. */
1457 if (e1 || e2
1458 || nehedges1 != nehedges2
1459 || (fallthru1 != 0) != (fallthru2 != 0))
1460 return false;
1462 /* fallthru edges must be forwarded to the same destination. */
1463 if (fallthru1)
1465 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1466 ? fallthru1->dest->succ->dest: fallthru1->dest);
1467 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1468 ? fallthru2->dest->succ->dest: fallthru2->dest);
1470 if (d1 != d2)
1471 return false;
1474 /* Ensure the same EH region. */
1476 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1477 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1479 if (!n1 && n2)
1480 return false;
1482 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1483 return false;
1486 /* We don't need to match the rest of the edges, as the above checks should be enough
1487 to ensure that they are equivalent. */
1488 return true;
1491 /* E1 and E2 are edges with the same destination block. Search their
1492 predecessors for common code. If found, redirect control flow from
1493 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
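/* For example, when E1->SRC and E2->SRC end with the same tail of insns,
   E2->SRC is split just before its copy of the tail (unless the match
   starts at its head), the matching tail of E1->SRC is removed, and
   control is redirected to the split point, leaving a single copy of
   the common code.  */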
1495 static bool
1496 try_crossjump_to_edge (int mode, edge e1, edge e2)
1498 int nmatch;
1499 basic_block src1 = e1->src, src2 = e2->src;
1500 basic_block redirect_to, redirect_from, to_remove;
1501 rtx newpos1, newpos2;
1502 edge s;
1504 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1505 to try this optimization. */
1507 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1508 return false;
1510 /* Search backward through forwarder blocks. We don't need to worry
1511 about multiple entry or chained forwarders, as they will be optimized
1512 away. We do this to look past the unconditional jump following a
1513 conditional jump that is required due to the current CFG shape. */
1514 if (src1->pred
1515 && !src1->pred->pred_next
1516 && FORWARDER_BLOCK_P (src1))
1517 e1 = src1->pred, src1 = e1->src;
1519 if (src2->pred
1520 && !src2->pred->pred_next
1521 && FORWARDER_BLOCK_P (src2))
1522 e2 = src2->pred, src2 = e2->src;
1524 /* Nothing to do if we reach ENTRY, or a common source block. */
1525 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1526 return false;
1527 if (src1 == src2)
1528 return false;
1530 /* Seeing more than one forwarder block would confuse us later... */
1531 if (FORWARDER_BLOCK_P (e1->dest)
1532 && FORWARDER_BLOCK_P (e1->dest->succ->dest))
1533 return false;
1535 if (FORWARDER_BLOCK_P (e2->dest)
1536 && FORWARDER_BLOCK_P (e2->dest->succ->dest))
1537 return false;
1539 /* Likewise with dead code (possibly newly created by the other optimizations
1540 of cfg_cleanup). */
1541 if (!src1->pred || !src2->pred)
1542 return false;
1544 /* Look for the common insn sequence, part the first ... */
1545 if (!outgoing_edges_match (mode, src1, src2))
1546 return false;
1548 /* ... and part the second. */
1549 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1550 if (!nmatch)
1551 return false;
1553 #ifndef CASE_DROPS_THROUGH
1554 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1555 will be deleted.
1556 If we have tablejumps at the end of SRC1 and SRC2,
1557 they have already been compared for equivalence in outgoing_edges_match (),
1558 so replace the references to TABLE1 by references to TABLE2. */
1560 rtx label1, label2;
1561 rtx table1, table2;
1563 if (tablejump_p (BB_END (src1), &label1, &table1)
1564 && tablejump_p (BB_END (src2), &label2, &table2)
1565 && label1 != label2)
1567 replace_label_data rr;
1568 rtx insn;
1570 /* Replace references to LABEL1 with LABEL2. */
1571 rr.r1 = label1;
1572 rr.r2 = label2;
1573 rr.update_label_nuses = true;
1574 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1576 /* Do not replace the label in SRC1->END because when deleting
1577 a block whose end is a tablejump, the tablejump referenced
1578 from the instruction is deleted too. */
1579 if (insn != BB_END (src1))
1580 for_each_rtx (&insn, replace_label, &rr);
1584 #endif
1586 /* Avoid splitting if possible. */
1587 if (newpos2 == BB_HEAD (src2))
1588 redirect_to = src2;
1589 else
1591 if (dump_file)
1592 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1593 src2->index, nmatch);
1594 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1597 if (dump_file)
1598 fprintf (dump_file,
1599 "Cross jumping from bb %i to bb %i; %i common insns\n",
1600 src1->index, src2->index, nmatch);
1602 redirect_to->count += src1->count;
1603 redirect_to->frequency += src1->frequency;
1604 /* We may have some registers visible through the block. */
1605 redirect_to->flags |= BB_DIRTY;
1607 /* Recompute the frequencies and counts of outgoing edges. */
1608 for (s = redirect_to->succ; s; s = s->succ_next)
1610 edge s2;
1611 basic_block d = s->dest;
1613 if (FORWARDER_BLOCK_P (d))
1614 d = d->succ->dest;
1616 for (s2 = src1->succ; ; s2 = s2->succ_next)
1618 basic_block d2 = s2->dest;
1619 if (FORWARDER_BLOCK_P (d2))
1620 d2 = d2->succ->dest;
1621 if (d == d2)
1622 break;
1625 s->count += s2->count;
1627 /* Take care to update possible forwarder blocks. We verified
1628 that there is no more than one in the chain, so we can't run
1629 into an infinite loop. */
1630 if (FORWARDER_BLOCK_P (s->dest))
1632 s->dest->succ->count += s2->count;
1633 s->dest->count += s2->count;
1634 s->dest->frequency += EDGE_FREQUENCY (s);
1637 if (FORWARDER_BLOCK_P (s2->dest))
1639 s2->dest->succ->count -= s2->count;
1640 if (s2->dest->succ->count < 0)
1641 s2->dest->succ->count = 0;
1642 s2->dest->count -= s2->count;
1643 s2->dest->frequency -= EDGE_FREQUENCY (s);
1644 if (s2->dest->frequency < 0)
1645 s2->dest->frequency = 0;
1646 if (s2->dest->count < 0)
1647 s2->dest->count = 0;
1650 if (!redirect_to->frequency && !src1->frequency)
1651 s->probability = (s->probability + s2->probability) / 2;
1652 else
1653 s->probability
1654 = ((s->probability * redirect_to->frequency +
1655 s2->probability * src1->frequency)
1656 / (redirect_to->frequency + src1->frequency));
1659 update_br_prob_note (redirect_to);
1661 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1663 /* Skip possible basic block header. */
1664 if (GET_CODE (newpos1) == CODE_LABEL)
1665 newpos1 = NEXT_INSN (newpos1);
1667 if (GET_CODE (newpos1) == NOTE)
1668 newpos1 = NEXT_INSN (newpos1);
1670 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1671 to_remove = redirect_from->succ->dest;
1673 redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
1674 delete_basic_block (to_remove);
1676 update_forwarder_flag (redirect_from);
1678 return true;
1681 /* Search the predecessors of BB for common insn sequences. When found,
1682 share code between them by redirecting control flow. Return true if
1683 any changes were made. */
1685 static bool
1686 try_crossjump_bb (int mode, basic_block bb)
1688 edge e, e2, nexte2, nexte, fallthru;
1689 bool changed;
1690 int n = 0, max;
1692 /* Nothing to do if there are not at least two incoming edges. */
1693 if (!bb->pred || !bb->pred->pred_next)
1694 return false;
1696 /* If we are partitioning hot/cold basic blocks, we don't want to
1697 mess up unconditional or indirect jumps that cross between hot
1698 and cold sections. */
1700 if (flag_reorder_blocks_and_partition
1701 && (bb->pred->src->partition != bb->pred->pred_next->src->partition
1702 || bb->pred->crossing_edge))
1703 return false;
1705 /* It is always cheapest to redirect a block that ends in a branch to
1706 a block that falls through into BB, as that adds no branches to the
1707 program. We'll try that combination first. */
1708 fallthru = NULL;
1709 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1710 for (e = bb->pred; e ; e = e->pred_next, n++)
1712 if (e->flags & EDGE_FALLTHRU)
1713 fallthru = e;
1714 if (n > max)
1715 return false;
1718 changed = false;
1719 for (e = bb->pred; e; e = nexte)
1721 nexte = e->pred_next;
1723 /* As noted above, first try with the fallthru predecessor. */
1724 if (fallthru)
1726 /* Don't combine the fallthru edge into anything else.
1727 If there is a match, we'll do it the other way around. */
1728 if (e == fallthru)
1729 continue;
1730 /* If nothing changed since the last attempt, there is nothing
1731 we can do. */
1732 if (!first_pass
1733 && (!(e->src->flags & BB_DIRTY)
1734 && !(fallthru->src->flags & BB_DIRTY)))
1735 continue;
1737 if (try_crossjump_to_edge (mode, e, fallthru))
1739 changed = true;
1740 nexte = bb->pred;
1741 continue;
1745 /* Non-obvious work limiting check: Recognize that we're going
1746 to call try_crossjump_bb on every basic block. So if we have
1747 two blocks with lots of outgoing edges (a switch) and they
1748 share lots of common destinations, then we would do the
1749 cross-jump check once for each common destination.
1751 Now, if the blocks actually are cross-jump candidates, then
1752 all of their destinations will be shared, which means that
1753 we only need to check them for cross-jump candidacy once. We
1754 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1755 choosing to do the check from the block for which the edge
1756 in question is the first successor of A. */
1757 if (e->src->succ != e)
1758 continue;
1760 for (e2 = bb->pred; e2; e2 = nexte2)
1762 nexte2 = e2->pred_next;
1764 if (e2 == e)
1765 continue;
1767 /* We've already checked the fallthru edge above. */
1768 if (e2 == fallthru)
1769 continue;
1771 /* The "first successor" check above only prevents multiple
1772 checks of crossjump(A,B). In order to prevent redundant
1773 checks of crossjump(B,A), require that A be the block
1774 with the lowest index. */
1775 if (e->src->index > e2->src->index)
1776 continue;
1778 /* If nothing changed since the last attempt, there is nothing
1779 we can do. */
1780 if (!first_pass
1781 && (!(e->src->flags & BB_DIRTY)
1782 && !(e2->src->flags & BB_DIRTY)))
1783 continue;
1785 if (try_crossjump_to_edge (mode, e, e2))
1787 changed = true;
1788 nexte = bb->pred;
1789 break;
1794 return changed;
1797 /* Do simple CFG optimizations - basic block merging, simplification of jump
1798 instructions, etc. Return nonzero if changes were made. */
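/* The work is done in a loop that repeats until a whole pass over the
   function makes no change: trivially dead blocks and unused labels are
   deleted, empty fallthru blocks removed, adjacent blocks merged, jumps
   simplified and forwarded, and, with CLEANUP_CROSSJUMP, common tails
   shared between predecessors.  */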
1800 static bool
1801 try_optimize_cfg (int mode)
1803 bool changed_overall = false;
1804 bool changed;
1805 int iterations = 0;
1806 basic_block bb, b, next;
1808 if (mode & CLEANUP_CROSSJUMP)
1809 add_noreturn_fake_exit_edges ();
1811 FOR_EACH_BB (bb)
1812 update_forwarder_flag (bb);
1814 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1815 clear_bb_flags ();
1817 if (! targetm.cannot_modify_jumps_p ())
1819 first_pass = true;
1820 /* Attempt to merge blocks as made possible by edge removal. If
1821 a block has only one successor, and the successor has only
1822 one predecessor, they may be combined. */
1825 changed = false;
1826 iterations++;
1828 if (dump_file)
1829 fprintf (dump_file,
1830 "\n\ntry_optimize_cfg iteration %i\n\n",
1831 iterations);
1833 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1835 basic_block c;
1836 edge s;
1837 bool changed_here = false;
1839 /* Delete trivially dead basic blocks. */
1840 while (b->pred == NULL)
1842 c = b->prev_bb;
1843 if (dump_file)
1844 fprintf (dump_file, "Deleting block %i.\n",
1845 b->index);
1847 delete_basic_block (b);
1848 if (!(mode & CLEANUP_CFGLAYOUT))
1849 changed = true;
1850 b = c;
1853 /* Remove code labels no longer used. Don't do this
1854 before CALL_PLACEHOLDER is removed, as some branches
1855 may be hidden within. */
1856 if (b->pred->pred_next == NULL
1857 && (b->pred->flags & EDGE_FALLTHRU)
1858 && !(b->pred->flags & EDGE_COMPLEX)
1859 && GET_CODE (BB_HEAD (b)) == CODE_LABEL
1860 && (!(mode & CLEANUP_PRE_SIBCALL)
1861 || !tail_recursion_label_p (BB_HEAD (b)))
1862 /* If the previous block ends with a branch to this
1863 block, we can't delete the label. Normally this
1864 is a condjump that is yet to be simplified, but
1865 if CASE_DROPS_THRU, this can be a tablejump with
1866 some element going to the same place as the
1867 default (fallthru). */
1868 && (b->pred->src == ENTRY_BLOCK_PTR
1869 || GET_CODE (BB_END (b->pred->src)) != JUMP_INSN
1870 || ! label_is_jump_target_p (BB_HEAD (b),
1871 BB_END (b->pred->src))))
1873 rtx label = BB_HEAD (b);
1875 delete_insn_chain (label, label);
1876 /* In case the label is undeletable, move it after the
1877 BASIC_BLOCK note. */
1878 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1880 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1882 reorder_insns_nobb (label, label, bb_note);
1883 BB_HEAD (b) = bb_note;
1885 if (dump_file)
1886 fprintf (dump_file, "Deleted label in block %i.\n",
1887 b->index);
1890 /* If we fall through an empty block, we can remove it. */
1891 if (!(mode & CLEANUP_CFGLAYOUT)
1892 && b->pred->pred_next == NULL
1893 && (b->pred->flags & EDGE_FALLTHRU)
1894 && GET_CODE (BB_HEAD (b)) != CODE_LABEL
1895 && FORWARDER_BLOCK_P (b)
1896 /* Note that forwarder_block_p true ensures that
1897 there is a successor for this block. */
1898 && (b->succ->flags & EDGE_FALLTHRU)
1899 && n_basic_blocks > 1)
1901 if (dump_file)
1902 fprintf (dump_file,
1903 "Deleting fallthru block %i.\n",
1904 b->index);
1906 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1907 redirect_edge_succ_nodup (b->pred, b->succ->dest);
1908 delete_basic_block (b);
1909 changed = true;
1910 b = c;
1913 if ((s = b->succ) != NULL
1914 && s->succ_next == NULL
1915 && !(s->flags & EDGE_COMPLEX)
1916 && (c = s->dest) != EXIT_BLOCK_PTR
1917 && c->pred->pred_next == NULL
1918 && b != c)
1920 /* When not in cfg_layout mode, use code aware of reordering
1921 INSNs. This code possibly creates new basic blocks, so it
1922 does not fit the merge_blocks interface and is kept here in the
1923 hope that it will become useless once more of the compiler
1924 is transformed to use cfg_layout mode. */
1926 if ((mode & CLEANUP_CFGLAYOUT)
1927 && can_merge_blocks_p (b, c))
1929 merge_blocks (b, c);
1930 update_forwarder_flag (b);
1931 changed_here = true;
1933 else if (!(mode & CLEANUP_CFGLAYOUT)
1934 /* If the jump insn has side effects,
1935 we can't kill the edge. */
1936 && (GET_CODE (BB_END (b)) != JUMP_INSN
1937 || (reload_completed
1938 ? simplejump_p (BB_END (b))
1939 : onlyjump_p (BB_END (b))))
1940 && (next = merge_blocks_move (s, b, c, mode)))
1942 b = next;
1943 changed_here = true;
1947 /* Simplify branch over branch. */
1948 if ((mode & CLEANUP_EXPENSIVE)
1949 && !(mode & CLEANUP_CFGLAYOUT)
1950 && try_simplify_condjump (b))
1951 changed_here = true;
1953 /* If B has a single outgoing edge, but uses a
1954 non-trivial jump instruction without side-effects, we
1955 can either delete the jump entirely, or replace it
1956 with a simple unconditional jump. */
1957 if (b->succ
1958 && ! b->succ->succ_next
1959 && b->succ->dest != EXIT_BLOCK_PTR
1960 && onlyjump_p (BB_END (b))
1961 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1962 && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
1963 (mode & CLEANUP_CFGLAYOUT) != 0))
1965 update_forwarder_flag (b);
1966 changed_here = true;
1969 /* Simplify branch to branch. */
1970 if (try_forward_edges (mode, b))
1971 changed_here = true;
1973 /* Look for shared code between blocks. */
1974 if ((mode & CLEANUP_CROSSJUMP)
1975 && try_crossjump_bb (mode, b))
1976 changed_here = true;
1978 /* Don't get confused by the index shift caused by
1979 deleting blocks. */
1980 if (!changed_here)
1981 b = b->next_bb;
1982 else
1983 changed = true;
1986 if ((mode & CLEANUP_CROSSJUMP)
1987 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
1988 changed = true;
1990 #ifdef ENABLE_CHECKING
1991 if (changed)
1992 verify_flow_info ();
1993 #endif
1995 changed_overall |= changed;
1996 first_pass = false;
1998 while (changed);
2001 if (mode & CLEANUP_CROSSJUMP)
2002 remove_fake_edges ();
2004 clear_aux_for_blocks ();
2006 return changed_overall;
2009 /* Delete all unreachable basic blocks. */
2011 bool
2012 delete_unreachable_blocks (void)
2014 bool changed = false;
2015 basic_block b, next_bb;
2017 find_unreachable_blocks ();
2019 /* Delete all unreachable basic blocks. */
2021 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2023 next_bb = b->next_bb;
2025 if (!(b->flags & BB_REACHABLE))
2027 delete_basic_block (b);
2028 changed = true;
2032 if (changed)
2033 tidy_fallthru_edges ();
2034 return changed;
2037 /* Tidy the CFG by deleting unreachable code and whatnot. */
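/* MODE is a mask of the CLEANUP_* flags handled above; a caller might,
   for instance, use cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP)
   after a pass that may have left redundant jumps or unreachable blocks
   behind.  */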
2039 bool
2040 cleanup_cfg (int mode)
2042 bool changed = false;
2044 timevar_push (TV_CLEANUP_CFG);
2045 if (delete_unreachable_blocks ())
2047 changed = true;
2048 /* We've possibly created trivially dead code. Clean it up right
2049 now to introduce more opportunities for try_optimize_cfg. */
2050 if (!(mode & (CLEANUP_NO_INSN_DEL
2051 | CLEANUP_UPDATE_LIFE | CLEANUP_PRE_SIBCALL))
2052 && !reload_completed)
2053 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2056 compact_blocks ();
2058 while (try_optimize_cfg (mode))
2060 delete_unreachable_blocks (), changed = true;
2061 if (mode & CLEANUP_UPDATE_LIFE)
2063 /* Cleaning up the CFG introduces more opportunities for dead code
2064 removal, which in turn may introduce more opportunities for
2065 cleaning up the CFG. */
2066 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2067 PROP_DEATH_NOTES
2068 | PROP_SCAN_DEAD_CODE
2069 | PROP_KILL_DEAD_CODE
2070 | ((mode & CLEANUP_LOG_LINKS)
2071 ? PROP_LOG_LINKS : 0)))
2072 break;
2074 else if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_PRE_SIBCALL))
2075 && (mode & CLEANUP_EXPENSIVE)
2076 && !reload_completed)
2078 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2079 break;
2081 else
2082 break;
2083 delete_dead_jumptables ();
2086 /* Kill the data we won't maintain. */
2087 free_EXPR_LIST_list (&label_value_list);
2088 timevar_pop (TV_CLEANUP_CFG);
2090 return changed;