gcc/cfgcleanup.c
/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

	- Unreachable block removal
	- Edge forwarding (an edge to a forwarder block is forwarded to the
	  block's successor.  Simplification of the branch instruction is
	  performed by the underlying infrastructure, so the branch can be
	  converted to a simplejump or eliminated).
	- Cross jumping (tail merging)
	- Conditional jump-around-simplejump simplification
	- Basic block merging.  */
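/* For orientation: a "forwarder block", referred to throughout this file,
   does no work of its own -- it contains at most a label and a single
   unconditional jump, e.g.

     L1:
	goto L2;

   Edges entering such a block can therefore be redirected straight to its
   lone successor.  (See forwarder_block_p and FORWARDER_BLOCK_P below.)  */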
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "df.h"
#include "dce.h"
#include "dbgcnt.h"
#define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK)

/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;

/* Set to true if crossjumps occurred in the latest run of
   try_optimize_cfg.  */
static bool crossjumps_occured;

static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool old_insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
}

/* Recompute forwarder flag after block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    bb->flags |= BB_FORWARDER_BLOCK;
  else
    bb->flags &= ~BB_FORWARDER_BLOCK;
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
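/* For illustration, the shape being simplified is

	if (cond) goto L2;	<-- cbranch_block ends here
	goto L1;		<-- jump_block, a forwarder
     L2:			<-- cbranch_dest_block
	...

   which, after inverting the condition and redirecting the edges, becomes

	if (!cond) goto L1;
     L2:
	...

   so the unconditional jump and its block disappear.  */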
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (!single_pred_p (jump_block)
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = single_succ (jump_block);

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
      || (cbranch_jump_edge->flags & EDGE_CROSSING))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib; otherwise
   mark the effect on the register.  Used by jump threading.  */
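/* As a sketch of the SET case below: if cselib has already seen
   "r65 = r64" on the path, then a later "(set (reg 64) (reg 65))"
   satisfies rtx_equal_for_cselib_p and is treated as a no-op, so reg 64
   is not marked nonequal.  (Register numbers here are illustrative
   only.)  */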
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   always continue along the same edge if reached via E.  Return that edge
   if it exists, NULL otherwise.  */
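/* For example, if the block ending in E->SRC branches on "x > 0" and B
   branches on "x != 0" with nothing in between changing x, then on the
   path along E the outcome of B's comparison is already decided, and E
   can be redirected past B to the corresponding successor.  */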
static edge
thread_jump (edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (b->flags & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	b->flags |= BB_NONTHREADABLE_BLOCK;
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      b->flags |= BB_NONTHREADABLE_BLOCK;
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
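/* The forwarding below follows whole chains: given B -> F1 -> F2 -> C
   where F1 and F2 are forwarder blocks, the edge out of B is redirected
   straight to C.  With CLEANUP_THREADING it may additionally step across
   blocks that thread_jump above proves to be no-ops on this path.  */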
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter, goto_locus;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | df_get_bb_dirty (b);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we may succeed in forwarding the
	 fallthru edge to the same place as the branch edge of a conditional
	 branch, turning the conditional branch into an unconditional one.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = NUM_FIXED_BLOCKS;
      goto_locus = e->goto_locus;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= df_get_bb_dirty (target);

	  if (FORWARDER_BLOCK_P (target)
	      && !(single_succ_edge (target)->flags & EDGE_CROSSING)
	      && single_succ (target) != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      new_target = single_succ (target);
	      if (target == new_target)
		counter = n_basic_blocks;
	      else if (!optimize)
		{
		  /* When not optimizing, ensure that edges or forwarder
		     blocks with different locus are not optimized out.  */
		  int locus = single_succ_edge (target)->goto_locus;

		  if (locus && goto_locus && !locator_eq (locus, goto_locus))
		    counter = n_basic_blocks;
		  else if (locus)
		    goto_locus = locus;

		  if (INSN_P (BB_END (target)))
		    {
		      locus = INSN_LOCATOR (BB_END (target));

		      if (locus && goto_locus
			  && !locator_eq (locus, goto_locus))
			counter = n_basic_blocks;
		      else if (locus)
			goto_locus = locus;
		    }
		}
	    }

	  /* Allow threading over at most one edge at a time to simplify
	     updating of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = XNEWVEC (edge, n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks - NUM_FIXED_BLOCKS);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  e->goto_locus = goto_locus;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    b->flags |= BB_FORWARDER_BLOCK;

	  do
	    {
	      edge t;

	      if (!single_succ_p (first))
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = single_succ_edge (first);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
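/* Schematically, the insn chain goes from

     ... | A | X ... | B | ...		(A ends in "goto B")

   to

     ... | X ... | A | B | ...		(A relinked right before B)

   after which the two now-contiguous blocks are merged.  */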
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  df_set_bb_dirty (a);

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */

static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B temporarily add the jump table
     to block B so that it will also be moved to the correct location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return a basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (b) != BB_PARTITION (c))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Remove the memory attributes of MEM expressions X and Y
   wherever they are not equal.  */
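/* For example, if X is a MEM with alias set 2 and Y an otherwise matching
   MEM with alias set 4, both alias sets are reset to 0 (which conflicts
   with everything), so whichever insn survives crossjumping remains
   conservatively correct for both original contexts.  */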
void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  /* __builtin_unreachable() may lead to empty blocks (ending with
     NOTE_INSN_BASIC_BLOCK).  They may be crossjumped.  */
  if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2))
    return true;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      if (!hard_reg_set_equal_p (i1_regset, i2_regset))
	return false;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence of equivalent instructions.  Store the first insns for that
   sequence in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
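/* Roughly: if BB1 ends in

	i3: d = a * 2;  i4: e = d + b;  jump L

   and BB2 ends in equivalent copies of i3 and i4 followed by an
   equivalent jump, the function returns 2 with *F1 and *F2 pointing at
   the two copies of i3.  (The insns here are illustrative only.)  */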
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!NONDEBUG_INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!NONDEBUG_INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!old_insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !NONDEBUG_INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !NONDEBUG_INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
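/* For instance, a block ending in "if (x == 0) goto A" that falls through
   to B matches a block ending in "if (x != 0) goto B" that falls through
   to A: with the REVERSE flag set below, swapping branch and fallthru
   destinations makes the two conditions equivalent.  */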
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (single_succ_p (bb1)
      && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (single_succ_p (bb2)
	    && (single_succ_edge (bb2)->flags
		& (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = single_succ_edge (f1->dest);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = single_succ_edge (f2->dest);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && optimize_bb_for_speed_p (bb1)
	  && optimize_bb_for_speed_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are the
	     same, the jump tables are the same too.  So disable crossjumping
	     of blocks BB1 and BB2 because when deleting the common insns in
	     the end of BB1 by delete_basic_block () the jump table would be
	     deleted too.  */
	  /* If LABEL2 is referenced in BB1->END do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		{
		  identical = true;
		}
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = old_insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!old_insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find possible
     fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If the number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? single_succ (fallthru1->dest): fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? single_succ (fallthru2->dest): fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* The same checks as in try_crossjump_to_edge.  It is required for the RTL
     version of sequence abstraction.  */
  FOR_EACH_EDGE (e1, ei, bb2->succs)
    {
      edge e2;
      edge_iterator ei;
      basic_block d1 = e1->dest;

      if (FORWARDER_BLOCK_P (d1))
	d1 = EDGE_SUCC (d1, 0)->dest;

      FOR_EACH_EDGE (e2, ei, bb1->succs)
	{
	  basic_block d2 = e2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d1 == d2)
	    break;
	}

      if (!e2)
	return false;
    }

  return true;
}
/* Returns true if basic block BB has a preserved label.  */

static bool
block_has_preserve_label (basic_block bb)
{
  return (bb
	  && block_label (bb)
	  && LABEL_PRESERVE_P (block_label (bb)));
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
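/* A typical source-level trigger (illustrative only):

     if (p)
       { a = f (x); goto done; }
     else
       { a = g (x); goto done; }

   If both arms end in the same tail of instructions, the tail of E1->SRC
   is deleted and control is redirected into the matching tail of E2->SRC,
   so the common code is emitted only once.  */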
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && reload_completed)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (single_pred_p (src1)
      && FORWARDER_BLOCK_P (src1))
    e1 = single_pred_edge (src1), src1 = e1->src;

  if (single_pred_p (src2)
      && FORWARDER_BLOCK_P (src2))
    e2 = single_pred_edge (src2), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (single_succ (e1->dest)))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (single_succ (e2->dest)))
    return false;

  /* Likewise with dead code (possibly newly created by the other optimizations
     of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Avoid deleting preserve label when redirecting ABNORMAL edges.  */
  if (block_has_preserve_label (e1->dest)
      && (e1->flags & EDGE_ABNORMAL))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
	  && tablejump_p (BB_END (src2), &label2, &table2)
	  && label1 != label2)
	{
	  replace_label_data rr;
	  rtx insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  rr.r1 = label1;
	  rr.r2 = label2;
	  rr.update_label_nuses = true;
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (src1))
		for_each_rtx (&insn, replace_label, &rr);
	    }
	}
    }

  /* Avoid splitting if possible.  We must always split when SRC2 has
     EH predecessor edges, or we may end up with basic blocks with both
     normal and EH predecessor edges.  */
  if (newpos2 == BB_HEAD (src2)
      && !(EDGE_PRED (src2, 0)->flags & EDGE_EH))
    redirect_to = src2;
  else
    {
      if (newpos2 == BB_HEAD (src2))
	{
	  /* Skip possible basic block header.  */
	  if (LABEL_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  if (NOTE_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	  while (DEBUG_INSN_P (newpos2))
	    newpos2 = NEXT_INSN (newpos2);
	}

      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  /* We may have some registers visible through the block.  */
  df_set_bb_dirty (redirect_to);

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = single_succ (d);

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = single_succ (d2);
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  single_succ_edge (s->dest)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  single_succ_edge (s2->dest)->count -= s2->count;
	  if (single_succ_edge (s2->dest)->count < 0)
	    single_succ_edge (s2->dest)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  /* Adjust count and frequency for the block.  An earlier jump
     threading pass may have left the profile in an inconsistent
     state (see update_bb_profile_for_threading) so we must be
     prepared for overflows.  */
  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  if (redirect_to->frequency > BB_FREQ_MAX)
    redirect_to->frequency = BB_FREQ_MAX;
  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_INSN_BASIC_BLOCK_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  while (DEBUG_INSN_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = single_succ (redirect_from);

  redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);
  if (redirect_to != src2)
    update_forwarder_flag (src2);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes made.  */
static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (optimize_bb_for_size_p (bb)
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (EDGE_PRED (bb, 0)->src) !=
      BB_PARTITION (EDGE_PRED (bb, 1)->src)
      || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  fallthru = e;
	  break;
	}
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor (or, a
	 fallthru predecessor if we are in cfglayout mode).  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(df_get_bb_dirty (e->src))
		  && !(df_get_bb_dirty (fallthru->src))))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(df_get_bb_dirty (e->src))
		  && !(df_get_bb_dirty (e2->src))))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  if (changed)
    crossjumps_occured = true;

  return changed;
}
/* Return true if BB contains just a bb note, or a bb note followed
   by only DEBUG_INSNs.  */

static bool
trivially_empty_bb_p (basic_block bb)
{
  rtx insn = BB_END (bb);

  while (1)
    {
      if (insn == BB_HEAD (bb))
	return true;
      if (!DEBUG_INSN_P (insn))
	return false;
      insn = PREV_INSN (insn);
    }
}
/* Do simple CFG optimizations - basic block merging, simplifying of jump
   instructions etc.  Return nonzero if changes were made.  */
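/* The pass iterates to a fixed point: each transformation below (block
   deletion, merging, jump simplification, edge forwarding, crossjumping)
   can expose new opportunities for the others, so the walk over the
   blocks repeats for as long as any of them reports a change.  */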
1838 static bool
1839 try_optimize_cfg (int mode)
1841 bool changed_overall = false;
1842 bool changed;
1843 int iterations = 0;
1844 basic_block bb, b, next;
1846 if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1847 clear_bb_flags ();
1849 crossjumps_occured = false;
1851 FOR_EACH_BB (bb)
1852 update_forwarder_flag (bb);
1854 if (! targetm.cannot_modify_jumps_p ())
1856 first_pass = true;
1857 /* Attempt to merge blocks as made possible by edge removal. If
1858 a block has only one successor, and the successor has only
1859 one predecessor, they may be combined. */
1862 changed = false;
1863 iterations++;
1865 if (dump_file)
1866 fprintf (dump_file,
1867 "\n\ntry_optimize_cfg iteration %i\n\n",
1868 iterations);
	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  This is either
		 blocks with no predecessors, or empty blocks with no
		 successors.  However, if the empty block with no
		 successors is the successor of the ENTRY_BLOCK, it is
		 kept.  This ensures that the ENTRY_BLOCK will have a
		 successor, which is a precondition for many RTL
		 passes.  Empty blocks may result from expanding
		 __builtin_unreachable ().  */
	      if (EDGE_COUNT (b->preds) == 0
		  || (EDGE_COUNT (b->succs) == 0
		      && trivially_empty_bb_p (b)
		      && single_succ_edge (ENTRY_BLOCK_PTR)->dest != b))
		{
		  c = b->prev_bb;
		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  /* Avoid trying to remove ENTRY_BLOCK_PTR.  */
		  b = (c == ENTRY_BLOCK_PTR ? c->next_bb : c);
		  continue;
		}

	      /* Remove code labels no longer used.  */
	      if (single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !(single_pred_edge (b)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (single_pred (b) == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (single_pred (b)))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (single_pred (b)))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label, false);
		  /* If the case label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_KIND (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		      if (BB_END (b) == bb_note)
			BB_END (b) = label;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}
	      /* If we fall through an empty block, we can remove it.  */
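	      /* (The n_basic_blocks test below keeps at least one real
		 block around besides the fixed entry/exit blocks.)  */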
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && single_pred_p (b)
		  && (single_pred_edge (b)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (single_succ_edge (b)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > NUM_FIXED_BLOCKS + 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (single_pred_edge (b),
					    single_succ (b));
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		  continue;
		}

	      if (single_succ_p (b)
		  && (s = single_succ_edge (b))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && single_pred_p (c)
		  && b != c)
		{
		  /* When not in cfg_layout mode, use code aware of
		     reordering INSNs.  This code possibly creates new
		     basic blocks, so it does not fit the merge_blocks
		     interface and is kept here in the hope that it will
		     become useless once more of the compiler is
		     transformed to use cfg_layout mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}
	      /* Simplify branch over branch.  */
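	      /* Schematically: a conditional branch jumping around an
		 unconditional one, as in

		     if (cond) goto L1;  goto L2;  L1: ...

		 can often be rewritten as a single reversed condjump
		 "if (!cond) goto L2".  */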
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (single_succ_p (b)
		  && single_succ (b) != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (single_succ_edge (b),
						     single_succ (b),
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}
	      /* Simplify branch to branch.  */
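	      /* I.e., forward edges that enter a forwarder block on to
		 that block's final destination.  */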
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  FOR_ALL_BB (b)
    b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK);

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, prev_bb;

  find_unreachable_blocks ();
  /* When we're in GIMPLE mode and there may be debug insns, we should
     delete blocks in reverse dominator order, so as to get a chance
     to substitute all released DEFs into debug stmts.  If we don't
     have dominator information, walking blocks backward gives us a
     better chance of retaining most debug information than
     otherwise.  */
  if (MAY_HAVE_DEBUG_STMTS && current_ir_type () == IR_GIMPLE
      && dom_info_available_p (CDI_DOMINATORS))
    {
      for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      /* Speed up the removal of blocks that don't dominate
		 others.  Walking backwards, this should be the common
		 case.  */
	      if (!first_dom_son (CDI_DOMINATORS, b))
		delete_basic_block (b);
	      else
		{
		  VEC (basic_block, heap) *h
		    = get_all_dominated_blocks (CDI_DOMINATORS, b);
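		  /* Every block dominated by an unreachable block is
		     itself unreachable (the assert below checks this),
		     so the entire dominated set can go in one sweep.  */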
		  while (VEC_length (basic_block, h))
		    {
		      b = VEC_pop (basic_block, h);

		      prev_bb = b->prev_bb;

		      gcc_assert (!(b->flags & BB_REACHABLE));

		      delete_basic_block (b);
		    }

		  VEC_free (basic_block, heap, h);
		}

	      changed = true;
	    }
	}
    }
  else
    {
      for (b = EXIT_BLOCK_PTR->prev_bb; b != ENTRY_BLOCK_PTR; b = prev_bb)
	{
	  prev_bb = b->prev_bb;

	  if (!(b->flags & BB_REACHABLE))
	    {
	      delete_basic_block (b);
	      changed = true;
	    }
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Delete any jump tables never referenced.  We can't delete them at the
   time of removing the tablejump insn, as they are referenced by the
   preceding insns computing the destination, so we delay deleting them
   and garbage collect them once life information is computed.  */
void
delete_dead_jumptables (void)
{
  basic_block bb;

  /* A dead jump table does not belong to any basic block.  Scan insns
     between two adjacent basic blocks.  */
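  /* Such a table shows up as a code label immediately followed by a
     JUMP_TABLE_DATA insn, where the label's only remaining "use" is
     its own preservation flag.  */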
  FOR_EACH_BB (bb)
    {
      rtx insn, next;

      for (insn = NEXT_INSN (BB_END (bb));
	   insn && !NOTE_INSN_BASIC_BLOCK_P (insn);
	   insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (LABEL_P (insn)
	      && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn)
	      && JUMP_TABLE_DATA_P (next))
	    {
	      rtx label = insn, jump = next;

	      if (dump_file)
		fprintf (dump_file, "Dead jumptable %i removed\n",
			 INSN_UID (insn));

	      next = NEXT_INSN (next);
	      delete_insn (jump);
	      delete_insn (label);
	    }
	}
    }
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */

bool
cleanup_cfg (int mode)
{
  bool changed = false;

  /* Set the cfglayout mode flag here.  We could update all the callers
     but that is just inconvenient, especially given that we eventually
     want to have cfglayout mode as the default.  */
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    mode |= CLEANUP_CFGLAYOUT;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & CLEANUP_NO_INSN_DEL)
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  /* To tail-merge blocks ending in the same noreturn function (e.g.
     a call to abort) we have to insert fake edges to exit.  Do this
     here once.  The fake edges do not interfere with any other CFG
     cleanups.  */
  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  if (!dbg_cnt (cfg_cleanup))
    return changed;
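  /* Iterate: each try_optimize_cfg round can orphan further blocks
     and leave more trivially dead insns behind, so keep going until
     the cheap cleanups stop making progress.  */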
  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (!(mode & CLEANUP_NO_INSN_DEL))
	{
	  /* Try to remove some trivially dead insns when doing an expensive
	     cleanup.  But delete_trivially_dead_insns doesn't work after
	     reload (it only handles pseudos) and run_fast_dce is too costly
	     to run in every iteration.

	     For effective cross jumping, we really want to run a fast DCE to
	     clean up any dead conditions, or they get in the way of performing
	     useful tail merges.

	     Other transformations in cleanup_cfg are not so sensitive to dead
	     code, so delete_trivially_dead_insns or even doing nothing at all
	     is good enough.  */
	  if ((mode & CLEANUP_EXPENSIVE) && !reload_completed
	      && !delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	  else if ((mode & CLEANUP_CROSSJUMP)
		   && crossjumps_occured)
	    run_fast_dce ();
	}
      else
	break;
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  /* Don't call delete_dead_jumptables in cfglayout mode, because
     that function assumes that jump tables are in the insns stream.
     But we also don't _have_ to delete dead jumptables in cfglayout
     mode because we shouldn't even be looking at things that are
     not in a basic block.  Dead jumptables are cleaned up when
     going out of cfglayout mode.  */
  if (!(mode & CLEANUP_CFGLAYOUT))
    delete_dead_jumptables ();

  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}
static unsigned int
rest_of_handle_jump (void)
{
  if (crtl->tail_call_emit)
    fixup_tail_calls ();
  return 0;
}
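/* This pass dumps as "sibling": at this point its only job is fixing
   up tail (i.e. sibling) calls emitted during expansion.  */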
struct rtl_opt_pass pass_jump =
{
 {
  RTL_PASS,
  "sibling",                            /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump,                  /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_verify_flow,                     /* todo_flags_finish */
 }
};
static unsigned int
rest_of_handle_jump2 (void)
{
  delete_trivially_dead_insns (get_insns (), max_reg_num ());
  if (dump_file)
    dump_flow_info (dump_file, dump_flags);
  cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0)
	       | (flag_thread_jumps ? CLEANUP_THREADING : 0));
  return 0;
}
struct rtl_opt_pass pass_jump2 =
{
 {
  RTL_PASS,
  "jump",                               /* name */
  NULL,                                 /* gate */
  rest_of_handle_jump2,                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_JUMP,                              /* tv_id */
  0,                                    /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  TODO_ggc_collect,                     /* todo_flags_start */
  TODO_dump_func | TODO_verify_rtl_sharing, /* todo_flags_finish */
 }
};