/* Control flow optimization code for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
/* This file contains the control flow optimizer.  The main entry point is
   cleanup_cfg.  The following optimizations are performed:

   - Unreachable block removal
   - Edge forwarding (an edge to a forwarder block is forwarded to its
     successor.  Simplification of the branch instruction is performed by
     the underlying infrastructure, so the branch can be converted to a
     simplejump or eliminated).
   - Cross jumping (tail merging)
   - Conditional jump-around-simplejump simplification
   - Basic block merging.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "timevar.h"
#include "output.h"
#include "insn-config.h"
#include "flags.h"
#include "recog.h"
#include "toplev.h"
#include "cselib.h"
#include "params.h"
#include "tm_p.h"
#include "target.h"
#include "cfglayout.h"
#include "emit-rtl.h"
/* cleanup_cfg maintains the following flags for each basic block.  */

enum bb_flags
{
  /* Set if BB is the forwarder block to avoid too many
     forwarder_block_p calls.  */
  BB_FORWARDER_BLOCK = 1,
  BB_NONTHREADABLE_BLOCK = 2
};

#define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
#define BB_SET_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
#define BB_CLEAR_FLAG(BB, FLAG) \
  (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))

#define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
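
/* Note that these flags live in the otherwise unused BB->aux field;
   try_optimize_cfg clears aux for all blocks (via clear_aux_for_blocks)
   before returning, so the flags never leak out of this file.  */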
/* Set to true when we are running the first pass of the try_optimize_cfg
   loop.  */
static bool first_pass;
static bool try_crossjump_to_edge (int, edge, edge);
static bool try_crossjump_bb (int, basic_block);
static bool outgoing_edges_match (int, basic_block, basic_block);
static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
static bool insns_match_p (int, rtx, rtx);

static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
static bool try_optimize_cfg (int);
static bool try_simplify_condjump (basic_block);
static bool try_forward_edges (int, basic_block);
static edge thread_jump (int, edge, basic_block);
static bool mark_effect (rtx, bitmap);
static void notice_new_block (basic_block);
static void update_forwarder_flag (basic_block);
static int mentions_nonequal_regs (rtx *, void *);
static void merge_memattrs (rtx, rtx);
/* Set flags for a newly created block.  */

static void
notice_new_block (basic_block bb)
{
  if (!bb)
    return;

  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Recompute the forwarder flag after the block has been modified.  */

static void
update_forwarder_flag (basic_block bb)
{
  if (forwarder_block_p (bb))
    BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
  else
    BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
}
/* Simplify a conditional jump around an unconditional jump.
   Return true if something changed.  */
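/* Schematically, the shape handled here is

	if (cond) goto LABEL;		     if (!cond) goto DEST;
	goto DEST;			=>
     LABEL:				  LABEL:

   where the "goto DEST" block is a forwarder with a single predecessor:
   the condition is inverted, the forwarder block is deleted, and what was
   the branch edge becomes the fallthru edge.  */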
static bool
try_simplify_condjump (basic_block cbranch_block)
{
  basic_block jump_block, jump_dest_block, cbranch_dest_block;
  edge cbranch_jump_edge, cbranch_fallthru_edge;
  rtx cbranch_insn;

  /* Verify that there are exactly two successors.  */
  if (EDGE_COUNT (cbranch_block->succs) != 2)
    return false;

  /* Verify that we've got a normal conditional branch at the end
     of the block.  */
  cbranch_insn = BB_END (cbranch_block);
  if (!any_condjump_p (cbranch_insn))
    return false;

  cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
  cbranch_jump_edge = BRANCH_EDGE (cbranch_block);

  /* The next block must not have multiple predecessors, must not
     be the last block in the function, and must contain just the
     unconditional jump.  */
  jump_block = cbranch_fallthru_edge->dest;
  if (EDGE_COUNT (jump_block->preds) >= 2
      || jump_block->next_bb == EXIT_BLOCK_PTR
      || !FORWARDER_BLOCK_P (jump_block))
    return false;
  jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
	  || (cbranch_jump_edge->flags & EDGE_CROSSING)))
    return false;

  /* The conditional branch must target the block after the
     unconditional branch.  */
  cbranch_dest_block = cbranch_jump_edge->dest;

  if (cbranch_dest_block == EXIT_BLOCK_PTR
      || !can_fallthru (jump_block, cbranch_dest_block))
    return false;

  /* Invert the conditional branch.  */
  if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
    return false;

  if (dump_file)
    fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
	     INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));

  /* Success.  Update the CFG to match.  Note that after this point
     the edge variable names appear backwards; the redirection is done
     this way to preserve edge profile data.  */
  cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
						cbranch_dest_block);
  cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
						    jump_dest_block);
  cbranch_jump_edge->flags |= EDGE_FALLTHRU;
  cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
  update_br_prob_note (cbranch_block);

  /* Delete the block with the unconditional jump, and clean up the mess.  */
  delete_basic_block (jump_block);
  tidy_fallthru_edge (cbranch_jump_edge);
  update_forwarder_flag (cbranch_block);

  return true;
}
/* Attempt to prove that the operation is a NOOP using CSElib, or mark the
   effect on a register.  Used by jump threading.  */
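/* For example, a copy "r10 = r11" where cselib has already proven that r10
   and r11 hold the same value is a NOOP and leaves NONEQUAL untouched; any
   other SET of a register marks that register (and, for hard registers,
   each register it overlaps) as not provably equal.  */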
static bool
mark_effect (rtx exp, regset nonequal)
{
  int regno;
  rtx dest;
  switch (GET_CODE (exp))
    {
      /* In case we do clobber the register, mark it as equal, as we know the
	 value is dead so it doesn't have to match.  */
    case CLOBBER:
      if (REG_P (XEXP (exp, 0)))
	{
	  dest = XEXP (exp, 0);
	  regno = REGNO (dest);
	  CLEAR_REGNO_REG_SET (nonequal, regno);
	  if (regno < FIRST_PSEUDO_REGISTER)
	    {
	      int n = hard_regno_nregs[regno][GET_MODE (dest)];
	      while (--n > 0)
		CLEAR_REGNO_REG_SET (nonequal, regno + n);
	    }
	}
      return false;

    case SET:
      if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
	return false;
      dest = SET_DEST (exp);
      if (dest == pc_rtx)
	return false;
      if (!REG_P (dest))
	return true;
      regno = REGNO (dest);
      SET_REGNO_REG_SET (nonequal, regno);
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (dest)];
	  while (--n > 0)
	    SET_REGNO_REG_SET (nonequal, regno + n);
	}
      return false;

    default:
      return false;
    }
}
/* Return nonzero if X is a register set in regset DATA.
   Called via for_each_rtx.  */
static int
mentions_nonequal_regs (rtx *x, void *data)
{
  regset nonequal = (regset) data;
  if (REG_P (*x))
    {
      int regno;

      regno = REGNO (*x);
      if (REGNO_REG_SET_P (nonequal, regno))
	return 1;
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int n = hard_regno_nregs[regno][GET_MODE (*x)];
	  while (--n > 0)
	    if (REGNO_REG_SET_P (nonequal, regno + n))
	      return 1;
	}
    }
  return 0;
}
/* Attempt to prove that the basic block B will have no side effects and
   will always continue in the same edge if reached via E.  Return the edge
   if it exists, NULL otherwise.  */
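/* In the common case E->src ends in a test of some value and B ends in a
   second test of the same value; once cselib proves that nothing in between
   changed that value, the outcome of B's branch is implied by which edge E
   is, so E can be redirected past B entirely.  The comparison_dominates_p
   checks below implement exactly this implication between the two condition
   codes.  */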
static edge
thread_jump (int mode, edge e, basic_block b)
{
  rtx set1, set2, cond1, cond2, insn;
  enum rtx_code code1, code2, reversed_code2;
  bool reverse1 = false;
  unsigned i;
  regset nonequal;
  bool failed = false;
  reg_set_iterator rsi;

  if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
    return NULL;

  /* At the moment, we handle only conditional jumps, but later we may
     want to extend this code to tablejumps and others.  */
  if (EDGE_COUNT (e->src->succs) != 2)
    return NULL;
  if (EDGE_COUNT (b->succs) != 2)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  /* Second branch must end with onlyjump, as we will eliminate the jump.  */
  if (!any_condjump_p (BB_END (e->src)))
    return NULL;

  if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      return NULL;
    }

  set1 = pc_set (BB_END (e->src));
  set2 = pc_set (BB_END (b));
  if (((e->flags & EDGE_FALLTHRU) != 0)
      != (XEXP (SET_SRC (set1), 1) == pc_rtx))
    reverse1 = true;

  cond1 = XEXP (SET_SRC (set1), 0);
  cond2 = XEXP (SET_SRC (set2), 0);
  if (reverse1)
    code1 = reversed_comparison_code (cond1, BB_END (e->src));
  else
    code1 = GET_CODE (cond1);

  code2 = GET_CODE (cond2);
  reversed_code2 = reversed_comparison_code (cond2, BB_END (b));

  if (!comparison_dominates_p (code1, code2)
      && !comparison_dominates_p (code1, reversed_code2))
    return NULL;

  /* Ensure that the comparison operators are equivalent.
     ??? This is far too pessimistic.  We should allow swapped operands,
     different CCmodes, or for example comparisons for interval, that
     dominate even when operands are not equivalent.  */
  if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
    return NULL;

  /* Short circuit cases where block B contains some side effects, as we can't
     safely bypass it.  */
  for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
      {
	BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
	return NULL;
      }

  cselib_init (false);

  /* First process all values computed in the source basic block.  */
  for (insn = NEXT_INSN (BB_HEAD (e->src));
       insn != NEXT_INSN (BB_END (e->src));
       insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      cselib_process_insn (insn);

  nonequal = BITMAP_ALLOC (NULL);
  CLEAR_REG_SET (nonequal);

  /* Now assume that we've continued by the edge E to B and continue
     processing as if it were the same basic block.
     Our goal is to prove that the whole block is a NOOP.  */

  for (insn = NEXT_INSN (BB_HEAD (b));
       insn != NEXT_INSN (BB_END (b)) && !failed;
       insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == PARALLEL)
	    {
	      for (i = 0; i < (unsigned) XVECLEN (pat, 0); i++)
		failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
	    }
	  else
	    failed |= mark_effect (pat, nonequal);
	}

      cselib_process_insn (insn);
    }

  /* Later we should clear nonequal of dead registers.  So far we don't
     have life information in cfg_cleanup.  */
  if (failed)
    {
      BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
      goto failed_exit;
    }

  /* cond2 must not mention any register that is not equal to the
     former block.  */
  if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
    goto failed_exit;

  /* In case liveness information is available, we need to prove equivalence
     only of the live values.  */
  if (mode & CLEANUP_UPDATE_LIFE)
    AND_REG_SET (nonequal, b->global_live_at_end);

  EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, rsi)
    goto failed_exit;

  BITMAP_FREE (nonequal);
  cselib_finish ();
  if ((comparison_dominates_p (code1, code2) != 0)
      != (XEXP (SET_SRC (set2), 1) == pc_rtx))
    return BRANCH_EDGE (b);
  else
    return FALLTHRU_EDGE (b);

failed_exit:
  BITMAP_FREE (nonequal);
  cselib_finish ();
  return NULL;
}
/* Attempt to forward edges leaving basic block B.
   Return true if successful.  */
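/* "Forwarding" walks a chain of forwarder blocks starting at an edge's
   destination and redirects the edge to the end of the chain; e.g. an edge
   into a block containing only "goto L" is redirected straight to L.  When
   CLEANUP_THREADING is set, thread_jump is consulted at each step as well,
   so the walk can also skip blocks whose branch outcome is already known.  */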
static bool
try_forward_edges (int mode, basic_block b)
{
  bool changed = false;
  edge_iterator ei;
  edge e, *threaded_edges = NULL;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
    return false;

  for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
    {
      basic_block target, first;
      int counter;
      bool threaded = false;
      int nthreaded_edges = 0;
      bool may_thread = first_pass | (b->flags & BB_DIRTY);

      /* Skip complex edges because we don't know how to update them.

	 Still handle fallthru edges, as we can succeed to forward fallthru
	 edge to the same place as the branch edge of conditional branch
	 and turn conditional branch to an unconditional branch.  */
      if (e->flags & EDGE_COMPLEX)
	{
	  ei_next (&ei);
	  continue;
	}

      target = first = e->dest;
      counter = 0;

      /* If we are partitioning hot/cold basic_blocks, we don't want to mess
	 up jumps that cross between hot/cold sections.

	 Basic block partitioning may result in some jumps that appear
	 to be optimizable (or blocks that appear to be mergeable), but which
	 really must be left untouched (they are required to make it safely
	 across partition boundaries).  See the comments at the top of
	 bb-reorder.c:partition_hot_cold_basic_blocks for complete
	 details.  */

      if (flag_reorder_blocks_and_partition
	  && first != EXIT_BLOCK_PTR
	  && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
	return false;

      while (counter < n_basic_blocks)
	{
	  basic_block new_target = NULL;
	  bool new_target_threaded = false;
	  may_thread |= target->flags & BB_DIRTY;

	  if (FORWARDER_BLOCK_P (target)
	      && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
	      && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
	    {
	      /* Bypass trivial infinite loops.  */
	      if (target == EDGE_SUCC (target, 0)->dest)
		counter = n_basic_blocks;
	      new_target = EDGE_SUCC (target, 0)->dest;
	    }

	  /* Thread over only one edge at a time to simplify updating
	     of probabilities.  */
	  else if ((mode & CLEANUP_THREADING) && may_thread)
	    {
	      edge t = thread_jump (mode, e, target);
	      if (t)
		{
		  if (!threaded_edges)
		    threaded_edges = xmalloc (sizeof (*threaded_edges)
					      * n_basic_blocks);
		  else
		    {
		      int i;

		      /* Detect an infinite loop across blocks not
			 including the start block.  */
		      for (i = 0; i < nthreaded_edges; ++i)
			if (threaded_edges[i] == t)
			  break;
		      if (i < nthreaded_edges)
			{
			  counter = n_basic_blocks;
			  break;
			}
		    }

		  /* Detect an infinite loop across the start block.  */
		  if (t->dest == b)
		    break;

		  gcc_assert (nthreaded_edges < n_basic_blocks);
		  threaded_edges[nthreaded_edges++] = t;

		  new_target = t->dest;
		  new_target_threaded = true;
		}
	    }

	  if (!new_target)
	    break;

	  /* Avoid killing loop pre-headers, as they are the place the loop
	     optimizer wants to hoist code to.

	     For fallthru forwarders, the LOOP_BEG note must appear between
	     the header of block and CODE_LABEL of the loop, for non forwarders
	     it must appear before the JUMP_INSN.  */
	  if ((mode & CLEANUP_PRE_LOOP) && optimize)
	    {
	      rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
			  ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));

	      if (!NOTE_P (insn))
		insn = NEXT_INSN (insn);

	      for (; insn && !LABEL_P (insn) && !INSN_P (insn);
		   insn = NEXT_INSN (insn))
		if (NOTE_P (insn)
		    && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
		  break;

	      if (NOTE_P (insn))
		break;

	      /* Do not clean up branches to just past the end of a loop
		 at this time; it can mess up the loop optimizer's
		 recognition of some patterns.  */

	      insn = PREV_INSN (BB_HEAD (target));
	      if (insn && NOTE_P (insn)
		  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
		break;
	    }

	  counter++;
	  target = new_target;
	  threaded |= new_target_threaded;
	}

      if (counter >= n_basic_blocks)
	{
	  if (dump_file)
	    fprintf (dump_file, "Infinite loop in BB %i.\n",
		     target->index);
	}
      else if (target == first)
	; /* We didn't do anything.  */
      else
	{
	  /* Save the values now, as the edge may get removed.  */
	  gcov_type edge_count = e->count;
	  int edge_probability = e->probability;
	  int edge_frequency;
	  int n = 0;

	  /* Don't force if target is exit block.  */
	  if (threaded && target != EXIT_BLOCK_PTR)
	    {
	      notice_new_block (redirect_edge_and_branch_force (e, target));
	      if (dump_file)
		fprintf (dump_file, "Conditionals threaded.\n");
	    }
	  else if (!redirect_edge_and_branch (e, target))
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Forwarding edge %i->%i to %i failed.\n",
			 b->index, e->dest->index, target->index);
	      ei_next (&ei);
	      continue;
	    }

	  /* We successfully forwarded the edge.  Now update profile
	     data: for each edge we traversed in the chain, remove
	     the original edge's execution count.  */
	  edge_frequency = ((edge_probability * b->frequency
			     + REG_BR_PROB_BASE / 2)
			    / REG_BR_PROB_BASE);

	  if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
	    BB_SET_FLAG (b, BB_FORWARDER_BLOCK);

	  do
	    {
	      edge t;

	      if (EDGE_COUNT (first->succs) > 1)
		{
		  gcc_assert (n < nthreaded_edges);
		  t = threaded_edges [n++];
		  gcc_assert (t->src == first);
		  update_bb_profile_for_threading (first, edge_frequency,
						   edge_count, t);
		  update_br_prob_note (first);
		}
	      else
		{
		  first->count -= edge_count;
		  if (first->count < 0)
		    first->count = 0;
		  first->frequency -= edge_frequency;
		  if (first->frequency < 0)
		    first->frequency = 0;
		  /* It is possible that as the result of
		     threading we've removed edge as it is
		     threaded to the fallthru edge.  Avoid
		     getting out of sync.  */
		  if (n < nthreaded_edges
		      && first == threaded_edges [n]->src)
		    n++;
		  t = EDGE_SUCC (first, 0);
		}

	      t->count -= edge_count;
	      if (t->count < 0)
		t->count = 0;
	      first = t->dest;
	    }
	  while (first != target);

	  changed = true;
	  continue;
	}
      ei_next (&ei);
    }

  if (threaded_edges)
    free (threaded_edges);
  return changed;
}
/* Blocks A and B are to be merged into a single block.  A has no incoming
   fallthru edge, so it can be moved before B without adding or modifying
   any jumps (aside from the jump from A to B).  */
static void
merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
{
  rtx barrier;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (a) != BB_PARTITION (b)
	  || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
    return;

  barrier = next_nonnote_insn (BB_END (a));
  gcc_assert (BARRIER_P (barrier));
  delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
    reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
  a->flags |= BB_DIRTY;

  if (dump_file)
    fprintf (dump_file, "Moved block %d before %d and merged.\n",
	     a->index, b->index);

  /* Swap the records for the two blocks around.  */

  unlink_block (a);
  link_block (a, b->prev_bb);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Blocks A and B are to be merged into a single block.  B has no outgoing
   fallthru edge, so it can be moved after A without adding or modifying
   any jumps (aside from the jump from A to B).  */
static void
merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
{
  rtx barrier, real_b_end;
  rtx label, table;
  bool only_notes;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
	  || BB_PARTITION (a) != BB_PARTITION (b)))
    return;

  real_b_end = BB_END (b);

  /* If there is a jump table following block B, temporarily add the jump
     table to block B so that it will also be moved to the correct
     location.  */
  if (tablejump_p (BB_END (b), &label, &table)
      && prev_active_insn (label) == BB_END (b))
    {
      BB_END (b) = table;
    }

  /* There had better have been a barrier there.  Delete it.  */
  barrier = NEXT_INSN (BB_END (b));
  if (barrier && BARRIER_P (barrier))
    delete_insn (barrier);

  /* Move block and loop notes out of the chain so that we do not
     disturb their order.

     ??? A better solution would be to squeeze out all the non-nested notes
     and adjust the block trees appropriately.  Even better would be to have
     a tighter connection between block trees and rtl so that this is not
     necessary.  */
  only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
  gcc_assert (!only_notes);

  /* Scramble the insn chain.  */
  reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));

  /* Restore the real end of b.  */
  BB_END (b) = real_b_end;

  if (dump_file)
    fprintf (dump_file, "Moved block %d after %d and merged.\n",
	     b->index, a->index);

  /* Now blocks A and B are contiguous.  Merge them.  */
  merge_blocks (a, b);
}
/* Attempt to merge basic blocks that are potentially non-adjacent.
   Return NULL iff the attempt failed, otherwise return the basic block
   where cleanup_cfg should continue.  Because the merging commonly
   moves a basic block away or introduces another optimization
   possibility, return the basic block just before B so that cleanup_cfg
   doesn't need to iterate.

   It may be a good idea to return the basic block before C in the case
   C has been moved after B and originally appeared earlier in the
   insn sequence, but we have no information available about the
   relative ordering of these two.  Hopefully it is not too common.  */
static basic_block
merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
{
  basic_block next;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
	  || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
	  || BB_PARTITION (b) != BB_PARTITION (c)))
    return NULL;

  /* If B has a fallthru edge to C, no need to move anything.  */
  if (e->flags & EDGE_FALLTHRU)
    {
      int b_index = b->index, c_index = c->index;
      merge_blocks (b, c);
      update_forwarder_flag (b);

      if (dump_file)
	fprintf (dump_file, "Merged %d and %d without moving.\n",
		 b_index, c_index);

      return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
    }

  /* Otherwise we will need to move code around.  Do that only if expensive
     transformations are allowed.  */
  else if (mode & CLEANUP_EXPENSIVE)
    {
      edge tmp_edge, b_fallthru_edge;
      bool c_has_outgoing_fallthru;
      bool b_has_incoming_fallthru;
      edge_iterator ei;

      /* Avoid overactive code motion, as the forwarder blocks should be
	 eliminated by edge redirection instead.  One exception might have
	 been if B is a forwarder block and C has no fallthru edge, but
	 that should be cleaned up by bb-reorder instead.  */
      if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
	return NULL;

      /* We must make sure to not munge nesting of lexical blocks,
	 and loop notes.  This is done by squeezing out all the notes
	 and leaving them there to lie.  Not ideal, but functional.  */

      FOR_EACH_EDGE (tmp_edge, ei, c->succs)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      c_has_outgoing_fallthru = (tmp_edge != NULL);

      FOR_EACH_EDGE (tmp_edge, ei, b->preds)
	if (tmp_edge->flags & EDGE_FALLTHRU)
	  break;

      b_has_incoming_fallthru = (tmp_edge != NULL);
      b_fallthru_edge = tmp_edge;
      next = b->prev_bb;
      if (next == c)
	next = next->prev_bb;

      /* Otherwise, we're going to try to move C after B.  If C does
	 not have an outgoing fallthru, then it can be moved
	 immediately after B without introducing or modifying jumps.  */
      if (! c_has_outgoing_fallthru)
	{
	  merge_blocks_move_successor_nojumps (b, c);
	  return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
	}

      /* If B does not have an incoming fallthru, then it can be moved
	 immediately before C without introducing or modifying jumps.
	 C cannot be the first block, so we do not have to worry about
	 accessing a non-existent block.  */

      if (b_has_incoming_fallthru)
	{
	  basic_block bb;

	  if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
	    return NULL;
	  bb = force_nonfallthru (b_fallthru_edge);
	  if (bb)
	    notice_new_block (bb);
	}

      merge_blocks_move_predecessor_nojumps (b, c);
      return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
    }

  return NULL;
}
/* Remove the memory attributes from the MEM expressions in X and Y
   wherever they are not equal.  */
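/* For example, if two corresponding MEMs carry different alias sets, both
   are reset to alias set 0 (which conflicts with everything); if their
   MEM_EXPRs differ, both the expression and offset attributes are dropped.
   In each case the surviving attributes are the conservative meet of the
   two inputs, so either insn can stand in for the other after
   crossjumping.  */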
void
merge_memattrs (rtx x, rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == y)
    return;
  if (x == 0 || y == 0)
    return;

  code = GET_CODE (x);

  if (code != GET_CODE (y))
    return;

  if (GET_MODE (x) != GET_MODE (y))
    return;

  if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
    {
      if (! MEM_ATTRS (x))
	MEM_ATTRS (y) = 0;
      else if (! MEM_ATTRS (y))
	MEM_ATTRS (x) = 0;
      else
	{
	  rtx mem_size;

	  if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
	    {
	      set_mem_alias_set (x, 0);
	      set_mem_alias_set (y, 0);
	    }

	  if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
	    {
	      set_mem_expr (x, 0);
	      set_mem_expr (y, 0);
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }
	  else if (MEM_OFFSET (x) != MEM_OFFSET (y))
	    {
	      set_mem_offset (x, 0);
	      set_mem_offset (y, 0);
	    }

	  if (!MEM_SIZE (x))
	    mem_size = NULL_RTX;
	  else if (!MEM_SIZE (y))
	    mem_size = NULL_RTX;
	  else
	    mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
				     INTVAL (MEM_SIZE (y))));
	  set_mem_size (x, mem_size);
	  set_mem_size (y, mem_size);

	  set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
	  set_mem_align (y, MEM_ALIGN (x));
	}
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return;

	  for (j = 0; j < XVECLEN (x, i); j++)
	    merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));

	  break;

	case 'e':
	  merge_memattrs (XEXP (x, i), XEXP (y, i));
	}
    }
  return;
}
/* Return true if I1 and I2 are equivalent and thus can be crossjumped.  */

static bool
insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
{
  rtx p1, p2;

  /* Verify that I1 and I2 are equivalent.  */
  if (GET_CODE (i1) != GET_CODE (i2))
    return false;

  p1 = PATTERN (i1);
  p2 = PATTERN (i2);

  if (GET_CODE (p1) != GET_CODE (p2))
    return false;

  /* If this is a CALL_INSN, compare register usage information.
     If we don't check this on stack register machines, the two
     CALL_INSNs might be merged leaving reg-stack.c with mismatching
     numbers of stack registers in the same basic block.
     If we don't check this on machines with delay slots, a delay slot may
     be filled that clobbers a parameter expected by the subroutine.

     ??? We take the simple route for now and assume that if they're
     equal, they were constructed identically.  */

  if (CALL_P (i1)
      && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
			CALL_INSN_FUNCTION_USAGE (i2))
	  || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
    return false;

#ifdef STACK_REGS
  /* If cross_jump_death_matters is not 0, the insn's mode
     indicates whether or not the insn contains any stack-like
     regs.  */

  if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
    {
      /* If register stack conversion has already been done, then
	 death notes must also be compared before it is certain that
	 the two instruction streams match.  */

      rtx note;
      HARD_REG_SET i1_regset, i2_regset;

      CLEAR_HARD_REG_SET (i1_regset);
      CLEAR_HARD_REG_SET (i2_regset);

      for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));

      for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
	if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
	  SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));

      GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);

      return false;

    done:
      ;
    }
#endif

  if (reload_completed
      ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
    return true;

  /* Do not do EQUIV substitution after reload.  First, we're undoing the
     work of reload_cse.  Second, we may be undoing the work of the post-
     reload splitting pass.  */
  /* ??? Possibly add a new phase switch variable that can be used by
     targets to disallow the troublesome insns after splitting.  */
  if (!reload_completed)
    {
      /* The following code helps take care of G++ cleanups.  */
      rtx equiv1 = find_reg_equal_equiv_note (i1);
      rtx equiv2 = find_reg_equal_equiv_note (i2);

      if (equiv1 && equiv2
	  /* If the equivalences are not to a constant, they may
	     reference pseudos that no longer exist, so we can't
	     use them.  */
	  && (! reload_completed
	      || (CONSTANT_P (XEXP (equiv1, 0))
		  && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
	{
	  rtx s1 = single_set (i1);
	  rtx s2 = single_set (i2);
	  if (s1 != 0 && s2 != 0
	      && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
	    {
	      validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
	      validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
	      if (! rtx_renumbered_equal_p (p1, p2))
		cancel_changes (0);
	      else if (apply_change_group ())
		return true;
	    }
	}
    }

  return false;
}
/* Look through the insns at the end of BB1 and BB2 and find the longest
   sequence that is equivalent.  Store the first insns for that sequence
   in *F1 and *F2 and return the sequence length.

   To simplify callers of this function, if the blocks match exactly,
   store the head of the blocks in *F1 and *F2.  */
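/* The walk below proceeds backwards from the block ends: simple jumps are
   skipped first (their equivalence follows from the matched outgoing
   edges), then insns are compared pairwise with insns_match_p until the
   first mismatch.  On matched pairs, differing REG_EQUAL notes are removed
   and MEM attributes are unified so that the surviving copy is valid for
   both paths.  */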
static int
flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
		      basic_block bb2, rtx *f1, rtx *f2)
{
  rtx i1, i2, last1, last2, afterlast1, afterlast2;
  int ninsns = 0;

  /* Skip simple jumps at the end of the blocks.  Complex jumps still
     need to be compared for equivalence, which we'll do below.  */

  i1 = BB_END (bb1);
  last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
  if (onlyjump_p (i1)
      || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
    {
      last1 = i1;
      i1 = PREV_INSN (i1);
    }

  i2 = BB_END (bb2);
  if (onlyjump_p (i2)
      || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
    {
      last2 = i2;
      /* Count everything except for unconditional jump as insn.  */
      if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
	ninsns++;
      i2 = PREV_INSN (i2);
    }

  while (true)
    {
      /* Ignore notes.  */
      while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
	i1 = PREV_INSN (i1);

      while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
	i2 = PREV_INSN (i2);

      if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
	break;

      if (!insns_match_p (mode, i1, i2))
	break;

      merge_memattrs (i1, i2);

      /* Don't begin a cross-jump with a NOTE insn.  */
      if (INSN_P (i1))
	{
	  /* If the merged insns have different REG_EQUAL notes, then
	     remove them.  */
	  rtx equiv1 = find_reg_equal_equiv_note (i1);
	  rtx equiv2 = find_reg_equal_equiv_note (i2);

	  if (equiv1 && !equiv2)
	    remove_note (i1, equiv1);
	  else if (!equiv1 && equiv2)
	    remove_note (i2, equiv2);
	  else if (equiv1 && equiv2
		   && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
	    {
	      remove_note (i1, equiv1);
	      remove_note (i2, equiv2);
	    }

	  afterlast1 = last1, afterlast2 = last2;
	  last1 = i1, last2 = i2;
	  ninsns++;
	}

      i1 = PREV_INSN (i1);
      i2 = PREV_INSN (i2);
    }

#ifdef HAVE_cc0
  /* Don't allow the insn after a compare to be shared by
     cross-jumping unless the compare is also shared.  */
  if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
    last1 = afterlast1, last2 = afterlast2, ninsns--;
#endif

  /* Include preceding notes and labels in the cross-jump.  One,
     this may bring us to the head of the blocks as requested above.
     Two, it keeps line number notes as matched as may be.  */
  if (ninsns)
    {
      while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
	last1 = PREV_INSN (last1);

      while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
	last2 = PREV_INSN (last2);

      *f1 = last1;
      *f2 = last2;
    }

  return ninsns;
}
/* Return true iff outgoing edges of BB1 and BB2 match, together with
   the branch instruction.  This means that if we commonize the control
   flow before the end of the basic block, the semantics remain unchanged.

   We may assume that there exists one edge with a common destination.  */
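/* Three cases are distinguished below: a single (unconditional or fake)
   exit edge; a pair of conditional branches whose conditions are
   equivalent, possibly with the branch and fallthru roles swapped; and the
   generic case of computed jumps, tablejumps, and trapping instructions,
   where the edge counts and flags must match edge by edge.  */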
static bool
outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
{
  int nehedges1 = 0, nehedges2 = 0;
  edge fallthru1 = 0, fallthru2 = 0;
  edge e1, e2;
  edge_iterator ei;

  /* If BB1 has only one successor, we may be looking at either an
     unconditional jump, or a fake edge to exit.  */
  if (EDGE_COUNT (bb1->succs) == 1
      && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
      && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
    return (EDGE_COUNT (bb2->succs) == 1
	    && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
	    && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));

  /* Match conditional jumps - this may get tricky when fallthru and branch
     edges are crossed.  */
  if (EDGE_COUNT (bb1->succs) == 2
      && any_condjump_p (BB_END (bb1))
      && onlyjump_p (BB_END (bb1)))
    {
      edge b1, f1, b2, f2;
      bool reverse, match;
      rtx set1, set2, cond1, cond2;
      enum rtx_code code1, code2;

      if (EDGE_COUNT (bb2->succs) != 2
	  || !any_condjump_p (BB_END (bb2))
	  || !onlyjump_p (BB_END (bb2)))
	return false;

      b1 = BRANCH_EDGE (bb1);
      b2 = BRANCH_EDGE (bb2);
      f1 = FALLTHRU_EDGE (bb1);
      f2 = FALLTHRU_EDGE (bb2);

      /* Get around possible forwarders on fallthru edges.  Other cases
	 should be optimized out already.  */
      if (FORWARDER_BLOCK_P (f1->dest))
	f1 = EDGE_SUCC (f1->dest, 0);

      if (FORWARDER_BLOCK_P (f2->dest))
	f2 = EDGE_SUCC (f2->dest, 0);

      /* To simplify use of this function, return false if there are
	 unneeded forwarder blocks.  These will get eliminated later
	 during cleanup_cfg.  */
      if (FORWARDER_BLOCK_P (f1->dest)
	  || FORWARDER_BLOCK_P (f2->dest)
	  || FORWARDER_BLOCK_P (b1->dest)
	  || FORWARDER_BLOCK_P (b2->dest))
	return false;

      if (f1->dest == f2->dest && b1->dest == b2->dest)
	reverse = false;
      else if (f1->dest == b2->dest && b1->dest == f2->dest)
	reverse = true;
      else
	return false;

      set1 = pc_set (BB_END (bb1));
      set2 = pc_set (BB_END (bb2));
      if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
	  != (XEXP (SET_SRC (set2), 1) == pc_rtx))
	reverse = !reverse;

      cond1 = XEXP (SET_SRC (set1), 0);
      cond2 = XEXP (SET_SRC (set2), 0);
      code1 = GET_CODE (cond1);
      if (reverse)
	code2 = reversed_comparison_code (cond2, BB_END (bb2));
      else
	code2 = GET_CODE (cond2);

      if (code2 == UNKNOWN)
	return false;

      /* Verify codes and operands match.  */
      match = ((code1 == code2
		&& rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
		&& rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
	       || (code1 == swap_condition (code2)
		   && rtx_renumbered_equal_p (XEXP (cond1, 1),
					      XEXP (cond2, 0))
		   && rtx_renumbered_equal_p (XEXP (cond1, 0),
					      XEXP (cond2, 1))));

      /* If we return true, we will join the blocks.  Which means that
	 we will only have one branch prediction bit to work with.  Thus
	 we require the existing branches to have probabilities that are
	 roughly similar.  */
      if (match
	  && !optimize_size
	  && maybe_hot_bb_p (bb1)
	  && maybe_hot_bb_p (bb2))
	{
	  int prob2;

	  if (b1->dest == b2->dest)
	    prob2 = b2->probability;
	  else
	    /* Do not use f2 probability as f2 may be forwarded.  */
	    prob2 = REG_BR_PROB_BASE - b2->probability;

	  /* Fail if the difference in probabilities is greater than 50%.
	     This rules out two well-predicted branches with opposite
	     outcomes.  */
	  if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
	    {
	      if (dump_file)
		fprintf (dump_file,
			 "Outcomes of branch in bb %i and %i differ too much (%i %i)\n",
			 bb1->index, bb2->index, b1->probability, prob2);

	      return false;
	    }
	}

      if (dump_file && match)
	fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
		 bb1->index, bb2->index);

      return match;
    }

  /* Generic case - we are seeing a computed jump, table jump or trapping
     instruction.  */

  /* Check whether there are tablejumps in the end of BB1 and BB2.
     Return true if they are identical.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (bb1), &label1, &table1)
	  && tablejump_p (BB_END (bb2), &label2, &table2)
	  && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
	{
	  /* The labels should never be the same rtx.  If they really are
	     the same, the jump tables are the same too.  So disable
	     crossjumping of blocks BB1 and BB2 because when deleting the
	     common insns in the end of BB1 by delete_basic_block () the
	     jump table would be deleted too.  */
	  /* If LABEL2 is referenced in BB1->END, do not do anything
	     because we would lose information when replacing
	     LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
	  if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
	    {
	      /* Set IDENTICAL to true when the tables are identical.  */
	      bool identical = false;
	      rtx p1, p2;

	      p1 = PATTERN (table1);
	      p2 = PATTERN (table2);
	      if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
		identical = true;
	      else if (GET_CODE (p1) == ADDR_DIFF_VEC
		       && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
		       && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
		       && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
		{
		  int i;

		  identical = true;
		  for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
		    if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
		      identical = false;
		}

	      if (identical)
		{
		  replace_label_data rr;
		  bool match;

		  /* Temporarily replace references to LABEL1 with LABEL2
		     in BB1->END so that we could compare the instructions.  */
		  rr.r1 = label1;
		  rr.r2 = label2;
		  rr.update_label_nuses = false;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
		  if (dump_file && match)
		    fprintf (dump_file,
			     "Tablejumps in bb %i and %i match.\n",
			     bb1->index, bb2->index);

		  /* Set the original label in BB1->END because when deleting
		     a block whose end is a tablejump, the tablejump referenced
		     from the instruction is deleted too.  */
		  rr.r1 = label2;
		  rr.r2 = label1;
		  for_each_rtx (&BB_END (bb1), replace_label, &rr);

		  return match;
		}
	    }
	  return false;
	}
    }

  /* First ensure that the instructions match.  There may be many outgoing
     edges so this test is generally cheaper.  */
  if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
    return false;

  /* Search the outgoing edges, ensure that the counts do match, find
     possible fallthru and exception handling edges since these need more
     validation.  */
  if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
    return false;

  FOR_EACH_EDGE (e1, ei, bb1->succs)
    {
      e2 = EDGE_SUCC (bb2, ei.index);

      if (e1->flags & EDGE_EH)
	nehedges1++;

      if (e2->flags & EDGE_EH)
	nehedges2++;

      if (e1->flags & EDGE_FALLTHRU)
	fallthru1 = e1;
      if (e2->flags & EDGE_FALLTHRU)
	fallthru2 = e2;
    }

  /* If number of edges of various types does not match, fail.  */
  if (nehedges1 != nehedges2
      || (fallthru1 != 0) != (fallthru2 != 0))
    return false;

  /* Fallthru edges must be forwarded to the same destination.  */
  if (fallthru1)
    {
      basic_block d1 = (forwarder_block_p (fallthru1->dest)
			? EDGE_SUCC (fallthru1->dest, 0)->dest : fallthru1->dest);
      basic_block d2 = (forwarder_block_p (fallthru2->dest)
			? EDGE_SUCC (fallthru2->dest, 0)->dest : fallthru2->dest);

      if (d1 != d2)
	return false;
    }

  /* Ensure the same EH region.  */
  {
    rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
    rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);

    if (!n1 && n2)
      return false;

    if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
      return false;
  }

  /* We don't need to match the rest of edges as above checks should be enough
     to ensure that they are equivalent.  */
  return true;
}
/* E1 and E2 are edges with the same destination block.  Search their
   predecessors for common code.  If found, redirect control flow from
   (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC.  */
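/* Concretely: when SRC1 and SRC2 end in the same tail of insns, SRC2 is
   split just before the common tail (unless the tail is the whole block),
   SRC1 is split at the matching point, its now-redundant tail is deleted,
   and the edge out of the remaining head is redirected into the shared
   copy.  Profile counts of the shared tail and its outgoing edges are then
   re-aggregated to account for both entry paths.  */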
static bool
try_crossjump_to_edge (int mode, edge e1, edge e2)
{
  int nmatch;
  basic_block src1 = e1->src, src2 = e2->src;
  basic_block redirect_to, redirect_from, to_remove;
  rtx newpos1, newpos2;
  edge s;
  edge_iterator ei;

  newpos1 = newpos2 = NULL_RTX;

  /* If we have partitioned hot/cold basic blocks, it is a bad idea
     to try this optimization.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition && no_new_pseudos)
    return false;

  /* Search backward through forwarder blocks.  We don't need to worry
     about multiple entry or chained forwarders, as they will be optimized
     away.  We do this to look past the unconditional jump following a
     conditional jump that is required due to the current CFG shape.  */
  if (EDGE_COUNT (src1->preds) == 1
      && FORWARDER_BLOCK_P (src1))
    e1 = EDGE_PRED (src1, 0), src1 = e1->src;

  if (EDGE_COUNT (src2->preds) == 1
      && FORWARDER_BLOCK_P (src2))
    e2 = EDGE_PRED (src2, 0), src2 = e2->src;

  /* Nothing to do if we reach ENTRY, or a common source block.  */
  if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
    return false;
  if (src1 == src2)
    return false;

  /* Seeing more than one forwarder block would confuse us later...  */
  if (FORWARDER_BLOCK_P (e1->dest)
      && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
    return false;

  if (FORWARDER_BLOCK_P (e2->dest)
      && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
    return false;

  /* Likewise with dead code (possibly newly created by the other
     optimizations of cfg_cleanup).  */
  if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
    return false;

  /* Look for the common insn sequence, part the first ...  */
  if (!outgoing_edges_match (mode, src1, src2))
    return false;

  /* ... and part the second.  */
  nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);

  /* Don't proceed with the crossjump unless we found a sufficient number
     of matching instructions or the 'from' block was totally matched
     (such that its predecessors will hopefully be redirected and the
     block removed).  */
  if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
      && (newpos1 != BB_HEAD (src1)))
    return false;

  /* Here we know that the insns in the end of SRC1 which are common with SRC2
     will be deleted.
     If we have tablejumps in the end of SRC1 and SRC2
     they have been already compared for equivalence in outgoing_edges_match ()
     so replace the references to TABLE1 by references to TABLE2.  */
    {
      rtx label1, label2;
      rtx table1, table2;

      if (tablejump_p (BB_END (src1), &label1, &table1)
	  && tablejump_p (BB_END (src2), &label2, &table2)
	  && label1 != label2)
	{
	  replace_label_data rr;
	  rtx insn;

	  /* Replace references to LABEL1 with LABEL2.  */
	  rr.r1 = label1;
	  rr.r2 = label2;
	  rr.update_label_nuses = true;
	  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	    {
	      /* Do not replace the label in SRC1->END because when deleting
		 a block whose end is a tablejump, the tablejump referenced
		 from the instruction is deleted too.  */
	      if (insn != BB_END (src1))
		for_each_rtx (&insn, replace_label, &rr);
	    }
	}
    }

  /* Avoid splitting if possible.  */
  if (newpos2 == BB_HEAD (src2))
    redirect_to = src2;
  else
    {
      if (dump_file)
	fprintf (dump_file, "Splitting bb %i before %i insns\n",
		 src2->index, nmatch);
      redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
    }

  if (dump_file)
    fprintf (dump_file,
	     "Cross jumping from bb %i to bb %i; %i common insns\n",
	     src1->index, src2->index, nmatch);

  redirect_to->count += src1->count;
  redirect_to->frequency += src1->frequency;
  /* We may have some registers visible through the block.  */
  redirect_to->flags |= BB_DIRTY;

  /* Recompute the frequencies and counts of outgoing edges.  */
  FOR_EACH_EDGE (s, ei, redirect_to->succs)
    {
      edge s2;
      edge_iterator ei;
      basic_block d = s->dest;

      if (FORWARDER_BLOCK_P (d))
	d = EDGE_SUCC (d, 0)->dest;

      FOR_EACH_EDGE (s2, ei, src1->succs)
	{
	  basic_block d2 = s2->dest;
	  if (FORWARDER_BLOCK_P (d2))
	    d2 = EDGE_SUCC (d2, 0)->dest;
	  if (d == d2)
	    break;
	}

      s->count += s2->count;

      /* Take care to update possible forwarder blocks.  We verified
	 that there is no more than one in the chain, so we can't run
	 into an infinite loop.  */
      if (FORWARDER_BLOCK_P (s->dest))
	{
	  EDGE_SUCC (s->dest, 0)->count += s2->count;
	  s->dest->count += s2->count;
	  s->dest->frequency += EDGE_FREQUENCY (s);
	}

      if (FORWARDER_BLOCK_P (s2->dest))
	{
	  EDGE_SUCC (s2->dest, 0)->count -= s2->count;
	  if (EDGE_SUCC (s2->dest, 0)->count < 0)
	    EDGE_SUCC (s2->dest, 0)->count = 0;
	  s2->dest->count -= s2->count;
	  s2->dest->frequency -= EDGE_FREQUENCY (s);
	  if (s2->dest->frequency < 0)
	    s2->dest->frequency = 0;
	  if (s2->dest->count < 0)
	    s2->dest->count = 0;
	}

      if (!redirect_to->frequency && !src1->frequency)
	s->probability = (s->probability + s2->probability) / 2;
      else
	s->probability
	  = ((s->probability * redirect_to->frequency +
	      s2->probability * src1->frequency)
	     / (redirect_to->frequency + src1->frequency));
    }

  update_br_prob_note (redirect_to);

  /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1.  */

  /* Skip possible basic block header.  */
  if (LABEL_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  if (NOTE_P (newpos1))
    newpos1 = NEXT_INSN (newpos1);

  redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
  to_remove = EDGE_SUCC (redirect_from, 0)->dest;

  redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
  delete_basic_block (to_remove);

  update_forwarder_flag (redirect_from);

  return true;
}
/* Search the predecessors of BB for common insn sequences.  When found,
   share code between them by redirecting control flow.  Return true if
   any changes were made.  */
static bool
try_crossjump_bb (int mode, basic_block bb)
{
  edge e, e2, fallthru;
  bool changed;
  unsigned max, ix, ix2;
  basic_block ev, ev2;
  edge_iterator ei;

  /* Nothing to do if there are not at least two incoming edges.  */
  if (EDGE_COUNT (bb->preds) < 2)
    return false;

  /* Don't crossjump if this block ends in a computed jump,
     unless we are optimizing for size.  */
  if (!optimize_size
      && bb != EXIT_BLOCK_PTR
      && computed_jump_p (BB_END (bb)))
    return false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.c:partition_hot_cold_basic_blocks for complete details.  */

  if (flag_reorder_blocks_and_partition
      && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
	  || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
    return false;

  /* It is always cheapest to redirect a block that ends in a branch to
     a block that falls through into BB, as that adds no branches to the
     program.  We'll try that combination first.  */
  fallthru = NULL;
  max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);

  if (EDGE_COUNT (bb->preds) > max)
    return false;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      if (e->flags & EDGE_FALLTHRU)
	fallthru = e;
    }

  changed = false;
  for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
    {
      e = EDGE_PRED (ev, ix);
      ix++;

      /* As noted above, first try with the fallthru predecessor.  */
      if (fallthru)
	{
	  /* Don't combine the fallthru edge into anything else.
	     If there is a match, we'll do it the other way around.  */
	  if (e == fallthru)
	    continue;
	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(fallthru->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, fallthru))
	    {
	      changed = true;
	      ix = 0;
	      ev = bb;
	      continue;
	    }
	}

      /* Non-obvious work limiting check: Recognize that we're going
	 to call try_crossjump_bb on every basic block.  So if we have
	 two blocks with lots of outgoing edges (a switch) and they
	 share lots of common destinations, then we would do the
	 cross-jump check once for each common destination.

	 Now, if the blocks actually are cross-jump candidates, then
	 all of their destinations will be shared.  Which means that
	 we only need check them for cross-jump candidacy once.  We
	 can eliminate redundant checks of crossjump(A,B) by arbitrarily
	 choosing to do the check from the block for which the edge
	 in question is the first successor of A.  */
      if (EDGE_SUCC (e->src, 0) != e)
	continue;

      for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
	{
	  e2 = EDGE_PRED (ev2, ix2);
	  ix2++;

	  if (e2 == e)
	    continue;

	  /* We've already checked the fallthru edge above.  */
	  if (e2 == fallthru)
	    continue;

	  /* The "first successor" check above only prevents multiple
	     checks of crossjump(A,B).  In order to prevent redundant
	     checks of crossjump(B,A), require that A be the block
	     with the lowest index.  */
	  if (e->src->index > e2->src->index)
	    continue;

	  /* If nothing changed since the last attempt, there is nothing
	     we can do.  */
	  if (!first_pass
	      && (!(e->src->flags & BB_DIRTY)
		  && !(e2->src->flags & BB_DIRTY)))
	    continue;

	  if (try_crossjump_to_edge (mode, e, e2))
	    {
	      changed = true;
	      ev2 = bb;
	      ix = 0;
	      break;
	    }
	}
    }

  return changed;
}
/* Do simple CFG optimizations - basic block merging, simplification of jump
   instructions, etc.  Return nonzero if changes were made.  */
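/* The body below iterates to a fixed point: each pass walks all blocks,
   deleting trivially dead blocks and unused labels, merging single
   successor/single predecessor pairs, simplifying and forwarding jumps,
   and optionally crossjumping; the loop repeats while any pass reports a
   change, and first_pass relaxes the BB_DIRTY-based work limiting on the
   initial iteration.  */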
static bool
try_optimize_cfg (int mode)
{
  bool changed_overall = false;
  bool changed;
  int iterations = 0;
  basic_block bb, b, next;

  if (mode & CLEANUP_CROSSJUMP)
    add_noreturn_fake_exit_edges ();

  FOR_EACH_BB (bb)
    update_forwarder_flag (bb);

  if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
    clear_bb_flags ();

  if (! targetm.cannot_modify_jumps_p ())
    {
      first_pass = true;
      /* Attempt to merge blocks as made possible by edge removal.  If
	 a block has only one successor, and the successor has only
	 one predecessor, they may be combined.  */
      do
	{
	  changed = false;
	  iterations++;

	  if (dump_file)
	    fprintf (dump_file,
		     "\n\ntry_optimize_cfg iteration %i\n\n",
		     iterations);

	  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
	    {
	      basic_block c;
	      edge s;
	      bool changed_here = false;

	      /* Delete trivially dead basic blocks.  */
	      while (EDGE_COUNT (b->preds) == 0)
		{
		  c = b->prev_bb;
		  if (dump_file)
		    fprintf (dump_file, "Deleting block %i.\n",
			     b->index);

		  delete_basic_block (b);
		  if (!(mode & CLEANUP_CFGLAYOUT))
		    changed = true;
		  b = c;
		}

	      /* Remove code labels no longer used.  */
	      if (EDGE_COUNT (b->preds) == 1
		  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
		  && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
		  && LABEL_P (BB_HEAD (b))
		  /* If the previous block ends with a branch to this
		     block, we can't delete the label.  Normally this
		     is a condjump that is yet to be simplified, but
		     if CASE_DROPS_THRU, this can be a tablejump with
		     some element going to the same place as the
		     default (fallthru).  */
		  && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
		      || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
		      || ! label_is_jump_target_p (BB_HEAD (b),
						   BB_END (EDGE_PRED (b, 0)->src))))
		{
		  rtx label = BB_HEAD (b);

		  delete_insn_chain (label, label);
		  /* In case the label is undeletable, move it after the
		     BASIC_BLOCK note.  */
		  if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
		    {
		      rtx bb_note = NEXT_INSN (BB_HEAD (b));

		      reorder_insns_nobb (label, label, bb_note);
		      BB_HEAD (b) = bb_note;
		    }
		  if (dump_file)
		    fprintf (dump_file, "Deleted label in block %i.\n",
			     b->index);
		}

	      /* If we fall through an empty block, we can remove it.  */
	      if (!(mode & CLEANUP_CFGLAYOUT)
		  && EDGE_COUNT (b->preds) == 1
		  && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
		  && !LABEL_P (BB_HEAD (b))
		  && FORWARDER_BLOCK_P (b)
		  /* Note that forwarder_block_p true ensures that
		     there is a successor for this block.  */
		  && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
		  && n_basic_blocks > 1)
		{
		  if (dump_file)
		    fprintf (dump_file,
			     "Deleting fallthru block %i.\n",
			     b->index);

		  c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
		  redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
		  delete_basic_block (b);
		  changed = true;
		  b = c;
		}

	      if (EDGE_COUNT (b->succs) == 1
		  && (s = EDGE_SUCC (b, 0))
		  && !(s->flags & EDGE_COMPLEX)
		  && (c = s->dest) != EXIT_BLOCK_PTR
		  && EDGE_COUNT (c->preds) == 1
		  && b != c)
		{
		  /* When not in cfg_layout mode use code aware of reordering
		     INSNs.  This code possibly creates new basic blocks, so
		     it does not fit the merge_blocks interface and is kept
		     here in the hope that it will become useless once more
		     of the compiler is transformed to use cfg_layout
		     mode.  */

		  if ((mode & CLEANUP_CFGLAYOUT)
		      && can_merge_blocks_p (b, c))
		    {
		      merge_blocks (b, c);
		      update_forwarder_flag (b);
		      changed_here = true;
		    }
		  else if (!(mode & CLEANUP_CFGLAYOUT)
			   /* If the jump insn has side effects,
			      we can't kill the edge.  */
			   && (!JUMP_P (BB_END (b))
			       || (reload_completed
				   ? simplejump_p (BB_END (b))
				   : (onlyjump_p (BB_END (b))
				      && !tablejump_p (BB_END (b),
						       NULL, NULL))))
			   && (next = merge_blocks_move (s, b, c, mode)))
		    {
		      b = next;
		      changed_here = true;
		    }
		}

	      /* Simplify branch over branch.  */
	      if ((mode & CLEANUP_EXPENSIVE)
		  && !(mode & CLEANUP_CFGLAYOUT)
		  && try_simplify_condjump (b))
		changed_here = true;

	      /* If B has a single outgoing edge, but uses a
		 non-trivial jump instruction without side-effects, we
		 can either delete the jump entirely, or replace it
		 with a simple unconditional jump.  */
	      if (EDGE_COUNT (b->succs) == 1
		  && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
		  && onlyjump_p (BB_END (b))
		  && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
		  && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
						     (mode & CLEANUP_CFGLAYOUT) != 0))
		{
		  update_forwarder_flag (b);
		  changed_here = true;
		}

	      /* Simplify branch to branch.  */
	      if (try_forward_edges (mode, b))
		changed_here = true;

	      /* Look for shared code between blocks.  */
	      if ((mode & CLEANUP_CROSSJUMP)
		  && try_crossjump_bb (mode, b))
		changed_here = true;

	      /* Don't get confused by the index shift caused by
		 deleting blocks.  */
	      if (!changed_here)
		b = b->next_bb;
	      else
		changed = true;
	    }

	  if ((mode & CLEANUP_CROSSJUMP)
	      && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
	    changed = true;

#ifdef ENABLE_CHECKING
	  if (changed)
	    verify_flow_info ();
#endif

	  changed_overall |= changed;
	  first_pass = false;
	}
      while (changed);
    }

  if (mode & CLEANUP_CROSSJUMP)
    remove_fake_exit_edges ();

  clear_aux_for_blocks ();

  return changed_overall;
}
/* Delete all unreachable basic blocks.  */

bool
delete_unreachable_blocks (void)
{
  bool changed = false;
  basic_block b, next_bb;

  find_unreachable_blocks ();

  /* Delete all unreachable basic blocks.  */

  for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
    {
      next_bb = b->next_bb;

      if (!(b->flags & BB_REACHABLE))
	{
	  delete_basic_block (b);
	  changed = true;
	}
    }

  if (changed)
    tidy_fallthru_edges ();
  return changed;
}
/* Merge sequential blocks if possible.  */

bool
merge_seq_blocks (void)
{
  basic_block bb;
  bool changed = false;

  for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
    {
      if (EDGE_COUNT (bb->succs) == 1
	  && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
	{
	  /* Merge the blocks and retry.  */
	  merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
	  changed = true;
	  continue;
	}

      bb = bb->next_bb;
    }

  return changed;
}
/* Tidy the CFG by deleting unreachable code and whatnot.  */
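/* MODE is a bitmask of CLEANUP_* flags (see basic-block.h).  The bits
   consulted in this file include CLEANUP_EXPENSIVE, CLEANUP_CROSSJUMP,
   CLEANUP_THREADING, CLEANUP_PRE_LOOP, CLEANUP_UPDATE_LIFE,
   CLEANUP_CFGLAYOUT, CLEANUP_POST_REGSTACK, CLEANUP_LOG_LINKS and
   CLEANUP_NO_INSN_DEL.  */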
bool
cleanup_cfg (int mode)
{
  bool changed = false;

  timevar_push (TV_CLEANUP_CFG);
  if (delete_unreachable_blocks ())
    {
      changed = true;
      /* We've possibly created trivially dead code.  Clean it up right
	 now to introduce more opportunities for try_optimize_cfg.  */
      if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
	  && !reload_completed)
	delete_trivially_dead_insns (get_insns (), max_reg_num ());
    }

  compact_blocks ();

  while (try_optimize_cfg (mode))
    {
      delete_unreachable_blocks (), changed = true;
      if (mode & CLEANUP_UPDATE_LIFE)
	{
	  /* Cleaning up the CFG introduces more opportunities for dead code
	     removal that in turn may introduce more opportunities for
	     cleaning up the CFG.  */
	  if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
						 PROP_DEATH_NOTES
						 | PROP_SCAN_DEAD_CODE
						 | PROP_KILL_DEAD_CODE
						 | ((mode & CLEANUP_LOG_LINKS)
						    ? PROP_LOG_LINKS : 0)))
	    break;
	}
      else if (!(mode & CLEANUP_NO_INSN_DEL)
	       && (mode & CLEANUP_EXPENSIVE)
	       && !reload_completed)
	{
	  if (!delete_trivially_dead_insns (get_insns (), max_reg_num ()))
	    break;
	}
      else
	break;
      delete_dead_jumptables ();
    }

  /* Kill the data we won't maintain.  */
  free_EXPR_LIST_list (&label_value_list);
  timevar_pop (TV_CLEANUP_CFG);

  return changed;
}