1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer.  The main entry point is
23    cleanup_cfg.  The following optimizations are performed:
25 - Unreachable blocks removal
26      - Edge forwarding (an edge to a forwarder block is forwarded to its
27        successor.  Simplification of the branch instruction is performed by
28        the underlying infrastructure so the branch can be converted to a
29        simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
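/* A rough usage sketch (hypothetical caller, not taken from any particular
   pass): a pass that has just removed or redirected edges might request

     changed = cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);

   The CLEANUP_* mode bits combined into MODE throughout this file are
   presumably declared together with the rest of the CFG interfaces
   (basic-block.h).  */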
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "emit-rtl.h"
55 /* cleanup_cfg maintains the following flags for each basic block.  */
57 enum bb_flags
59 /* Set if BB is the forwarder block to avoid too many
60 forwarder_block_p calls. */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
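/* These flags are stashed in the bb->aux pointer, so they are only
   meaningful while this file owns the aux fields; try_optimize_cfg clears
   them again via clear_aux_for_blocks before returning.  */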
73 /* Set to true when we are running the first pass of the try_optimize_cfg loop.  */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
93 /* Set flags for newly created block. */
95 static void
96 notice_new_block (basic_block bb)
98 if (!bb)
99 return;
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
105 /* Recompute forwarder flag after block has been modified. */
107 static void
108 update_forwarder_flag (basic_block bb)
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
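/* Roughly, the transformation performed here is:

     if (cond) goto L2;           if (!cond) goto L3;
     goto L3;               =>  L2:
   L2:                            ...

   i.e. the conditional branch is inverted so that it targets the
   destination of the unconditional jump, and the forwarder block holding
   that jump is deleted.  */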
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
126 /* Verify that there are exactly two successors. */
127 if (EDGE_COUNT (cbranch_block->succs) != 2)
128 return false;
130 /* Verify that we've got a normal conditional branch at the end
131 of the block. */
132 cbranch_insn = BB_END (cbranch_block);
133 if (!any_condjump_p (cbranch_insn))
134 return false;
136 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
137 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
139 /* The next block must not have multiple predecessors, must not
140 be the last block in the function, and must contain just the
141 unconditional jump. */
142 jump_block = cbranch_fallthru_edge->dest;
143 if (EDGE_COUNT (jump_block->preds) >= 2
144 || jump_block->next_bb == EXIT_BLOCK_PTR
145 || !FORWARDER_BLOCK_P (jump_block))
146 return false;
147 jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;
149 /* If we are partitioning hot/cold basic blocks, we don't want to
150 mess up unconditional or indirect jumps that cross between hot
151 and cold sections.
153 Basic block partitioning may result in some jumps that appear to
154 be optimizable (or blocks that appear to be mergeable), but which really
155 must be left untouched (they are required to make it safely across
156 partition boundaries). See the comments at the top of
157 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
159 if (flag_reorder_blocks_and_partition
160 && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
161 || (cbranch_jump_edge->flags & EDGE_CROSSING)))
162 return false;
164 /* The conditional branch must target the block after the
165 unconditional branch. */
166 cbranch_dest_block = cbranch_jump_edge->dest;
168 if (cbranch_dest_block == EXIT_BLOCK_PTR
169 || !can_fallthru (jump_block, cbranch_dest_block))
170 return false;
172 /* Invert the conditional branch. */
173 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
174 return false;
176 if (dump_file)
177 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
178 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
180 /* Success. Update the CFG to match. Note that after this point
181 the edge variable names appear backwards; the redirection is done
182 this way to preserve edge profile data. */
183 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
184 cbranch_dest_block);
185 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
186 jump_dest_block);
187 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
188 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
189 update_br_prob_note (cbranch_block);
191 /* Delete the block with the unconditional jump, and clean up the mess. */
192 delete_basic_block (jump_block);
193 tidy_fallthru_edge (cbranch_jump_edge);
194 update_forwarder_flag (cbranch_block);
196 return true;
199 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its effect
200 on the register.  Used by jump threading.  */
202 static bool
203 mark_effect (rtx exp, regset nonequal)
205 int regno;
206 rtx dest;
207 switch (GET_CODE (exp))
209 /* In case we do clobber the register, mark it as equal, as we know the
210 value is dead so it doesn't have to match.  */
211 case CLOBBER:
212 if (REG_P (XEXP (exp, 0)))
214 dest = XEXP (exp, 0);
215 regno = REGNO (dest);
216 CLEAR_REGNO_REG_SET (nonequal, regno);
217 if (regno < FIRST_PSEUDO_REGISTER)
219 int n = hard_regno_nregs[regno][GET_MODE (dest)];
220 while (--n > 0)
221 CLEAR_REGNO_REG_SET (nonequal, regno + n);
224 return false;
226 case SET:
227 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
228 return false;
229 dest = SET_DEST (exp);
230 if (dest == pc_rtx)
231 return false;
232 if (!REG_P (dest))
233 return true;
234 regno = REGNO (dest);
235 SET_REGNO_REG_SET (nonequal, regno);
236 if (regno < FIRST_PSEUDO_REGISTER)
238 int n = hard_regno_nregs[regno][GET_MODE (dest)];
239 while (--n > 0)
240 SET_REGNO_REG_SET (nonequal, regno + n);
242 return false;
244 default:
245 return false;
249 /* Return nonzero if X is a register set in regset DATA.
250 Called via for_each_rtx. */
251 static int
252 mentions_nonequal_regs (rtx *x, void *data)
254 regset nonequal = (regset) data;
255 if (REG_P (*x))
257 int regno;
259 regno = REGNO (*x);
260 if (REGNO_REG_SET_P (nonequal, regno))
261 return 1;
262 if (regno < FIRST_PSEUDO_REGISTER)
264 int n = hard_regno_nregs[regno][GET_MODE (*x)];
265 while (--n > 0)
266 if (REGNO_REG_SET_P (nonequal, regno + n))
267 return 1;
270 return 0;
272 /* Attempt to prove that the basic block B will have no side effects and
273 always continues along the same edge if reached via E.  Return that edge
274 if it exists, NULL otherwise.  */
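/* For example, if E->SRC ends with "if (x == 0)" and B ends with
   "if (x >= 0)" on the same operands, then on the edge taken when x == 0
   the second condition is already known to be true, so B's outcome is
   decided in advance and the jump through B can be bypassed (this is what
   the comparison_dominates_p checks below establish), provided B itself
   has no side effects.  */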
276 static edge
277 thread_jump (int mode, edge e, basic_block b)
279 rtx set1, set2, cond1, cond2, insn;
280 enum rtx_code code1, code2, reversed_code2;
281 bool reverse1 = false;
282 int i;
283 regset nonequal;
284 bool failed = false;
286 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
287 return NULL;
289 /* At the moment we handle only conditional jumps, but later we may
290 want to extend this code to tablejumps and others.  */
291 if (EDGE_COUNT (e->src->succs) != 2)
292 return NULL;
293 if (EDGE_COUNT (b->succs) != 2)
295 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
296 return NULL;
299 /* Second branch must end with onlyjump, as we will eliminate the jump. */
300 if (!any_condjump_p (BB_END (e->src)))
301 return NULL;
303 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
305 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
306 return NULL;
309 set1 = pc_set (BB_END (e->src));
310 set2 = pc_set (BB_END (b));
311 if (((e->flags & EDGE_FALLTHRU) != 0)
312 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
313 reverse1 = true;
315 cond1 = XEXP (SET_SRC (set1), 0);
316 cond2 = XEXP (SET_SRC (set2), 0);
317 if (reverse1)
318 code1 = reversed_comparison_code (cond1, BB_END (e->src));
319 else
320 code1 = GET_CODE (cond1);
322 code2 = GET_CODE (cond2);
323 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
325 if (!comparison_dominates_p (code1, code2)
326 && !comparison_dominates_p (code1, reversed_code2))
327 return NULL;
329 /* Ensure that the comparison operators are equivalent.
330 ??? This is far too pessimistic. We should allow swapped operands,
331 different CCmodes, or for example comparisons for interval, that
332 dominate even when operands are not equivalent. */
333 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
334 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
335 return NULL;
337 /* Short circuit cases where block B contains some side effects, as we can't
338 safely bypass it. */
339 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
340 insn = NEXT_INSN (insn))
341 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
343 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
344 return NULL;
347 cselib_init (false);
349 /* First process all values computed in the source basic block. */
350 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
351 insn = NEXT_INSN (insn))
352 if (INSN_P (insn))
353 cselib_process_insn (insn);
355 nonequal = BITMAP_XMALLOC();
356 CLEAR_REG_SET (nonequal);
358 /* Now assume that we've continued along the edge E to B and continue
359 processing as if it were the same basic block.
360 Our goal is to prove that the whole block is a NOOP.  */
362 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
363 insn = NEXT_INSN (insn))
365 if (INSN_P (insn))
367 rtx pat = PATTERN (insn);
369 if (GET_CODE (pat) == PARALLEL)
371 for (i = 0; i < XVECLEN (pat, 0); i++)
372 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
374 else
375 failed |= mark_effect (pat, nonequal);
378 cselib_process_insn (insn);
381 /* Later we should clear nonequal of dead registers.  So far we don't
382 have liveness information in cfg_cleanup.  */
383 if (failed)
385 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
386 goto failed_exit;
389 /* cond2 must not mention any register whose value is not known to be
390 equal to its value in the former block.  */
391 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
392 goto failed_exit;
394 /* In case liveness information is available, we need to prove equivalence
395 only of the live values. */
396 if (mode & CLEANUP_UPDATE_LIFE)
397 AND_REG_SET (nonequal, b->global_live_at_end);
399 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
401 BITMAP_XFREE (nonequal);
402 cselib_finish ();
403 if ((comparison_dominates_p (code1, code2) != 0)
404 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
405 return BRANCH_EDGE (b);
406 else
407 return FALLTHRU_EDGE (b);
409 failed_exit:
410 BITMAP_XFREE (nonequal);
411 cselib_finish ();
412 return NULL;
415 /* Attempt to forward edges leaving basic block B.
416 Return true if successful. */
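/* A forwarder block does no work of its own - it merely passes control on
   to its single successor - so an edge pointing into a chain such as

     B -> F1 -> F2 -> T        (F1, F2 forwarders)

   can simply be redirected straight to T; once all predecessors have been
   redirected, the forwarders become unreachable and are removed by the
   unreachable block removal pass.  */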
418 static bool
419 try_forward_edges (int mode, basic_block b)
421 bool changed = false;
422 edge_iterator ei;
423 edge e, *threaded_edges = NULL;
425 /* If we are partitioning hot/cold basic blocks, we don't want to
426 mess up unconditional or indirect jumps that cross between hot
427 and cold sections.
429 Basic block partitioning may result in some jumps that appear to
430 be optimizable (or blocks that appear to be mergeable), but which really
431 must be left untouched (they are required to make it safely across
432 partition boundaries). See the comments at the top of
433 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
435 if (flag_reorder_blocks_and_partition
436 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
437 return false;
439 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
441 basic_block target, first;
442 int counter;
443 bool threaded = false;
444 int nthreaded_edges = 0;
445 bool may_thread = first_pass | (b->flags & BB_DIRTY);
447 /* Skip complex edges because we don't know how to update them.
449 Still handle fallthru edges, as we may succeed in forwarding the fallthru
450 edge to the same place as the branch edge of a conditional branch
451 and turn the conditional branch into an unconditional branch.  */
452 if (e->flags & EDGE_COMPLEX)
454 ei_next (&ei);
455 continue;
458 target = first = e->dest;
459 counter = 0;
461 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
462 up jumps that cross between hot/cold sections.
464 Basic block partitioning may result in some jumps that appear
465 to be optimizable (or blocks that appear to be mergeable), but which
466 really must be left untouched (they are required to make it safely
467 across partition boundaries). See the comments at the top of
468 bb-reorder.c:partition_hot_cold_basic_blocks for complete
469 details. */
471 if (flag_reorder_blocks_and_partition
472 && first != EXIT_BLOCK_PTR
473 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
474 return false;
476 while (counter < n_basic_blocks)
478 basic_block new_target = NULL;
479 bool new_target_threaded = false;
480 may_thread |= target->flags & BB_DIRTY;
482 if (FORWARDER_BLOCK_P (target)
483 && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
484 && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
486 /* Bypass trivial infinite loops. */
487 if (target == EDGE_SUCC (target, 0)->dest)
488 counter = n_basic_blocks;
489 new_target = EDGE_SUCC (target, 0)->dest;
492 /* Thread over only one edge at a time to simplify updating
493 of probabilities.  */
494 else if ((mode & CLEANUP_THREADING) && may_thread)
496 edge t = thread_jump (mode, e, target);
497 if (t)
499 if (!threaded_edges)
500 threaded_edges = xmalloc (sizeof (*threaded_edges)
501 * n_basic_blocks);
502 else
504 int i;
506 /* Detect an infinite loop across blocks not
507 including the start block. */
508 for (i = 0; i < nthreaded_edges; ++i)
509 if (threaded_edges[i] == t)
510 break;
511 if (i < nthreaded_edges)
513 counter = n_basic_blocks;
514 break;
518 /* Detect an infinite loop across the start block. */
519 if (t->dest == b)
520 break;
522 gcc_assert (nthreaded_edges < n_basic_blocks);
523 threaded_edges[nthreaded_edges++] = t;
525 new_target = t->dest;
526 new_target_threaded = true;
530 if (!new_target)
531 break;
533 /* Avoid killing loop pre-headers, as they are the place the loop
534 optimizer wants to hoist code to.
536 For fallthru forwarders, the LOOP_BEG note must appear between
537 the header of the block and the CODE_LABEL of the loop; for non-forwarders
538 it must appear before the JUMP_INSN.  */
539 if ((mode & CLEANUP_PRE_LOOP) && optimize)
541 rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
542 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
544 if (!NOTE_P (insn))
545 insn = NEXT_INSN (insn);
547 for (; insn && !LABEL_P (insn) && !INSN_P (insn);
548 insn = NEXT_INSN (insn))
549 if (NOTE_P (insn)
550 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
551 break;
553 if (NOTE_P (insn))
554 break;
556 /* Do not clean up branches to just past the end of a loop
557 at this time; it can mess up the loop optimizer's
558 recognition of some patterns. */
560 insn = PREV_INSN (BB_HEAD (target));
561 if (insn && NOTE_P (insn)
562 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
563 break;
566 counter++;
567 target = new_target;
568 threaded |= new_target_threaded;
571 if (counter >= n_basic_blocks)
573 if (dump_file)
574 fprintf (dump_file, "Infinite loop in BB %i.\n",
575 target->index);
577 else if (target == first)
578 ; /* We didn't do anything. */
579 else
581 /* Save the values now, as the edge may get removed. */
582 gcov_type edge_count = e->count;
583 int edge_probability = e->probability;
584 int edge_frequency;
585 int n = 0;
587 /* Don't force if target is exit block. */
588 if (threaded && target != EXIT_BLOCK_PTR)
590 notice_new_block (redirect_edge_and_branch_force (e, target));
591 if (dump_file)
592 fprintf (dump_file, "Conditionals threaded.\n");
594 else if (!redirect_edge_and_branch (e, target))
596 if (dump_file)
597 fprintf (dump_file,
598 "Forwarding edge %i->%i to %i failed.\n",
599 b->index, e->dest->index, target->index);
600 ei_next (&ei);
601 continue;
604 /* We successfully forwarded the edge. Now update profile
605 data: for each edge we traversed in the chain, remove
606 the original edge's execution count. */
607 edge_frequency = ((edge_probability * b->frequency
608 + REG_BR_PROB_BASE / 2)
609 / REG_BR_PROB_BASE);
611 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
612 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
616 edge t;
618 if (EDGE_COUNT (first->succs) > 1)
620 gcc_assert (n < nthreaded_edges);
621 t = threaded_edges [n++];
622 gcc_assert (t->src == first);
623 update_bb_profile_for_threading (first, edge_frequency,
624 edge_count, t);
625 update_br_prob_note (first);
627 else
629 first->count -= edge_count;
630 if (first->count < 0)
631 first->count = 0;
632 first->frequency -= edge_frequency;
633 if (first->frequency < 0)
634 first->frequency = 0;
635 /* It is possible that as a result of
636 threading we've removed the edge, as it is
637 threaded to the fallthru edge.  Avoid
638 getting out of sync.  */
639 if (n < nthreaded_edges
640 && first == threaded_edges [n]->src)
641 n++;
642 t = EDGE_SUCC (first, 0);
645 t->count -= edge_count;
646 if (t->count < 0)
647 t->count = 0;
648 first = t->dest;
650 while (first != target);
652 changed = true;
653 continue;
655 ei_next (&ei);
658 if (threaded_edges)
659 free (threaded_edges);
660 return changed;
664 /* Blocks A and B are to be merged into a single block. A has no incoming
665 fallthru edge, so it can be moved before B without adding or modifying
666 any jumps (aside from the jump from A to B). */
668 static void
669 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
671 rtx barrier;
672 bool only_notes;
674 /* If we are partitioning hot/cold basic blocks, we don't want to
675 mess up unconditional or indirect jumps that cross between hot
676 and cold sections.
678 Basic block partitioning may result in some jumps that appear to
679 be optimizable (or blocks that appear to be mergeable), but which really
680 must be left untouched (they are required to make it safely across
681 partition boundaries). See the comments at the top of
682 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
684 if (flag_reorder_blocks_and_partition
685 && (BB_PARTITION (a) != BB_PARTITION (b)
686 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
687 return;
689 barrier = next_nonnote_insn (BB_END (a));
690 gcc_assert (BARRIER_P (barrier));
691 delete_insn (barrier);
693 /* Move block and loop notes out of the chain so that we do not
694 disturb their order.
696 ??? A better solution would be to squeeze out all the non-nested notes
697 and adjust the block trees appropriately. Even better would be to have
698 a tighter connection between block trees and rtl so that this is not
699 necessary. */
700 only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
701 gcc_assert (!only_notes);
703 /* Scramble the insn chain. */
704 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
705 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
706 a->flags |= BB_DIRTY;
708 if (dump_file)
709 fprintf (dump_file, "Moved block %d before %d and merged.\n",
710 a->index, b->index);
712 /* Swap the records for the two blocks around. */
714 unlink_block (a);
715 link_block (a, b->prev_bb);
717 /* Now blocks A and B are contiguous. Merge them. */
718 merge_blocks (a, b);
721 /* Blocks A and B are to be merged into a single block. B has no outgoing
722 fallthru edge, so it can be moved after A without adding or modifying
723 any jumps (aside from the jump from A to B). */
725 static void
726 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
728 rtx barrier, real_b_end;
729 rtx label, table;
730 bool only_notes;
732 /* If we are partitioning hot/cold basic blocks, we don't want to
733 mess up unconditional or indirect jumps that cross between hot
734 and cold sections.
736 Basic block partitioning may result in some jumps that appear to
737 be optimizable (or blocks that appear to be mergeable), but which really
738 must be left untouched (they are required to make it safely across
739 partition boundaries). See the comments at the top of
740 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
742 if (flag_reorder_blocks_and_partition
743 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
744 || BB_PARTITION (a) != BB_PARTITION (b)))
745 return;
747 real_b_end = BB_END (b);
749 /* If there is a jump table following block B temporarily add the jump table
750 to block B so that it will also be moved to the correct location. */
751 if (tablejump_p (BB_END (b), &label, &table)
752 && prev_active_insn (label) == BB_END (b))
754 BB_END (b) = table;
757 /* There had better have been a barrier there. Delete it. */
758 barrier = NEXT_INSN (BB_END (b));
759 if (barrier && BARRIER_P (barrier))
760 delete_insn (barrier);
762 /* Move block and loop notes out of the chain so that we do not
763 disturb their order.
765 ??? A better solution would be to squeeze out all the non-nested notes
766 and adjust the block trees appropriately. Even better would be to have
767 a tighter connection between block trees and rtl so that this is not
768 necessary. */
769 only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
770 gcc_assert (!only_notes);
773 /* Scramble the insn chain. */
774 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
776 /* Restore the real end of b. */
777 BB_END (b) = real_b_end;
779 if (dump_file)
780 fprintf (dump_file, "Moved block %d after %d and merged.\n",
781 b->index, a->index);
783 /* Now blocks A and B are contiguous. Merge them. */
784 merge_blocks (a, b);
787 /* Attempt to merge basic blocks that are potentially non-adjacent.
788 Return NULL iff the attempt failed, otherwise return the basic block
789 where cleanup_cfg should continue.  Because the merging commonly
790 moves a basic block away or introduces another optimization
791 possibility, return the basic block just before B so cleanup_cfg doesn't
792 need to iterate.
794 It may be a good idea to return the basic block before C in the case
795 C has been moved after B and originally appeared earlier in the
796 insn sequence, but we have no information available about the
797 relative ordering of these two.  Hopefully it is not too common.  */
799 static basic_block
800 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
802 basic_block next;
804 /* If we are partitioning hot/cold basic blocks, we don't want to
805 mess up unconditional or indirect jumps that cross between hot
806 and cold sections.
808 Basic block partitioning may result in some jumps that appear to
809 be optimizable (or blocks that appear to be mergeable), but which really
810 must be left untouched (they are required to make it safely across
811 partition boundaries). See the comments at the top of
812 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
814 if (flag_reorder_blocks_and_partition
815 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
816 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
817 || BB_PARTITION (b) != BB_PARTITION (c)))
818 return NULL;
822 /* If B has a fallthru edge to C, no need to move anything. */
823 if (e->flags & EDGE_FALLTHRU)
825 int b_index = b->index, c_index = c->index;
826 merge_blocks (b, c);
827 update_forwarder_flag (b);
829 if (dump_file)
830 fprintf (dump_file, "Merged %d and %d without moving.\n",
831 b_index, c_index);
833 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
836 /* Otherwise we will need to move code around. Do that only if expensive
837 transformations are allowed. */
838 else if (mode & CLEANUP_EXPENSIVE)
840 edge tmp_edge, b_fallthru_edge;
841 bool c_has_outgoing_fallthru;
842 bool b_has_incoming_fallthru;
843 edge_iterator ei;
845 /* Avoid overactive code motion, as the forwarder blocks should be
846 eliminated by edge redirection instead. One exception might have
847 been if B is a forwarder block and C has no fallthru edge, but
848 that should be cleaned up by bb-reorder instead. */
849 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
850 return NULL;
852 /* We must make sure to not munge nesting of lexical blocks,
853 and loop notes. This is done by squeezing out all the notes
854 and leaving them there to lie. Not ideal, but functional. */
856 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
857 if (tmp_edge->flags & EDGE_FALLTHRU)
858 break;
860 c_has_outgoing_fallthru = (tmp_edge != NULL);
862 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
863 if (tmp_edge->flags & EDGE_FALLTHRU)
864 break;
866 b_has_incoming_fallthru = (tmp_edge != NULL);
867 b_fallthru_edge = tmp_edge;
868 next = b->prev_bb;
869 if (next == c)
870 next = next->prev_bb;
872 /* Otherwise, we're going to try to move C after B. If C does
873 not have an outgoing fallthru, then it can be moved
874 immediately after B without introducing or modifying jumps. */
875 if (! c_has_outgoing_fallthru)
877 merge_blocks_move_successor_nojumps (b, c);
878 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
881 /* If B does not have an incoming fallthru, then it can be moved
882 immediately before C without introducing or modifying jumps.
883 C cannot be the first block, so we do not have to worry about
884 accessing a non-existent block. */
886 if (b_has_incoming_fallthru)
888 basic_block bb;
890 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
891 return NULL;
892 bb = force_nonfallthru (b_fallthru_edge);
893 if (bb)
894 notice_new_block (bb);
897 merge_blocks_move_predecessor_nojumps (b, c);
898 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
901 return NULL;
905 /* Remove the memory attributes of corresponding MEM expressions in X and Y
906 if they are not equal.  */
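/* For example, if two corresponding MEMs carry different alias sets, both
   are reset to alias set 0; if their MEM_EXPRs differ, the expressions and
   offsets are dropped; sizes are widened to the larger of the two and
   alignment lowered to the smaller, so the remaining attributes are
   conservatively correct for either access.  */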
908 void
909 merge_memattrs (rtx x, rtx y)
911 int i;
912 int j;
913 enum rtx_code code;
914 const char *fmt;
916 if (x == y)
917 return;
918 if (x == 0 || y == 0)
919 return;
921 code = GET_CODE (x);
923 if (code != GET_CODE (y))
924 return;
926 if (GET_MODE (x) != GET_MODE (y))
927 return;
929 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
931 if (! MEM_ATTRS (x))
932 MEM_ATTRS (y) = 0;
933 else if (! MEM_ATTRS (y))
934 MEM_ATTRS (x) = 0;
935 else
937 rtx mem_size;
939 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
941 set_mem_alias_set (x, 0);
942 set_mem_alias_set (y, 0);
945 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
947 set_mem_expr (x, 0);
948 set_mem_expr (y, 0);
949 set_mem_offset (x, 0);
950 set_mem_offset (y, 0);
952 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
954 set_mem_offset (x, 0);
955 set_mem_offset (y, 0);
958 if (!MEM_SIZE (x))
959 mem_size = NULL_RTX;
960 else if (!MEM_SIZE (y))
961 mem_size = NULL_RTX;
962 else
963 mem_size = GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
964 INTVAL (MEM_SIZE (y))));
965 set_mem_size (x, mem_size);
966 set_mem_size (y, mem_size);
968 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
969 set_mem_align (y, MEM_ALIGN (x));
973 fmt = GET_RTX_FORMAT (code);
974 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
976 switch (fmt[i])
978 case 'E':
979 /* Two vectors must have the same length. */
980 if (XVECLEN (x, i) != XVECLEN (y, i))
981 return;
983 for (j = 0; j < XVECLEN (x, i); j++)
984 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
986 break;
988 case 'e':
989 merge_memattrs (XEXP (x, i), XEXP (y, i));
992 return;
996 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
998 static bool
999 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
1001 rtx p1, p2;
1003 /* Verify that I1 and I2 are equivalent. */
1004 if (GET_CODE (i1) != GET_CODE (i2))
1005 return false;
1007 p1 = PATTERN (i1);
1008 p2 = PATTERN (i2);
1010 if (GET_CODE (p1) != GET_CODE (p2))
1011 return false;
1013 /* If this is a CALL_INSN, compare register usage information.
1014 If we don't check this on stack register machines, the two
1015 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1016 numbers of stack registers in the same basic block.
1017 If we don't check this on machines with delay slots, a delay slot may
1018 be filled that clobbers a parameter expected by the subroutine.
1020 ??? We take the simple route for now and assume that if they're
1021 equal, they were constructed identically. */
1023 if (CALL_P (i1)
1024 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1025 CALL_INSN_FUNCTION_USAGE (i2))
1026 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1027 return false;
1029 #ifdef STACK_REGS
1030 /* If cross_jump_death_matters is not 0, the insn's mode
1031 indicates whether or not the insn contains any stack-like
1032 regs. */
1034 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1036 /* If register stack conversion has already been done, then
1037 death notes must also be compared before it is certain that
1038 the two instruction streams match. */
1040 rtx note;
1041 HARD_REG_SET i1_regset, i2_regset;
1043 CLEAR_HARD_REG_SET (i1_regset);
1044 CLEAR_HARD_REG_SET (i2_regset);
1046 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1047 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1048 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1050 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1051 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1052 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1054 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1056 return false;
1058 done:
1061 #endif
1063 if (reload_completed
1064 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1065 return true;
1067 /* Do not do EQUIV substitution after reload. First, we're undoing the
1068 work of reload_cse. Second, we may be undoing the work of the post-
1069 reload splitting pass. */
1070 /* ??? Possibly add a new phase switch variable that can be used by
1071 targets to disallow the troublesome insns after splitting. */
1072 if (!reload_completed)
1074 /* The following code helps take care of G++ cleanups. */
1075 rtx equiv1 = find_reg_equal_equiv_note (i1);
1076 rtx equiv2 = find_reg_equal_equiv_note (i2);
1078 if (equiv1 && equiv2
1079 /* If the equivalences are not to a constant, they may
1080 reference pseudos that no longer exist, so we can't
1081 use them. */
1082 && (! reload_completed
1083 || (CONSTANT_P (XEXP (equiv1, 0))
1084 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1086 rtx s1 = single_set (i1);
1087 rtx s2 = single_set (i2);
1088 if (s1 != 0 && s2 != 0
1089 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1091 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1092 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1093 if (! rtx_renumbered_equal_p (p1, p2))
1094 cancel_changes (0);
1095 else if (apply_change_group ())
1096 return true;
1101 return false;
1104 /* Look through the insns at the end of BB1 and BB2 and find the longest
1105 sequence that is equivalent.  Store the first insns of that sequence
1106 in *F1 and *F2 and return the sequence length.
1108 To simplify callers of this function, if the blocks match exactly,
1109 store the head of the blocks in *F1 and *F2. */
1111 static int
1112 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1113 basic_block bb2, rtx *f1, rtx *f2)
1115 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1116 int ninsns = 0;
1118 /* Skip simple jumps at the end of the blocks. Complex jumps still
1119 need to be compared for equivalence, which we'll do below. */
1121 i1 = BB_END (bb1);
1122 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1123 if (onlyjump_p (i1)
1124 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1126 last1 = i1;
1127 i1 = PREV_INSN (i1);
1130 i2 = BB_END (bb2);
1131 if (onlyjump_p (i2)
1132 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1134 last2 = i2;
1135 /* Count everything except for the unconditional jump as an insn.  */
1136 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1137 ninsns++;
1138 i2 = PREV_INSN (i2);
1141 while (true)
1143 /* Ignore notes. */
1144 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1145 i1 = PREV_INSN (i1);
1147 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1148 i2 = PREV_INSN (i2);
1150 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1151 break;
1153 if (!insns_match_p (mode, i1, i2))
1154 break;
1156 merge_memattrs (i1, i2);
1158 /* Don't begin a cross-jump with a NOTE insn. */
1159 if (INSN_P (i1))
1161 /* If the merged insns have different REG_EQUAL notes, then
1162 remove them. */
1163 rtx equiv1 = find_reg_equal_equiv_note (i1);
1164 rtx equiv2 = find_reg_equal_equiv_note (i2);
1166 if (equiv1 && !equiv2)
1167 remove_note (i1, equiv1);
1168 else if (!equiv1 && equiv2)
1169 remove_note (i2, equiv2);
1170 else if (equiv1 && equiv2
1171 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1173 remove_note (i1, equiv1);
1174 remove_note (i2, equiv2);
1177 afterlast1 = last1, afterlast2 = last2;
1178 last1 = i1, last2 = i2;
1179 ninsns++;
1182 i1 = PREV_INSN (i1);
1183 i2 = PREV_INSN (i2);
1186 #ifdef HAVE_cc0
1187 /* Don't allow the insn after a compare to be shared by
1188 cross-jumping unless the compare is also shared. */
1189 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1190 last1 = afterlast1, last2 = afterlast2, ninsns--;
1191 #endif
1193 /* Include preceding notes and labels in the cross-jump. One,
1194 this may bring us to the head of the blocks as requested above.
1195 Two, it keeps line number notes as matched as possible.  */
1196 if (ninsns)
1198 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1199 last1 = PREV_INSN (last1);
1201 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1202 last1 = PREV_INSN (last1);
1204 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1205 last2 = PREV_INSN (last2);
1207 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1208 last2 = PREV_INSN (last2);
1210 *f1 = last1;
1211 *f2 = last2;
1214 return ninsns;
1217 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1218 the branch instruction.  This means that if we commonize the control
1219 flow before the end of the basic block, the semantics remain unchanged.
1221 We may assume that there exists one edge with a common destination. */
1223 static bool
1224 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1226 int nehedges1 = 0, nehedges2 = 0;
1227 edge fallthru1 = 0, fallthru2 = 0;
1228 edge e1, e2;
1229 edge_iterator ei;
1231 /* If BB1 has only one successor, we may be looking at either an
1232 unconditional jump, or a fake edge to exit. */
1233 if (EDGE_COUNT (bb1->succs) == 1
1234 && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1235 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1236 return (EDGE_COUNT (bb2->succs) == 1
1237 && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1238 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1240 /* Match conditional jumps - this may get tricky when fallthru and branch
1241 edges are crossed. */
1242 if (EDGE_COUNT (bb1->succs) == 2
1243 && any_condjump_p (BB_END (bb1))
1244 && onlyjump_p (BB_END (bb1)))
1246 edge b1, f1, b2, f2;
1247 bool reverse, match;
1248 rtx set1, set2, cond1, cond2;
1249 enum rtx_code code1, code2;
1251 if (EDGE_COUNT (bb2->succs) != 2
1252 || !any_condjump_p (BB_END (bb2))
1253 || !onlyjump_p (BB_END (bb2)))
1254 return false;
1256 b1 = BRANCH_EDGE (bb1);
1257 b2 = BRANCH_EDGE (bb2);
1258 f1 = FALLTHRU_EDGE (bb1);
1259 f2 = FALLTHRU_EDGE (bb2);
1261 /* Get around possible forwarders on fallthru edges. Other cases
1262 should be optimized out already. */
1263 if (FORWARDER_BLOCK_P (f1->dest))
1264 f1 = EDGE_SUCC (f1->dest, 0);
1266 if (FORWARDER_BLOCK_P (f2->dest))
1267 f2 = EDGE_SUCC (f2->dest, 0);
1269 /* To simplify use of this function, return false if there are
1270 unneeded forwarder blocks. These will get eliminated later
1271 during cleanup_cfg. */
1272 if (FORWARDER_BLOCK_P (f1->dest)
1273 || FORWARDER_BLOCK_P (f2->dest)
1274 || FORWARDER_BLOCK_P (b1->dest)
1275 || FORWARDER_BLOCK_P (b2->dest))
1276 return false;
1278 if (f1->dest == f2->dest && b1->dest == b2->dest)
1279 reverse = false;
1280 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1281 reverse = true;
1282 else
1283 return false;
1285 set1 = pc_set (BB_END (bb1));
1286 set2 = pc_set (BB_END (bb2));
1287 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1288 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1289 reverse = !reverse;
1291 cond1 = XEXP (SET_SRC (set1), 0);
1292 cond2 = XEXP (SET_SRC (set2), 0);
1293 code1 = GET_CODE (cond1);
1294 if (reverse)
1295 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1296 else
1297 code2 = GET_CODE (cond2);
1299 if (code2 == UNKNOWN)
1300 return false;
1302 /* Verify codes and operands match. */
1303 match = ((code1 == code2
1304 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1305 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1306 || (code1 == swap_condition (code2)
1307 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1308 XEXP (cond2, 0))
1309 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1310 XEXP (cond2, 1))));
1312 /* If we return true, we will join the blocks, which means that
1313 we will only have one branch prediction bit to work with.  Thus
1314 we require the existing branches to have probabilities that are
1315 roughly similar. */
1316 if (match
1317 && !optimize_size
1318 && maybe_hot_bb_p (bb1)
1319 && maybe_hot_bb_p (bb2))
1321 int prob2;
1323 if (b1->dest == b2->dest)
1324 prob2 = b2->probability;
1325 else
1326 /* Do not use f2 probability as f2 may be forwarded. */
1327 prob2 = REG_BR_PROB_BASE - b2->probability;
1329 /* Fail if the difference in probabilities is greater than 50%.
1330 This rules out two well-predicted branches with opposite
1331 outcomes. */
1332 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1334 if (dump_file)
1335 fprintf (dump_file,
1336 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1337 bb1->index, bb2->index, b1->probability, prob2);
1339 return false;
1343 if (dump_file && match)
1344 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1345 bb1->index, bb2->index);
1347 return match;
1350 /* Generic case - we are seeing a computed jump, table jump or trapping
1351 instruction. */
1353 #ifndef CASE_DROPS_THROUGH
1354 /* Check whether there are tablejumps in the end of BB1 and BB2.
1355 Return true if they are identical. */
1357 rtx label1, label2;
1358 rtx table1, table2;
1360 if (tablejump_p (BB_END (bb1), &label1, &table1)
1361 && tablejump_p (BB_END (bb2), &label2, &table2)
1362 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1364 /* The labels should never be the same rtx.  If they really are the same,
1365 the jump tables are the same too.  So disable crossjumping of blocks BB1
1366 and BB2 because when deleting the common insns at the end of BB1
1367 by delete_basic_block () the jump table would be deleted too.  */
1368 /* If LABEL2 is referenced in BB1->END do not do anything
1369 because we would lose information when replacing
1370 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END.  */
1371 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1373 /* Set IDENTICAL to true when the tables are identical. */
1374 bool identical = false;
1375 rtx p1, p2;
1377 p1 = PATTERN (table1);
1378 p2 = PATTERN (table2);
1379 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1381 identical = true;
1383 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1384 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1385 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1386 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1388 int i;
1390 identical = true;
1391 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1392 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1393 identical = false;
1396 if (identical)
1398 replace_label_data rr;
1399 bool match;
1401 /* Temporarily replace references to LABEL1 with LABEL2
1402 in BB1->END so that we could compare the instructions. */
1403 rr.r1 = label1;
1404 rr.r2 = label2;
1405 rr.update_label_nuses = false;
1406 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1408 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1409 if (dump_file && match)
1410 fprintf (dump_file,
1411 "Tablejumps in bb %i and %i match.\n",
1412 bb1->index, bb2->index);
1414 /* Set the original label in BB1->END because when deleting
1415 a block whose end is a tablejump, the tablejump referenced
1416 from the instruction is deleted too. */
1417 rr.r1 = label2;
1418 rr.r2 = label1;
1419 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1421 return match;
1424 return false;
1427 #endif
1429 /* First ensure that the instructions match. There may be many outgoing
1430 edges so this test is generally cheaper. */
1431 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1432 return false;
1434 /* Search the outgoing edges, ensure that the counts match, and find possible
1435 fallthru and exception handling edges, since these need more
1436 validation.  */
1437 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1438 return false;
1440 FOR_EACH_EDGE (e1, ei, bb1->succs)
1442 e2 = EDGE_SUCC (bb2, ei.index);
1444 if (e1->flags & EDGE_EH)
1445 nehedges1++;
1447 if (e2->flags & EDGE_EH)
1448 nehedges2++;
1450 if (e1->flags & EDGE_FALLTHRU)
1451 fallthru1 = e1;
1452 if (e2->flags & EDGE_FALLTHRU)
1453 fallthru2 = e2;
1456 /* If number of edges of various types does not match, fail. */
1457 if (nehedges1 != nehedges2
1458 || (fallthru1 != 0) != (fallthru2 != 0))
1459 return false;
1461 /* fallthru edges must be forwarded to the same destination. */
1462 if (fallthru1)
1464 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1465 ? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
1466 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1467 ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);
1469 if (d1 != d2)
1470 return false;
1473 /* Ensure the same EH region. */
1475 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1476 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1478 if (!n1 && n2)
1479 return false;
1481 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1482 return false;
1485 /* We don't need to match the rest of edges as above checks should be enough
1486 to ensure that they are equivalent. */
1487 return true;
1490 /* E1 and E2 are edges with the same destination block. Search their
1491 predecessors for common code. If found, redirect control flow from
1492 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
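/* In the common case the tails of E1->SRC and E2->SRC end with the same
   insn sequence:

     A: ...                 B: ...
        x = y + 1;             x = y + 1;
        goto C;                goto C;

   The matching tail of A is deleted and A is redirected into the
   corresponding point of B (splitting B first when the match does not
   start at its head), so the shared sequence is emitted only once.  */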
1494 static bool
1495 try_crossjump_to_edge (int mode, edge e1, edge e2)
1497 int nmatch;
1498 basic_block src1 = e1->src, src2 = e2->src;
1499 basic_block redirect_to, redirect_from, to_remove;
1500 rtx newpos1, newpos2;
1501 edge s;
1502 edge_iterator ei;
1504 newpos1 = newpos2 = NULL_RTX;
1506 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1507 to try this optimization.
1509 Basic block partitioning may result in some jumps that appear to
1510 be optimizable (or blocks that appear to be mergeable), but which really
1511 must be left untouched (they are required to make it safely across
1512 partition boundaries). See the comments at the top of
1513 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1515 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1516 return false;
1518 /* Search backward through forwarder blocks. We don't need to worry
1519 about multiple entry or chained forwarders, as they will be optimized
1520 away. We do this to look past the unconditional jump following a
1521 conditional jump that is required due to the current CFG shape. */
1522 if (EDGE_COUNT (src1->preds) == 1
1523 && FORWARDER_BLOCK_P (src1))
1524 e1 = EDGE_PRED (src1, 0), src1 = e1->src;
1526 if (EDGE_COUNT (src2->preds) == 1
1527 && FORWARDER_BLOCK_P (src2))
1528 e2 = EDGE_PRED (src2, 0), src2 = e2->src;
1530 /* Nothing to do if we reach ENTRY, or a common source block. */
1531 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1532 return false;
1533 if (src1 == src2)
1534 return false;
1536 /* Seeing more than one forwarder block would confuse us later...  */
1537 if (FORWARDER_BLOCK_P (e1->dest)
1538 && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
1539 return false;
1541 if (FORWARDER_BLOCK_P (e2->dest)
1542 && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
1543 return false;
1545 /* Likewise with dead code (possibly newly created by the other optimizations
1546 of cfg_cleanup). */
1547 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1548 return false;
1550 /* Look for the common insn sequence, part the first ... */
1551 if (!outgoing_edges_match (mode, src1, src2))
1552 return false;
1554 /* ... and part the second. */
1555 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1557 /* Don't proceed with the crossjump unless we found a sufficient number
1558 of matching instructions or the 'from' block was totally matched
1559 (such that its predecessors will hopefully be redirected and the
1560 block removed). */
1561 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1562 && (newpos1 != BB_HEAD (src1)))
1563 return false;
1565 #ifndef CASE_DROPS_THROUGH
1566 /* Here we know that the insns at the end of SRC1 which are common with SRC2
1567 will be deleted.
1568 If we have tablejumps at the end of SRC1 and SRC2,
1569 they have already been compared for equivalence in outgoing_edges_match (),
1570 so replace the references to TABLE1 by references to TABLE2.  */
1572 rtx label1, label2;
1573 rtx table1, table2;
1575 if (tablejump_p (BB_END (src1), &label1, &table1)
1576 && tablejump_p (BB_END (src2), &label2, &table2)
1577 && label1 != label2)
1579 replace_label_data rr;
1580 rtx insn;
1582 /* Replace references to LABEL1 with LABEL2. */
1583 rr.r1 = label1;
1584 rr.r2 = label2;
1585 rr.update_label_nuses = true;
1586 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1588 /* Do not replace the label in SRC1->END because when deleting
1589 a block whose end is a tablejump, the tablejump referenced
1590 from the instruction is deleted too. */
1591 if (insn != BB_END (src1))
1592 for_each_rtx (&insn, replace_label, &rr);
1596 #endif
1598 /* Avoid splitting if possible. */
1599 if (newpos2 == BB_HEAD (src2))
1600 redirect_to = src2;
1601 else
1603 if (dump_file)
1604 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1605 src2->index, nmatch);
1606 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1609 if (dump_file)
1610 fprintf (dump_file,
1611 "Cross jumping from bb %i to bb %i; %i common insns\n",
1612 src1->index, src2->index, nmatch);
1614 redirect_to->count += src1->count;
1615 redirect_to->frequency += src1->frequency;
1616 /* We may have some registers visible through the block.  */
1617 redirect_to->flags |= BB_DIRTY;
1619 /* Recompute the frequencies and counts of outgoing edges. */
1620 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1622 edge s2;
1623 edge_iterator ei;
1624 basic_block d = s->dest;
1626 if (FORWARDER_BLOCK_P (d))
1627 d = EDGE_SUCC (d, 0)->dest;
1629 FOR_EACH_EDGE (s2, ei, src1->succs)
1631 basic_block d2 = s2->dest;
1632 if (FORWARDER_BLOCK_P (d2))
1633 d2 = EDGE_SUCC (d2, 0)->dest;
1634 if (d == d2)
1635 break;
1638 s->count += s2->count;
1640 /* Take care to update possible forwarder blocks. We verified
1641 that there is no more than one in the chain, so we can't run
1642 into an infinite loop.  */
1643 if (FORWARDER_BLOCK_P (s->dest))
1645 EDGE_SUCC (s->dest, 0)->count += s2->count;
1646 s->dest->count += s2->count;
1647 s->dest->frequency += EDGE_FREQUENCY (s);
1650 if (FORWARDER_BLOCK_P (s2->dest))
1652 EDGE_SUCC (s2->dest, 0)->count -= s2->count;
1653 if (EDGE_SUCC (s2->dest, 0)->count < 0)
1654 EDGE_SUCC (s2->dest, 0)->count = 0;
1655 s2->dest->count -= s2->count;
1656 s2->dest->frequency -= EDGE_FREQUENCY (s);
1657 if (s2->dest->frequency < 0)
1658 s2->dest->frequency = 0;
1659 if (s2->dest->count < 0)
1660 s2->dest->count = 0;
1663 if (!redirect_to->frequency && !src1->frequency)
1664 s->probability = (s->probability + s2->probability) / 2;
1665 else
1666 s->probability
1667 = ((s->probability * redirect_to->frequency +
1668 s2->probability * src1->frequency)
1669 / (redirect_to->frequency + src1->frequency));
1672 update_br_prob_note (redirect_to);
1674 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1676 /* Skip possible basic block header. */
1677 if (LABEL_P (newpos1))
1678 newpos1 = NEXT_INSN (newpos1);
1680 if (NOTE_P (newpos1))
1681 newpos1 = NEXT_INSN (newpos1);
1683 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1684 to_remove = EDGE_SUCC (redirect_from, 0)->dest;
1686 redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
1687 delete_basic_block (to_remove);
1689 update_forwarder_flag (redirect_from);
1691 return true;
1694 /* Search the predecessors of BB for common insn sequences. When found,
1695 share code between them by redirecting control flow. Return true if
1696 any changes made. */
1698 static bool
1699 try_crossjump_bb (int mode, basic_block bb)
1701 edge e, e2, fallthru;
1702 bool changed;
1703 unsigned max, ix, ix2;
1704 basic_block ev, ev2;
1705 edge_iterator ei;
1707 /* Nothing to do if there are not at least two incoming edges.  */
1708 if (EDGE_COUNT (bb->preds) < 2)
1709 return false;
1711 /* If we are partitioning hot/cold basic blocks, we don't want to
1712 mess up unconditional or indirect jumps that cross between hot
1713 and cold sections.
1715 Basic block partitioning may result in some jumps that appear to
1716 be optimizable (or blocks that appear to be mergeable), but which really
1717 must be left untouched (they are required to make it safely across
1718 partition boundaries). See the comments at the top of
1719 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1721 if (flag_reorder_blocks_and_partition
1722 && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
1723 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
1724 return false;
1726 /* It is always cheapest to redirect a block that ends in a branch to
1727 a block that falls through into BB, as that adds no branches to the
1728 program. We'll try that combination first. */
1729 fallthru = NULL;
1730 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1732 if (EDGE_COUNT (bb->preds) > max)
1733 return false;
1735 FOR_EACH_EDGE (e, ei, bb->preds)
1737 if (e->flags & EDGE_FALLTHRU)
1738 fallthru = e;
1741 changed = false;
1742 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1744 e = EDGE_PRED (ev, ix);
1745 ix++;
1747 /* As noted above, first try with the fallthru predecessor. */
1748 if (fallthru)
1750 /* Don't combine the fallthru edge into anything else.
1751 If there is a match, we'll do it the other way around. */
1752 if (e == fallthru)
1753 continue;
1754 /* If nothing changed since the last attempt, there is nothing
1755 we can do. */
1756 if (!first_pass
1757 && (!(e->src->flags & BB_DIRTY)
1758 && !(fallthru->src->flags & BB_DIRTY)))
1759 continue;
1761 if (try_crossjump_to_edge (mode, e, fallthru))
1763 changed = true;
1764 ix = 0;
1765 ev = bb;
1766 continue;
1770 /* Non-obvious work limiting check: Recognize that we're going
1771 to call try_crossjump_bb on every basic block. So if we have
1772 two blocks with lots of outgoing edges (a switch) and they
1773 share lots of common destinations, then we would do the
1774 cross-jump check once for each common destination.
1776 Now, if the blocks actually are cross-jump candidates, then
1777 all of their destinations will be shared. Which means that
1778 we only need check them for cross-jump candidacy once. We
1779 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1780 choosing to do the check from the block for which the edge
1781 in question is the first successor of A. */
1782 if (EDGE_SUCC (e->src, 0) != e)
1783 continue;
1785 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1787 e2 = EDGE_PRED (ev2, ix2);
1788 ix2++;
1790 if (e2 == e)
1791 continue;
1793 /* We've already checked the fallthru edge above. */
1794 if (e2 == fallthru)
1795 continue;
1797 /* The "first successor" check above only prevents multiple
1798 checks of crossjump(A,B). In order to prevent redundant
1799 checks of crossjump(B,A), require that A be the block
1800 with the lowest index. */
1801 if (e->src->index > e2->src->index)
1802 continue;
1804 /* If nothing changed since the last attempt, there is nothing
1805 we can do. */
1806 if (!first_pass
1807 && (!(e->src->flags & BB_DIRTY)
1808 && !(e2->src->flags & BB_DIRTY)))
1809 continue;
1811 if (try_crossjump_to_edge (mode, e, e2))
1813 changed = true;
1814 ev2 = bb;
1815 ix = 0;
1816 break;
1821 return changed;
1824 /* Do simple CFG optimizations - basic block merging, simplification of jump
1825 instructions, etc.  Return nonzero if changes were made.  */
1827 static bool
1828 try_optimize_cfg (int mode)
1830 bool changed_overall = false;
1831 bool changed;
1832 int iterations = 0;
1833 basic_block bb, b, next;
1835 if (mode & CLEANUP_CROSSJUMP)
1836 add_noreturn_fake_exit_edges ();
1838 FOR_EACH_BB (bb)
1839 update_forwarder_flag (bb);
1841 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1842 clear_bb_flags ();
1844 if (! targetm.cannot_modify_jumps_p ())
1846 first_pass = true;
1847 /* Attempt to merge blocks as made possible by edge removal. If
1848 a block has only one successor, and the successor has only
1849 one predecessor, they may be combined. */
1852 changed = false;
1853 iterations++;
1855 if (dump_file)
1856 fprintf (dump_file,
1857 "\n\ntry_optimize_cfg iteration %i\n\n",
1858 iterations);
1860 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1862 basic_block c;
1863 edge s;
1864 bool changed_here = false;
1866 /* Delete trivially dead basic blocks. */
1867 while (EDGE_COUNT (b->preds) == 0)
1869 c = b->prev_bb;
1870 if (dump_file)
1871 fprintf (dump_file, "Deleting block %i.\n",
1872 b->index);
1874 delete_basic_block (b);
1875 if (!(mode & CLEANUP_CFGLAYOUT))
1876 changed = true;
1877 b = c;
1880 /* Remove code labels no longer used. */
1881 if (EDGE_COUNT (b->preds) == 1
1882 && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
1883 && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
1884 && LABEL_P (BB_HEAD (b))
1885 /* If the previous block ends with a branch to this
1886 block, we can't delete the label. Normally this
1887 is a condjump that is yet to be simplified, but
1888 if CASE_DROPS_THRU, this can be a tablejump with
1889 some element going to the same place as the
1890 default (fallthru). */
1891 && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
1892 || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
1893 || ! label_is_jump_target_p (BB_HEAD (b),
1894 BB_END (EDGE_PRED (b, 0)->src))))
1896 rtx label = BB_HEAD (b);
1898 delete_insn_chain (label, label);
1899 /* In case the label is undeletable, move it after the
1900 BASIC_BLOCK note.  */
1901 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1903 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1905 reorder_insns_nobb (label, label, bb_note);
1906 BB_HEAD (b) = bb_note;
1908 if (dump_file)
1909 fprintf (dump_file, "Deleted label in block %i.\n",
1910 b->index);
1913 /* If we fall through an empty block, we can remove it. */
1914 if (!(mode & CLEANUP_CFGLAYOUT)
1915 && EDGE_COUNT (b->preds) == 1
1916 && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
1917 && !LABEL_P (BB_HEAD (b))
1918 && FORWARDER_BLOCK_P (b)
1919 /* Note that forwarder_block_p true ensures that
1920 there is a successor for this block. */
1921 && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
1922 && n_basic_blocks > 1)
1924 if (dump_file)
1925 fprintf (dump_file,
1926 "Deleting fallthru block %i.\n",
1927 b->index);
1929 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1930 redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
1931 delete_basic_block (b);
1932 changed = true;
1933 b = c;
1936 if (EDGE_COUNT (b->succs) == 1
1937 && (s = EDGE_SUCC (b, 0))
1938 && !(s->flags & EDGE_COMPLEX)
1939 && (c = s->dest) != EXIT_BLOCK_PTR
1940 && EDGE_COUNT (c->preds) == 1
1941 && b != c)
1943 /* When not in cfg_layout mode, use code aware of reordering
1944 INSNs.  This code possibly creates new basic blocks, so it
1945 does not fit the merge_blocks interface and is kept here in
1946 the hope that it will become useless once more of the compiler
1947 is transformed to use cfg_layout mode.  */
1949 if ((mode & CLEANUP_CFGLAYOUT)
1950 && can_merge_blocks_p (b, c))
1952 merge_blocks (b, c);
1953 update_forwarder_flag (b);
1954 changed_here = true;
1956 else if (!(mode & CLEANUP_CFGLAYOUT)
1957 /* If the jump insn has side effects,
1958 we can't kill the edge. */
1959 && (!JUMP_P (BB_END (b))
1960 || (reload_completed
1961 ? simplejump_p (BB_END (b))
1962 : (onlyjump_p (BB_END (b))
1963 && !tablejump_p (BB_END (b),
1964 NULL, NULL))))
1965 && (next = merge_blocks_move (s, b, c, mode)))
1967 b = next;
1968 changed_here = true;
1972 /* Simplify branch over branch. */
1973 if ((mode & CLEANUP_EXPENSIVE)
1974 && !(mode & CLEANUP_CFGLAYOUT)
1975 && try_simplify_condjump (b))
1976 changed_here = true;
1978 /* If B has a single outgoing edge, but uses a
1979 non-trivial jump instruction without side-effects, we
1980 can either delete the jump entirely, or replace it
1981 with a simple unconditional jump. */
1982 if (EDGE_COUNT (b->succs) == 1
1983 && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
1984 && onlyjump_p (BB_END (b))
1985 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1986 && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
1987 (mode & CLEANUP_CFGLAYOUT) != 0))
1989 update_forwarder_flag (b);
1990 changed_here = true;
1993 /* Simplify branch to branch. */
1994 if (try_forward_edges (mode, b))
1995 changed_here = true;
1997 /* Look for shared code between blocks. */
1998 if ((mode & CLEANUP_CROSSJUMP)
1999 && try_crossjump_bb (mode, b))
2000 changed_here = true;
2002 /* Don't get confused by the index shift caused by
2003 deleting blocks. */
2004 if (!changed_here)
2005 b = b->next_bb;
2006 else
2007 changed = true;
2010 if ((mode & CLEANUP_CROSSJUMP)
2011 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2012 changed = true;
2014 #ifdef ENABLE_CHECKING
2015 if (changed)
2016 verify_flow_info ();
2017 #endif
2019 changed_overall |= changed;
2020 first_pass = false;
2022 while (changed);
2025 if (mode & CLEANUP_CROSSJUMP)
2026 remove_fake_exit_edges ();
2028 clear_aux_for_blocks ();
2030 return changed_overall;
2033 /* Delete all unreachable basic blocks. */
2035 bool
2036 delete_unreachable_blocks (void)
2038 bool changed = false;
2039 basic_block b, next_bb;
2041 find_unreachable_blocks ();
2043 /* Delete all unreachable basic blocks. */
2045 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2047 next_bb = b->next_bb;
2049 if (!(b->flags & BB_REACHABLE))
2051 delete_basic_block (b);
2052 changed = true;
2056 if (changed)
2057 tidy_fallthru_edges ();
2058 return changed;
2061 /* Merges sequential blocks if possible. */
2063 bool
2064 merge_seq_blocks (void)
2066 basic_block bb;
2067 bool changed = false;
2069 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2071 if (EDGE_COUNT (bb->succs) == 1
2072 && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
2074 /* Merge the blocks and retry. */
2075 merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
2076 changed = true;
2077 continue;
2080 bb = bb->next_bb;
2083 return changed;
2086 /* Tidy the CFG by deleting unreachable code and whatnot. */
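/* The driver below first removes unreachable blocks, then alternates
   try_optimize_cfg with unreachable block removal and (when MODE permits)
   removal of trivially dead insns and dead jumptables, since each of these
   can expose further work for the others; it stops once a round no longer
   makes progress.  */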
2088 bool
2089 cleanup_cfg (int mode)
2091 bool changed = false;
2093 timevar_push (TV_CLEANUP_CFG);
2094 if (delete_unreachable_blocks ())
2096 changed = true;
2097 /* We've possibly created trivially dead code.  Clean it up right
2098 now to introduce more opportunities for try_optimize_cfg.  */
2099 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2100 && !reload_completed)
2101 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2104 compact_blocks ();
2106 while (try_optimize_cfg (mode))
2108 delete_unreachable_blocks (), changed = true;
2109 if (mode & CLEANUP_UPDATE_LIFE)
2111 /* Cleaning up the CFG introduces more opportunities for dead code
2112 removal, which in turn may introduce more opportunities for
2113 cleaning up the CFG.  */
2114 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2115 PROP_DEATH_NOTES
2116 | PROP_SCAN_DEAD_CODE
2117 | PROP_KILL_DEAD_CODE
2118 | ((mode & CLEANUP_LOG_LINKS)
2119 ? PROP_LOG_LINKS : 0)))
2120 break;
2122 else if (!(mode & CLEANUP_NO_INSN_DEL)
2123 && (mode & CLEANUP_EXPENSIVE)
2124 && !reload_completed)
2126 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2127 break;
2129 else
2130 break;
2131 delete_dead_jumptables ();
2134 /* Kill the data we won't maintain. */
2135 free_EXPR_LIST_list (&label_value_list);
2136 timevar_pop (TV_CLEANUP_CFG);
2138 return changed;