gcc/cfgcleanup.c
1 /* Control flow optimization code for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
22 /* This file contains the control flow optimizer. The main entry point is
23    cleanup_cfg. The following optimizations are performed:
25 - Unreachable blocks removal
26      - Edge forwarding (an edge to a forwarder block is forwarded to its
27        successor. Simplification of the branch instruction is performed by
28        the underlying infrastructure, so the branch can be converted to a
29        simplejump or eliminated).
30 - Cross jumping (tail merging)
31 - Conditional jump-around-simplejump simplification
32 - Basic block merging. */
34 #include "config.h"
35 #include "system.h"
36 #include "coretypes.h"
37 #include "tm.h"
38 #include "rtl.h"
39 #include "hard-reg-set.h"
40 #include "basic-block.h"
41 #include "timevar.h"
42 #include "output.h"
43 #include "insn-config.h"
44 #include "flags.h"
45 #include "recog.h"
46 #include "toplev.h"
47 #include "cselib.h"
48 #include "params.h"
49 #include "tm_p.h"
50 #include "target.h"
51 #include "regs.h"
52 #include "cfglayout.h"
53 #include "emit-rtl.h"
55 /* cleanup_cfg maintains the following flags for each basic block. */
57 enum bb_flags
59   /* Set if BB is a forwarder block, to avoid too many
60 forwarder_block_p calls. */
61 BB_FORWARDER_BLOCK = 1,
62 BB_NONTHREADABLE_BLOCK = 2
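/* These per-block flags are stashed in the basic block's AUX field;
   the macros below read, set and clear them there.  */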
65 #define BB_FLAGS(BB) (enum bb_flags) (BB)->aux
66 #define BB_SET_FLAG(BB, FLAG) \
67 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux | (FLAG))
68 #define BB_CLEAR_FLAG(BB, FLAG) \
69 (BB)->aux = (void *) (long) ((enum bb_flags) (BB)->aux & ~(FLAG))
71 #define FORWARDER_BLOCK_P(BB) (BB_FLAGS (BB) & BB_FORWARDER_BLOCK)
73 /* Set to true when we are running the first pass of the try_optimize_cfg loop. */
74 static bool first_pass;
75 static bool try_crossjump_to_edge (int, edge, edge);
76 static bool try_crossjump_bb (int, basic_block);
77 static bool outgoing_edges_match (int, basic_block, basic_block);
78 static int flow_find_cross_jump (int, basic_block, basic_block, rtx *, rtx *);
79 static bool insns_match_p (int, rtx, rtx);
81 static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block);
82 static void merge_blocks_move_successor_nojumps (basic_block, basic_block);
83 static bool try_optimize_cfg (int);
84 static bool try_simplify_condjump (basic_block);
85 static bool try_forward_edges (int, basic_block);
86 static edge thread_jump (int, edge, basic_block);
87 static bool mark_effect (rtx, bitmap);
88 static void notice_new_block (basic_block);
89 static void update_forwarder_flag (basic_block);
90 static int mentions_nonequal_regs (rtx *, void *);
91 static void merge_memattrs (rtx, rtx);
93 /* Set flags for newly created block. */
95 static void
96 notice_new_block (basic_block bb)
98 if (!bb)
99 return;
101 if (forwarder_block_p (bb))
102 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
105 /* Recompute forwarder flag after block has been modified. */
107 static void
108 update_forwarder_flag (basic_block bb)
110 if (forwarder_block_p (bb))
111 BB_SET_FLAG (bb, BB_FORWARDER_BLOCK);
112 else
113 BB_CLEAR_FLAG (bb, BB_FORWARDER_BLOCK);
116 /* Simplify a conditional jump around an unconditional jump.
117 Return true if something changed. */
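/* Concretely, a layout like

     if (cond) goto L1;
     goto L2;
   L1:

   becomes the single inverted branch

     if (!cond) goto L2;
   L1:

   and the block holding the unconditional jump is deleted.  */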
119 static bool
120 try_simplify_condjump (basic_block cbranch_block)
122 basic_block jump_block, jump_dest_block, cbranch_dest_block;
123 edge cbranch_jump_edge, cbranch_fallthru_edge;
124 rtx cbranch_insn;
126 /* Verify that there are exactly two successors. */
127 if (EDGE_COUNT (cbranch_block->succs) != 2)
128 return false;
130 /* Verify that we've got a normal conditional branch at the end
131 of the block. */
132 cbranch_insn = BB_END (cbranch_block);
133 if (!any_condjump_p (cbranch_insn))
134 return false;
136 cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block);
137 cbranch_jump_edge = BRANCH_EDGE (cbranch_block);
139 /* The next block must not have multiple predecessors, must not
140 be the last block in the function, and must contain just the
141 unconditional jump. */
142 jump_block = cbranch_fallthru_edge->dest;
143 if (EDGE_COUNT (jump_block->preds) >= 2
144 || jump_block->next_bb == EXIT_BLOCK_PTR
145 || !FORWARDER_BLOCK_P (jump_block))
146 return false;
147 jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;
149 /* If we are partitioning hot/cold basic blocks, we don't want to
150 mess up unconditional or indirect jumps that cross between hot
151 and cold sections.
153 Basic block partitioning may result in some jumps that appear to
154 be optimizable (or blocks that appear to be mergeable), but which really
155 must be left untouched (they are required to make it safely across
156 partition boundaries). See the comments at the top of
157 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
159 if (flag_reorder_blocks_and_partition
160 && (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block)
161 || (cbranch_jump_edge->flags & EDGE_CROSSING)))
162 return false;
164 /* The conditional branch must target the block after the
165 unconditional branch. */
166 cbranch_dest_block = cbranch_jump_edge->dest;
168 if (cbranch_dest_block == EXIT_BLOCK_PTR
169 || !can_fallthru (jump_block, cbranch_dest_block))
170 return false;
172 /* Invert the conditional branch. */
173 if (!invert_jump (cbranch_insn, block_label (jump_dest_block), 0))
174 return false;
176 if (dump_file)
177 fprintf (dump_file, "Simplifying condjump %i around jump %i\n",
178 INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block)));
180 /* Success. Update the CFG to match. Note that after this point
181 the edge variable names appear backwards; the redirection is done
182 this way to preserve edge profile data. */
183 cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge,
184 cbranch_dest_block);
185 cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge,
186 jump_dest_block);
187 cbranch_jump_edge->flags |= EDGE_FALLTHRU;
188 cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU;
189 update_br_prob_note (cbranch_block);
191 /* Delete the block with the unconditional jump, and clean up the mess. */
192 delete_basic_block (jump_block);
193 tidy_fallthru_edge (cbranch_jump_edge);
194 update_forwarder_flag (cbranch_block);
196 return true;
199 /* Attempt to prove that the operation is a NOOP using CSElib, or mark its
200    effect on the register. Used by jump threading. */
202 static bool
203 mark_effect (rtx exp, regset nonequal)
205 int regno;
206 rtx dest;
207 switch (GET_CODE (exp))
209 /* In case we do clobber the register, mark it as equal, as we know the
210          value is dead so it doesn't have to match. */
211 case CLOBBER:
212 if (REG_P (XEXP (exp, 0)))
214 dest = XEXP (exp, 0);
215 regno = REGNO (dest);
216 CLEAR_REGNO_REG_SET (nonequal, regno);
217 if (regno < FIRST_PSEUDO_REGISTER)
219 int n = hard_regno_nregs[regno][GET_MODE (dest)];
220 while (--n > 0)
221 CLEAR_REGNO_REG_SET (nonequal, regno + n);
224 return false;
226 case SET:
227 if (rtx_equal_for_cselib_p (SET_DEST (exp), SET_SRC (exp)))
228 return false;
229 dest = SET_DEST (exp);
230 if (dest == pc_rtx)
231 return false;
232 if (!REG_P (dest))
233 return true;
234 regno = REGNO (dest);
235 SET_REGNO_REG_SET (nonequal, regno);
236 if (regno < FIRST_PSEUDO_REGISTER)
238 int n = hard_regno_nregs[regno][GET_MODE (dest)];
239 while (--n > 0)
240 SET_REGNO_REG_SET (nonequal, regno + n);
242 return false;
244 default:
245 return false;
249 /* Return nonzero if X is a register set in regset DATA.
250 Called via for_each_rtx. */
251 static int
252 mentions_nonequal_regs (rtx *x, void *data)
254 regset nonequal = (regset) data;
255 if (REG_P (*x))
257 int regno;
259 regno = REGNO (*x);
260 if (REGNO_REG_SET_P (nonequal, regno))
261 return 1;
262 if (regno < FIRST_PSEUDO_REGISTER)
264 int n = hard_regno_nregs[regno][GET_MODE (*x)];
265 while (--n > 0)
266 if (REGNO_REG_SET_P (nonequal, regno + n))
267 return 1;
270 return 0;
272 /* Attempt to prove that the basic block B will have no side effects and
273    always continues along the same edge if reached via E. Return that edge
274    if it exists, NULL otherwise. */
276 static edge
277 thread_jump (int mode, edge e, basic_block b)
279 rtx set1, set2, cond1, cond2, insn;
280 enum rtx_code code1, code2, reversed_code2;
281 bool reverse1 = false;
282 int i;
283 regset nonequal;
284 bool failed = false;
286 if (BB_FLAGS (b) & BB_NONTHREADABLE_BLOCK)
287 return NULL;
289   /* At the moment, we handle only conditional jumps, but later we may
290 want to extend this code to tablejumps and others. */
291 if (EDGE_COUNT (e->src->succs) != 2)
292 return NULL;
293 if (EDGE_COUNT (b->succs) != 2)
295 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
296 return NULL;
299   /* The second branch must end with an onlyjump, as we will eliminate the jump. */
300 if (!any_condjump_p (BB_END (e->src)))
301 return NULL;
303 if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b)))
305 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
306 return NULL;
309 set1 = pc_set (BB_END (e->src));
310 set2 = pc_set (BB_END (b));
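  /* Make CODE1 the condition that is known to hold when B is entered via E:
     the jump condition of E->src must be reversed when E is not the edge
     taken while that condition is true.  */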
311 if (((e->flags & EDGE_FALLTHRU) != 0)
312 != (XEXP (SET_SRC (set1), 1) == pc_rtx))
313 reverse1 = true;
315 cond1 = XEXP (SET_SRC (set1), 0);
316 cond2 = XEXP (SET_SRC (set2), 0);
317 if (reverse1)
318 code1 = reversed_comparison_code (cond1, BB_END (e->src));
319 else
320 code1 = GET_CODE (cond1);
322 code2 = GET_CODE (cond2);
323 reversed_code2 = reversed_comparison_code (cond2, BB_END (b));
325 if (!comparison_dominates_p (code1, code2)
326 && !comparison_dominates_p (code1, reversed_code2))
327 return NULL;
329 /* Ensure that the comparison operators are equivalent.
330 ??? This is far too pessimistic. We should allow swapped operands,
331      different CCmodes, or for example interval comparisons that
332      dominate even when the operands are not equivalent. */
333 if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
334 || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
335 return NULL;
337 /* Short circuit cases where block B contains some side effects, as we can't
338 safely bypass it. */
339 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b));
340 insn = NEXT_INSN (insn))
341 if (INSN_P (insn) && side_effects_p (PATTERN (insn)))
343 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
344 return NULL;
347 cselib_init (false);
349 /* First process all values computed in the source basic block. */
350 for (insn = NEXT_INSN (BB_HEAD (e->src)); insn != NEXT_INSN (BB_END (e->src));
351 insn = NEXT_INSN (insn))
352 if (INSN_P (insn))
353 cselib_process_insn (insn);
355 nonequal = BITMAP_XMALLOC();
356 CLEAR_REG_SET (nonequal);
358   /* Now assume that we have continued along the edge E to B and continue
359      processing as if it were the same basic block.
360      Our goal is to prove that the whole block is a NOOP. */
362 for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)) && !failed;
363 insn = NEXT_INSN (insn))
365 if (INSN_P (insn))
367 rtx pat = PATTERN (insn);
369 if (GET_CODE (pat) == PARALLEL)
371 for (i = 0; i < XVECLEN (pat, 0); i++)
372 failed |= mark_effect (XVECEXP (pat, 0, i), nonequal);
374 else
375 failed |= mark_effect (pat, nonequal);
378 cselib_process_insn (insn);
381 /* Later we should clear nonequal of dead registers. So far we don't
382      have liveness information in cfg_cleanup. */
383 if (failed)
385 BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
386 goto failed_exit;
389   /* cond2 must not mention any register whose value is not known to be
390      equal to its value in the former block. */
391 if (for_each_rtx (&cond2, mentions_nonequal_regs, nonequal))
392 goto failed_exit;
394 /* In case liveness information is available, we need to prove equivalence
395 only of the live values. */
396 if (mode & CLEANUP_UPDATE_LIFE)
397 AND_REG_SET (nonequal, b->global_live_at_end);
399 EXECUTE_IF_SET_IN_REG_SET (nonequal, 0, i, goto failed_exit;);
401 BITMAP_XFREE (nonequal);
402 cselib_finish ();
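  /* Everything matched; pick the outgoing edge of B that will be taken,
     allowing for which arm of B's IF_THEN_ELSE is the fallthru.  */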
403 if ((comparison_dominates_p (code1, code2) != 0)
404 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
405 return BRANCH_EDGE (b);
406 else
407 return FALLTHRU_EDGE (b);
409 failed_exit:
410 BITMAP_XFREE (nonequal);
411 cselib_finish ();
412 return NULL;
415 /* Attempt to forward edges leaving basic block B.
416 Return true if successful. */
418 static bool
419 try_forward_edges (int mode, basic_block b)
421 bool changed = false;
422 edge_iterator ei;
423 edge e, *threaded_edges = NULL;
425 /* If we are partitioning hot/cold basic blocks, we don't want to
426 mess up unconditional or indirect jumps that cross between hot
427 and cold sections.
429 Basic block partitioning may result in some jumps that appear to
430      be optimizable (or blocks that appear to be mergeable), but which really
431      must be left untouched (they are required to make it safely across
432 partition boundaries). See the comments at the top of
433 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
435 if (flag_reorder_blocks_and_partition
436 && find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
437 return false;
439 for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
441 basic_block target, first;
442 int counter;
443 bool threaded = false;
444 int nthreaded_edges = 0;
445 bool may_thread = first_pass | (b->flags & BB_DIRTY);
447 /* Skip complex edges because we don't know how to update them.
449          Still handle fallthru edges, as we can succeed in forwarding the fallthru
450          edge to the same place as the branch edge of a conditional branch
451          and turn the conditional branch into an unconditional branch. */
452 if (e->flags & EDGE_COMPLEX)
454 ei_next (&ei);
455 continue;
458 target = first = e->dest;
459 counter = 0;
461 /* If we are partitioning hot/cold basic_blocks, we don't want to mess
462 up jumps that cross between hot/cold sections.
464 Basic block partitioning may result in some jumps that appear
465 to be optimizable (or blocks that appear to be mergeable), but which
466 really must be left untouched (they are required to make it safely
467 across partition boundaries). See the comments at the top of
468 bb-reorder.c:partition_hot_cold_basic_blocks for complete
469 details. */
471 if (flag_reorder_blocks_and_partition
472 && first != EXIT_BLOCK_PTR
473 && find_reg_note (BB_END (first), REG_CROSSING_JUMP, NULL_RTX))
474 return false;
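      /* Walk the chain of forwarder (and, when threading, threadable) blocks
         starting at E->dest; COUNTER bounds the walk by n_basic_blocks so
         that infinite loops are detected.  */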
476 while (counter < n_basic_blocks)
478 basic_block new_target = NULL;
479 bool new_target_threaded = false;
480 may_thread |= target->flags & BB_DIRTY;
482 if (FORWARDER_BLOCK_P (target)
483 && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
484 && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
486 /* Bypass trivial infinite loops. */
487 if (target == EDGE_SUCC (target, 0)->dest)
488 counter = n_basic_blocks;
489 new_target = EDGE_SUCC (target, 0)->dest;
492          /* Allow threading only over one edge at a time to simplify updating
493 of probabilities. */
494 else if ((mode & CLEANUP_THREADING) && may_thread)
496 edge t = thread_jump (mode, e, target);
497 if (t)
499 if (!threaded_edges)
500 threaded_edges = xmalloc (sizeof (*threaded_edges)
501 * n_basic_blocks);
502 else
504 int i;
506 /* Detect an infinite loop across blocks not
507 including the start block. */
508 for (i = 0; i < nthreaded_edges; ++i)
509 if (threaded_edges[i] == t)
510 break;
511 if (i < nthreaded_edges)
513 counter = n_basic_blocks;
514 break;
518 /* Detect an infinite loop across the start block. */
519 if (t->dest == b)
520 break;
522 gcc_assert (nthreaded_edges < n_basic_blocks);
523 threaded_edges[nthreaded_edges++] = t;
525 new_target = t->dest;
526 new_target_threaded = true;
530 if (!new_target)
531 break;
533          /* Avoid killing loop pre-headers, as they are the place the loop
534             optimizer wants to hoist code to.
536             For fallthru forwarders, the LOOP_BEG note must appear between
537             the header of the block and the CODE_LABEL of the loop; for non-forwarders
538             it must appear before the JUMP_INSN. */
539 if ((mode & CLEANUP_PRE_LOOP) && optimize)
541 rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
542 ? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
544 if (!NOTE_P (insn))
545 insn = NEXT_INSN (insn);
547 for (; insn && !LABEL_P (insn) && !INSN_P (insn);
548 insn = NEXT_INSN (insn))
549 if (NOTE_P (insn)
550 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
551 break;
553 if (NOTE_P (insn))
554 break;
556 /* Do not clean up branches to just past the end of a loop
557 at this time; it can mess up the loop optimizer's
558 recognition of some patterns. */
560 insn = PREV_INSN (BB_HEAD (target));
561 if (insn && NOTE_P (insn)
562 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
563 break;
566 counter++;
567 target = new_target;
568 threaded |= new_target_threaded;
571 if (counter >= n_basic_blocks)
573 if (dump_file)
574 fprintf (dump_file, "Infinite loop in BB %i.\n",
575 target->index);
577 else if (target == first)
578 ; /* We didn't do anything. */
579 else
581 /* Save the values now, as the edge may get removed. */
582 gcov_type edge_count = e->count;
583 int edge_probability = e->probability;
584 int edge_frequency;
585 int n = 0;
587 /* Don't force if target is exit block. */
588 if (threaded && target != EXIT_BLOCK_PTR)
590 notice_new_block (redirect_edge_and_branch_force (e, target));
591 if (dump_file)
592 fprintf (dump_file, "Conditionals threaded.\n");
594 else if (!redirect_edge_and_branch (e, target))
596 if (dump_file)
597 fprintf (dump_file,
598 "Forwarding edge %i->%i to %i failed.\n",
599 b->index, e->dest->index, target->index);
600 ei_next (&ei);
601 continue;
604 /* We successfully forwarded the edge. Now update profile
605 data: for each edge we traversed in the chain, remove
606 the original edge's execution count. */
607 edge_frequency = ((edge_probability * b->frequency
608 + REG_BR_PROB_BASE / 2)
609 / REG_BR_PROB_BASE);
611 if (!FORWARDER_BLOCK_P (b) && forwarder_block_p (b))
612 BB_SET_FLAG (b, BB_FORWARDER_BLOCK);
616 edge t;
618 if (EDGE_COUNT (first->succs) > 1)
620 gcc_assert (n < nthreaded_edges);
621 t = threaded_edges [n++];
622 gcc_assert (t->src == first);
623 update_bb_profile_for_threading (first, edge_frequency,
624 edge_count, t);
625 update_br_prob_note (first);
627 else
629 first->count -= edge_count;
630 if (first->count < 0)
631 first->count = 0;
632 first->frequency -= edge_frequency;
633 if (first->frequency < 0)
634 first->frequency = 0;
635                  /* It is possible that as a result of
636                     threading we've removed the edge, as it is
637                     threaded to the fallthru edge. Avoid
638                     getting out of sync. */
639 if (n < nthreaded_edges
640 && first == threaded_edges [n]->src)
641 n++;
642 t = EDGE_SUCC (first, 0);
645 t->count -= edge_count;
646 if (t->count < 0)
647 t->count = 0;
648 first = t->dest;
650 while (first != target);
652 changed = true;
653 continue;
655 ei_next (&ei);
658 if (threaded_edges)
659 free (threaded_edges);
660 return changed;
664 /* Blocks A and B are to be merged into a single block. A has no incoming
665 fallthru edge, so it can be moved before B without adding or modifying
666 any jumps (aside from the jump from A to B). */
668 static void
669 merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b)
671 rtx barrier;
672 bool only_notes;
674 /* If we are partitioning hot/cold basic blocks, we don't want to
675 mess up unconditional or indirect jumps that cross between hot
676 and cold sections.
678 Basic block partitioning may result in some jumps that appear to
679 be optimizable (or blocks that appear to be mergeable), but which really
680 must be left untouched (they are required to make it safely across
681 partition boundaries). See the comments at the top of
682 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
684 if (flag_reorder_blocks_and_partition
685 && (BB_PARTITION (a) != BB_PARTITION (b)
686 || find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)))
687 return;
689 barrier = next_nonnote_insn (BB_END (a));
690 gcc_assert (BARRIER_P (barrier));
691 delete_insn (barrier);
693 /* Move block and loop notes out of the chain so that we do not
694 disturb their order.
696 ??? A better solution would be to squeeze out all the non-nested notes
697 and adjust the block trees appropriately. Even better would be to have
698 a tighter connection between block trees and rtl so that this is not
699 necessary. */
700 only_notes = squeeze_notes (&BB_HEAD (a), &BB_END (a));
701 gcc_assert (!only_notes);
703 /* Scramble the insn chain. */
704 if (BB_END (a) != PREV_INSN (BB_HEAD (b)))
705 reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b)));
706 a->flags |= BB_DIRTY;
708 if (dump_file)
709 fprintf (dump_file, "Moved block %d before %d and merged.\n",
710 a->index, b->index);
712 /* Swap the records for the two blocks around. */
714 unlink_block (a);
715 link_block (a, b->prev_bb);
717 /* Now blocks A and B are contiguous. Merge them. */
718 merge_blocks (a, b);
721 /* Blocks A and B are to be merged into a single block. B has no outgoing
722 fallthru edge, so it can be moved after A without adding or modifying
723 any jumps (aside from the jump from A to B). */
725 static void
726 merge_blocks_move_successor_nojumps (basic_block a, basic_block b)
728 rtx barrier, real_b_end;
729 rtx label, table;
730 bool only_notes;
732 /* If we are partitioning hot/cold basic blocks, we don't want to
733 mess up unconditional or indirect jumps that cross between hot
734 and cold sections.
736 Basic block partitioning may result in some jumps that appear to
737 be optimizable (or blocks that appear to be mergeable), but which really
738 must be left untouched (they are required to make it safely across
739 partition boundaries). See the comments at the top of
740 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
742 if (flag_reorder_blocks_and_partition
743 && (find_reg_note (BB_END (a), REG_CROSSING_JUMP, NULL_RTX)
744 || BB_PARTITION (a) != BB_PARTITION (b)))
745 return;
747 real_b_end = BB_END (b);
749   /* If there is a jump table following block B, temporarily add the jump table
750 to block B so that it will also be moved to the correct location. */
751 if (tablejump_p (BB_END (b), &label, &table)
752 && prev_active_insn (label) == BB_END (b))
754 BB_END (b) = table;
757 /* There had better have been a barrier there. Delete it. */
758 barrier = NEXT_INSN (BB_END (b));
759 if (barrier && BARRIER_P (barrier))
760 delete_insn (barrier);
762 /* Move block and loop notes out of the chain so that we do not
763 disturb their order.
765 ??? A better solution would be to squeeze out all the non-nested notes
766 and adjust the block trees appropriately. Even better would be to have
767 a tighter connection between block trees and rtl so that this is not
768 necessary. */
769 only_notes = squeeze_notes (&BB_HEAD (b), &BB_END (b));
770 gcc_assert (!only_notes);
773 /* Scramble the insn chain. */
774 reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a));
776 /* Restore the real end of b. */
777 BB_END (b) = real_b_end;
779 if (dump_file)
780 fprintf (dump_file, "Moved block %d after %d and merged.\n",
781 b->index, a->index);
783 /* Now blocks A and B are contiguous. Merge them. */
784 merge_blocks (a, b);
787 /* Attempt to merge basic blocks that are potentially non-adjacent.
788    Return NULL iff the attempt failed, otherwise return the basic block
789    where cleanup_cfg should continue. Because the merging commonly
790    moves a basic block away or introduces another optimization
791    possibility, return the basic block just before B so cleanup_cfg doesn't
792    need to iterate.
794    It may be a good idea to return the basic block before C in the case
795 C has been moved after B and originally appeared earlier in the
796 insn sequence, but we have no information available about the
797 relative ordering of these two. Hopefully it is not too common. */
799 static basic_block
800 merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
802 basic_block next;
804 /* If we are partitioning hot/cold basic blocks, we don't want to
805 mess up unconditional or indirect jumps that cross between hot
806 and cold sections.
808 Basic block partitioning may result in some jumps that appear to
809 be optimizable (or blocks that appear to be mergeable), but which really
810 must be left untouched (they are required to make it safely across
811 partition boundaries). See the comments at the top of
812 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
814 if (flag_reorder_blocks_and_partition
815 && (find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
816 || find_reg_note (BB_END (c), REG_CROSSING_JUMP, NULL_RTX)
817 || BB_PARTITION (b) != BB_PARTITION (c)))
818 return NULL;
822 /* If B has a fallthru edge to C, no need to move anything. */
823 if (e->flags & EDGE_FALLTHRU)
825 int b_index = b->index, c_index = c->index;
826 merge_blocks (b, c);
827 update_forwarder_flag (b);
829 if (dump_file)
830 fprintf (dump_file, "Merged %d and %d without moving.\n",
831 b_index, c_index);
833 return b->prev_bb == ENTRY_BLOCK_PTR ? b : b->prev_bb;
836 /* Otherwise we will need to move code around. Do that only if expensive
837 transformations are allowed. */
838 else if (mode & CLEANUP_EXPENSIVE)
840 edge tmp_edge, b_fallthru_edge;
841 bool c_has_outgoing_fallthru;
842 bool b_has_incoming_fallthru;
843 edge_iterator ei;
845 /* Avoid overactive code motion, as the forwarder blocks should be
846 eliminated by edge redirection instead. One exception might have
847 been if B is a forwarder block and C has no fallthru edge, but
848 that should be cleaned up by bb-reorder instead. */
849 if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c))
850 return NULL;
852       /* We must make sure not to munge the nesting of lexical blocks
853          and loop notes. This is done by squeezing out all the notes
854 and leaving them there to lie. Not ideal, but functional. */
856 FOR_EACH_EDGE (tmp_edge, ei, c->succs)
857 if (tmp_edge->flags & EDGE_FALLTHRU)
858 break;
860 c_has_outgoing_fallthru = (tmp_edge != NULL);
862 FOR_EACH_EDGE (tmp_edge, ei, b->preds)
863 if (tmp_edge->flags & EDGE_FALLTHRU)
864 break;
866 b_has_incoming_fallthru = (tmp_edge != NULL);
867 b_fallthru_edge = tmp_edge;
868 next = b->prev_bb;
869 if (next == c)
870 next = next->prev_bb;
872 /* Otherwise, we're going to try to move C after B. If C does
873 not have an outgoing fallthru, then it can be moved
874 immediately after B without introducing or modifying jumps. */
875 if (! c_has_outgoing_fallthru)
877 merge_blocks_move_successor_nojumps (b, c);
878 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
881 /* If B does not have an incoming fallthru, then it can be moved
882 immediately before C without introducing or modifying jumps.
883 C cannot be the first block, so we do not have to worry about
884 accessing a non-existent block. */
886 if (b_has_incoming_fallthru)
888 basic_block bb;
890 if (b_fallthru_edge->src == ENTRY_BLOCK_PTR)
891 return NULL;
892 bb = force_nonfallthru (b_fallthru_edge);
893 if (bb)
894 notice_new_block (bb);
897 merge_blocks_move_predecessor_nojumps (b, c);
898 return next == ENTRY_BLOCK_PTR ? next->next_bb : next;
901 return NULL;
905 /* Remove the memory attributes of MEM expressions X and Y
906    that are not equal. */
908 void
909 merge_memattrs (rtx x, rtx y)
911 int i;
912 int j;
913 enum rtx_code code;
914 const char *fmt;
916 if (x == y)
917 return;
918 if (x == 0 || y == 0)
919 return;
921 code = GET_CODE (x);
923 if (code != GET_CODE (y))
924 return;
926 if (GET_MODE (x) != GET_MODE (y))
927 return;
929 if (code == MEM && MEM_ATTRS (x) != MEM_ATTRS (y))
931 if (! MEM_ATTRS (x))
932 MEM_ATTRS (y) = 0;
933 else if (! MEM_ATTRS (y))
934 MEM_ATTRS (x) = 0;
935 else
937 if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y))
939 set_mem_alias_set (x, 0);
940 set_mem_alias_set (y, 0);
943 if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y)))
945 set_mem_expr (x, 0);
946 set_mem_expr (y, 0);
947 set_mem_offset (x, 0);
948 set_mem_offset (y, 0);
950 else if (MEM_OFFSET (x) != MEM_OFFSET (y))
952 set_mem_offset (x, 0);
953 set_mem_offset (y, 0);
956 set_mem_size (x, GEN_INT (MAX (INTVAL (MEM_SIZE (x)),
957 INTVAL (MEM_SIZE (y)))));
958 set_mem_size (y, MEM_SIZE (x));
960 set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y)));
961 set_mem_align (y, MEM_ALIGN (x));
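  /* Recurse over the operands so that MEMs nested inside X and Y are
     handled as well.  */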
965 fmt = GET_RTX_FORMAT (code);
966 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
968 switch (fmt[i])
970 case 'E':
971        /* The two vectors must have the same length. */
972 if (XVECLEN (x, i) != XVECLEN (y, i))
973 return;
975 for (j = 0; j < XVECLEN (x, i); j++)
976 merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j));
978 break;
980 case 'e':
981 merge_memattrs (XEXP (x, i), XEXP (y, i));
984 return;
988 /* Return true if I1 and I2 are equivalent and thus can be crossjumped. */
990 static bool
991 insns_match_p (int mode ATTRIBUTE_UNUSED, rtx i1, rtx i2)
993 rtx p1, p2;
995 /* Verify that I1 and I2 are equivalent. */
996 if (GET_CODE (i1) != GET_CODE (i2))
997 return false;
999 p1 = PATTERN (i1);
1000 p2 = PATTERN (i2);
1002 if (GET_CODE (p1) != GET_CODE (p2))
1003 return false;
1005 /* If this is a CALL_INSN, compare register usage information.
1006 If we don't check this on stack register machines, the two
1007 CALL_INSNs might be merged leaving reg-stack.c with mismatching
1008 numbers of stack registers in the same basic block.
1009 If we don't check this on machines with delay slots, a delay slot may
1010 be filled that clobbers a parameter expected by the subroutine.
1012 ??? We take the simple route for now and assume that if they're
1013 equal, they were constructed identically. */
1015 if (CALL_P (i1)
1016 && (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1),
1017 CALL_INSN_FUNCTION_USAGE (i2))
1018 || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)))
1019 return false;
1021 #ifdef STACK_REGS
1022 /* If cross_jump_death_matters is not 0, the insn's mode
1023 indicates whether or not the insn contains any stack-like
1024 regs. */
1026 if ((mode & CLEANUP_POST_REGSTACK) && stack_regs_mentioned (i1))
1028 /* If register stack conversion has already been done, then
1029 death notes must also be compared before it is certain that
1030 the two instruction streams match. */
1032 rtx note;
1033 HARD_REG_SET i1_regset, i2_regset;
1035 CLEAR_HARD_REG_SET (i1_regset);
1036 CLEAR_HARD_REG_SET (i2_regset);
1038 for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
1039 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1040 SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
1042 for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
1043 if (REG_NOTE_KIND (note) == REG_DEAD && STACK_REG_P (XEXP (note, 0)))
1044 SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
1046 GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
1048 return false;
1050 done:
1053 #endif
1055 if (reload_completed
1056 ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2))
1057 return true;
1059 /* Do not do EQUIV substitution after reload. First, we're undoing the
1060 work of reload_cse. Second, we may be undoing the work of the post-
1061 reload splitting pass. */
1062 /* ??? Possibly add a new phase switch variable that can be used by
1063 targets to disallow the troublesome insns after splitting. */
1064 if (!reload_completed)
1066 /* The following code helps take care of G++ cleanups. */
1067 rtx equiv1 = find_reg_equal_equiv_note (i1);
1068 rtx equiv2 = find_reg_equal_equiv_note (i2);
1070 if (equiv1 && equiv2
1071 /* If the equivalences are not to a constant, they may
1072 reference pseudos that no longer exist, so we can't
1073 use them. */
1074 && (! reload_completed
1075 || (CONSTANT_P (XEXP (equiv1, 0))
1076 && rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))))
1078 rtx s1 = single_set (i1);
1079 rtx s2 = single_set (i2);
1080 if (s1 != 0 && s2 != 0
1081 && rtx_renumbered_equal_p (SET_DEST (s1), SET_DEST (s2)))
1083 validate_change (i1, &SET_SRC (s1), XEXP (equiv1, 0), 1);
1084 validate_change (i2, &SET_SRC (s2), XEXP (equiv2, 0), 1);
1085 if (! rtx_renumbered_equal_p (p1, p2))
1086 cancel_changes (0);
1087 else if (apply_change_group ())
1088 return true;
1093 return false;
1096 /* Look through the insns at the end of BB1 and BB2 and find the longest
1097    sequence of equivalent insns. Store the first insns of that sequence
1098 in *F1 and *F2 and return the sequence length.
1100 To simplify callers of this function, if the blocks match exactly,
1101 store the head of the blocks in *F1 and *F2. */
1103 static int
1104 flow_find_cross_jump (int mode ATTRIBUTE_UNUSED, basic_block bb1,
1105 basic_block bb2, rtx *f1, rtx *f2)
1107 rtx i1, i2, last1, last2, afterlast1, afterlast2;
1108 int ninsns = 0;
1110 /* Skip simple jumps at the end of the blocks. Complex jumps still
1111 need to be compared for equivalence, which we'll do below. */
1113 i1 = BB_END (bb1);
1114 last1 = afterlast1 = last2 = afterlast2 = NULL_RTX;
1115 if (onlyjump_p (i1)
1116 || (returnjump_p (i1) && !side_effects_p (PATTERN (i1))))
1118 last1 = i1;
1119 i1 = PREV_INSN (i1);
1122 i2 = BB_END (bb2);
1123 if (onlyjump_p (i2)
1124 || (returnjump_p (i2) && !side_effects_p (PATTERN (i2))))
1126 last2 = i2;
1127       /* Count everything except for the unconditional jump as an insn. */
1128 if (!simplejump_p (i2) && !returnjump_p (i2) && last1)
1129 ninsns++;
1130 i2 = PREV_INSN (i2);
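  /* Walk both blocks backwards in lock step, matching insns until a
     mismatch or the head of either block is reached.  */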
1133 while (true)
1135 /* Ignore notes. */
1136 while (!INSN_P (i1) && i1 != BB_HEAD (bb1))
1137 i1 = PREV_INSN (i1);
1139 while (!INSN_P (i2) && i2 != BB_HEAD (bb2))
1140 i2 = PREV_INSN (i2);
1142 if (i1 == BB_HEAD (bb1) || i2 == BB_HEAD (bb2))
1143 break;
1145 if (!insns_match_p (mode, i1, i2))
1146 break;
1148 merge_memattrs (i1, i2);
1150 /* Don't begin a cross-jump with a NOTE insn. */
1151 if (INSN_P (i1))
1153 /* If the merged insns have different REG_EQUAL notes, then
1154 remove them. */
1155 rtx equiv1 = find_reg_equal_equiv_note (i1);
1156 rtx equiv2 = find_reg_equal_equiv_note (i2);
1158 if (equiv1 && !equiv2)
1159 remove_note (i1, equiv1);
1160 else if (!equiv1 && equiv2)
1161 remove_note (i2, equiv2);
1162 else if (equiv1 && equiv2
1163 && !rtx_equal_p (XEXP (equiv1, 0), XEXP (equiv2, 0)))
1165 remove_note (i1, equiv1);
1166 remove_note (i2, equiv2);
1169 afterlast1 = last1, afterlast2 = last2;
1170 last1 = i1, last2 = i2;
1171 ninsns++;
1174 i1 = PREV_INSN (i1);
1175 i2 = PREV_INSN (i2);
1178 #ifdef HAVE_cc0
1179 /* Don't allow the insn after a compare to be shared by
1180 cross-jumping unless the compare is also shared. */
1181 if (ninsns && reg_mentioned_p (cc0_rtx, last1) && ! sets_cc0_p (last1))
1182 last1 = afterlast1, last2 = afterlast2, ninsns--;
1183 #endif
1185 /* Include preceding notes and labels in the cross-jump. One,
1186 this may bring us to the head of the blocks as requested above.
1187      Two, it keeps line number notes as closely matched as may be. */
1188 if (ninsns)
1190 while (last1 != BB_HEAD (bb1) && !INSN_P (PREV_INSN (last1)))
1191 last1 = PREV_INSN (last1);
1193 if (last1 != BB_HEAD (bb1) && LABEL_P (PREV_INSN (last1)))
1194 last1 = PREV_INSN (last1);
1196 while (last2 != BB_HEAD (bb2) && !INSN_P (PREV_INSN (last2)))
1197 last2 = PREV_INSN (last2);
1199 if (last2 != BB_HEAD (bb2) && LABEL_P (PREV_INSN (last2)))
1200 last2 = PREV_INSN (last2);
1202 *f1 = last1;
1203 *f2 = last2;
1206 return ninsns;
1209 /* Return true iff the outgoing edges of BB1 and BB2 match, together with
1210    the branch instruction. This means that if we commonize the control
1211    flow before the end of the basic block, the semantics remain unchanged.
1213 We may assume that there exists one edge with a common destination. */
1215 static bool
1216 outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
1218 int nehedges1 = 0, nehedges2 = 0;
1219 edge fallthru1 = 0, fallthru2 = 0;
1220 edge e1, e2;
1221 edge_iterator ei;
1223 /* If BB1 has only one successor, we may be looking at either an
1224 unconditional jump, or a fake edge to exit. */
1225 if (EDGE_COUNT (bb1->succs) == 1
1226 && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1227 && (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
1228 return (EDGE_COUNT (bb2->succs) == 1
1229 && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
1230 && (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
1232 /* Match conditional jumps - this may get tricky when fallthru and branch
1233 edges are crossed. */
1234 if (EDGE_COUNT (bb1->succs) == 2
1235 && any_condjump_p (BB_END (bb1))
1236 && onlyjump_p (BB_END (bb1)))
1238 edge b1, f1, b2, f2;
1239 bool reverse, match;
1240 rtx set1, set2, cond1, cond2;
1241 enum rtx_code code1, code2;
1243 if (EDGE_COUNT (bb2->succs) != 2
1244 || !any_condjump_p (BB_END (bb2))
1245 || !onlyjump_p (BB_END (bb2)))
1246 return false;
1248 b1 = BRANCH_EDGE (bb1);
1249 b2 = BRANCH_EDGE (bb2);
1250 f1 = FALLTHRU_EDGE (bb1);
1251 f2 = FALLTHRU_EDGE (bb2);
1253 /* Get around possible forwarders on fallthru edges. Other cases
1254 should be optimized out already. */
1255 if (FORWARDER_BLOCK_P (f1->dest))
1256 f1 = EDGE_SUCC (f1->dest, 0);
1258 if (FORWARDER_BLOCK_P (f2->dest))
1259 f2 = EDGE_SUCC (f2->dest, 0);
1261 /* To simplify use of this function, return false if there are
1262 unneeded forwarder blocks. These will get eliminated later
1263 during cleanup_cfg. */
1264 if (FORWARDER_BLOCK_P (f1->dest)
1265 || FORWARDER_BLOCK_P (f2->dest)
1266 || FORWARDER_BLOCK_P (b1->dest)
1267 || FORWARDER_BLOCK_P (b2->dest))
1268 return false;
1270 if (f1->dest == f2->dest && b1->dest == b2->dest)
1271 reverse = false;
1272 else if (f1->dest == b2->dest && b1->dest == f2->dest)
1273 reverse = true;
1274 else
1275 return false;
1277 set1 = pc_set (BB_END (bb1));
1278 set2 = pc_set (BB_END (bb2));
1279 if ((XEXP (SET_SRC (set1), 1) == pc_rtx)
1280 != (XEXP (SET_SRC (set2), 1) == pc_rtx))
1281 reverse = !reverse;
1283 cond1 = XEXP (SET_SRC (set1), 0);
1284 cond2 = XEXP (SET_SRC (set2), 0);
1285 code1 = GET_CODE (cond1);
1286 if (reverse)
1287 code2 = reversed_comparison_code (cond2, BB_END (bb2));
1288 else
1289 code2 = GET_CODE (cond2);
1291 if (code2 == UNKNOWN)
1292 return false;
1294 /* Verify codes and operands match. */
1295 match = ((code1 == code2
1296 && rtx_renumbered_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
1297 && rtx_renumbered_equal_p (XEXP (cond1, 1), XEXP (cond2, 1)))
1298 || (code1 == swap_condition (code2)
1299 && rtx_renumbered_equal_p (XEXP (cond1, 1),
1300 XEXP (cond2, 0))
1301 && rtx_renumbered_equal_p (XEXP (cond1, 0),
1302 XEXP (cond2, 1))));
1304       /* If we return true, we will join the blocks, which means that
1305          we will only have one branch prediction bit to work with. Thus
1306 we require the existing branches to have probabilities that are
1307 roughly similar. */
1308 if (match
1309 && !optimize_size
1310 && maybe_hot_bb_p (bb1)
1311 && maybe_hot_bb_p (bb2))
1313 int prob2;
1315 if (b1->dest == b2->dest)
1316 prob2 = b2->probability;
1317 else
1318             /* Do not use the f2 probability, as f2 may be forwarded. */
1319 prob2 = REG_BR_PROB_BASE - b2->probability;
1321 /* Fail if the difference in probabilities is greater than 50%.
1322 This rules out two well-predicted branches with opposite
1323 outcomes. */
1324 if (abs (b1->probability - prob2) > REG_BR_PROB_BASE / 2)
1326 if (dump_file)
1327 fprintf (dump_file,
1328 "Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
1329 bb1->index, bb2->index, b1->probability, prob2);
1331 return false;
1335 if (dump_file && match)
1336 fprintf (dump_file, "Conditionals in bb %i and %i match.\n",
1337 bb1->index, bb2->index);
1339 return match;
1342 /* Generic case - we are seeing a computed jump, table jump or trapping
1343 instruction. */
1345 #ifndef CASE_DROPS_THROUGH
1346   /* Check whether there are tablejumps at the end of BB1 and BB2.
1347 Return true if they are identical. */
1349 rtx label1, label2;
1350 rtx table1, table2;
1352 if (tablejump_p (BB_END (bb1), &label1, &table1)
1353 && tablejump_p (BB_END (bb2), &label2, &table2)
1354 && GET_CODE (PATTERN (table1)) == GET_CODE (PATTERN (table2)))
1356         /* The labels should never be the same rtx. If they really are the same,
1357            the jump tables are the same too. So disable crossjumping of blocks BB1
1358            and BB2 because when deleting the common insns at the end of BB1
1359            by delete_basic_block () the jump table would be deleted too. */
1360 /* If LABEL2 is referenced in BB1->END do not do anything
1361            because we would lose information when replacing
1362 LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */
1363 if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)))
1365 /* Set IDENTICAL to true when the tables are identical. */
1366 bool identical = false;
1367 rtx p1, p2;
1369 p1 = PATTERN (table1);
1370 p2 = PATTERN (table2);
1371 if (GET_CODE (p1) == ADDR_VEC && rtx_equal_p (p1, p2))
1373 identical = true;
1375 else if (GET_CODE (p1) == ADDR_DIFF_VEC
1376 && (XVECLEN (p1, 1) == XVECLEN (p2, 1))
1377 && rtx_equal_p (XEXP (p1, 2), XEXP (p2, 2))
1378 && rtx_equal_p (XEXP (p1, 3), XEXP (p2, 3)))
1380 int i;
1382 identical = true;
1383 for (i = XVECLEN (p1, 1) - 1; i >= 0 && identical; i--)
1384 if (!rtx_equal_p (XVECEXP (p1, 1, i), XVECEXP (p2, 1, i)))
1385 identical = false;
1388 if (identical)
1390 replace_label_data rr;
1391 bool match;
1393 /* Temporarily replace references to LABEL1 with LABEL2
1394                in BB1->END so that we can compare the instructions. */
1395 rr.r1 = label1;
1396 rr.r2 = label2;
1397 rr.update_label_nuses = false;
1398 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1400 match = insns_match_p (mode, BB_END (bb1), BB_END (bb2));
1401 if (dump_file && match)
1402 fprintf (dump_file,
1403 "Tablejumps in bb %i and %i match.\n",
1404 bb1->index, bb2->index);
1406             /* Restore the original label in BB1->END because when deleting
1407 a block whose end is a tablejump, the tablejump referenced
1408 from the instruction is deleted too. */
1409 rr.r1 = label2;
1410 rr.r2 = label1;
1411 for_each_rtx (&BB_END (bb1), replace_label, &rr);
1413 return match;
1416 return false;
1419 #endif
1421 /* First ensure that the instructions match. There may be many outgoing
1422 edges so this test is generally cheaper. */
1423 if (!insns_match_p (mode, BB_END (bb1), BB_END (bb2)))
1424 return false;
1426   /* Search the outgoing edges, ensure that the counts match, and find possible
1427      fallthru and exception handling edges, since these need more
1428      validation. */
1429 if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
1430 return false;
1432 FOR_EACH_EDGE (e1, ei, bb1->succs)
1434 e2 = EDGE_SUCC (bb2, ei.index);
1436 if (e1->flags & EDGE_EH)
1437 nehedges1++;
1439 if (e2->flags & EDGE_EH)
1440 nehedges2++;
1442 if (e1->flags & EDGE_FALLTHRU)
1443 fallthru1 = e1;
1444 if (e2->flags & EDGE_FALLTHRU)
1445 fallthru2 = e2;
1448   /* If the number of edges of the various types does not match, fail. */
1449 if (nehedges1 != nehedges2
1450 || (fallthru1 != 0) != (fallthru2 != 0))
1451 return false;
1453 /* fallthru edges must be forwarded to the same destination. */
1454 if (fallthru1)
1456 basic_block d1 = (forwarder_block_p (fallthru1->dest)
1457 ? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
1458 basic_block d2 = (forwarder_block_p (fallthru2->dest)
1459 ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);
1461 if (d1 != d2)
1462 return false;
1465 /* Ensure the same EH region. */
1467 rtx n1 = find_reg_note (BB_END (bb1), REG_EH_REGION, 0);
1468 rtx n2 = find_reg_note (BB_END (bb2), REG_EH_REGION, 0);
1470 if (!n1 && n2)
1471 return false;
1473 if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0)))
1474 return false;
1477   /* We don't need to match the rest of the edges, as the above checks should
1478      be enough to ensure that they are equivalent. */
1479 return true;
1482 /* E1 and E2 are edges with the same destination block. Search their
1483 predecessors for common code. If found, redirect control flow from
1484 (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC. */
1486 static bool
1487 try_crossjump_to_edge (int mode, edge e1, edge e2)
1489 int nmatch;
1490 basic_block src1 = e1->src, src2 = e2->src;
1491 basic_block redirect_to, redirect_from, to_remove;
1492 rtx newpos1, newpos2;
1493 edge s;
1494 edge_iterator ei;
1496 newpos1 = newpos2 = NULL_RTX;
1498 /* If we have partitioned hot/cold basic blocks, it is a bad idea
1499 to try this optimization.
1501 Basic block partitioning may result in some jumps that appear to
1502 be optimizable (or blocks that appear to be mergeable), but which really
1503 must be left untouched (they are required to make it safely across
1504 partition boundaries). See the comments at the top of
1505 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1507 if (flag_reorder_blocks_and_partition && no_new_pseudos)
1508 return false;
1510 /* Search backward through forwarder blocks. We don't need to worry
1511 about multiple entry or chained forwarders, as they will be optimized
1512 away. We do this to look past the unconditional jump following a
1513 conditional jump that is required due to the current CFG shape. */
1514 if (EDGE_COUNT (src1->preds) == 1
1515 && FORWARDER_BLOCK_P (src1))
1516 e1 = EDGE_PRED (src1, 0), src1 = e1->src;
1518 if (EDGE_COUNT (src2->preds) == 1
1519 && FORWARDER_BLOCK_P (src2))
1520 e2 = EDGE_PRED (src2, 0), src2 = e2->src;
1522 /* Nothing to do if we reach ENTRY, or a common source block. */
1523 if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
1524 return false;
1525 if (src1 == src2)
1526 return false;
1528   /* Seeing more than one forwarder block would confuse us later... */
1529 if (FORWARDER_BLOCK_P (e1->dest)
1530 && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
1531 return false;
1533 if (FORWARDER_BLOCK_P (e2->dest)
1534 && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
1535 return false;
1537 /* Likewise with dead code (possibly newly created by the other optimizations
1538 of cfg_cleanup). */
1539 if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
1540 return false;
1542 /* Look for the common insn sequence, part the first ... */
1543 if (!outgoing_edges_match (mode, src1, src2))
1544 return false;
1546 /* ... and part the second. */
1547 nmatch = flow_find_cross_jump (mode, src1, src2, &newpos1, &newpos2);
1549 /* Don't proceed with the crossjump unless we found a sufficient number
1550 of matching instructions or the 'from' block was totally matched
1551 (such that its predecessors will hopefully be redirected and the
1552 block removed). */
1553 if ((nmatch < PARAM_VALUE (PARAM_MIN_CROSSJUMP_INSNS))
1554 && (newpos1 != BB_HEAD (src1)))
1555 return false;
1557 #ifndef CASE_DROPS_THROUGH
1558   /* Here we know that the insns at the end of SRC1 which are common with SRC2
1559      will be deleted.
1560      If we have tablejumps at the end of SRC1 and SRC2,
1561      they have already been compared for equivalence in outgoing_edges_match (),
1562      so replace the references to TABLE1 by references to TABLE2. */
1564 rtx label1, label2;
1565 rtx table1, table2;
1567 if (tablejump_p (BB_END (src1), &label1, &table1)
1568 && tablejump_p (BB_END (src2), &label2, &table2)
1569 && label1 != label2)
1571 replace_label_data rr;
1572 rtx insn;
1574 /* Replace references to LABEL1 with LABEL2. */
1575 rr.r1 = label1;
1576 rr.r2 = label2;
1577 rr.update_label_nuses = true;
1578 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1580 /* Do not replace the label in SRC1->END because when deleting
1581 a block whose end is a tablejump, the tablejump referenced
1582 from the instruction is deleted too. */
1583 if (insn != BB_END (src1))
1584 for_each_rtx (&insn, replace_label, &rr);
1588 #endif
1590 /* Avoid splitting if possible. */
1591 if (newpos2 == BB_HEAD (src2))
1592 redirect_to = src2;
1593 else
1595 if (dump_file)
1596 fprintf (dump_file, "Splitting bb %i before %i insns\n",
1597 src2->index, nmatch);
1598 redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
1601 if (dump_file)
1602 fprintf (dump_file,
1603 "Cross jumping from bb %i to bb %i; %i common insns\n",
1604 src1->index, src2->index, nmatch);
1606 redirect_to->count += src1->count;
1607 redirect_to->frequency += src1->frequency;
1608   /* We may have some registers visible through the block. */
1609 redirect_to->flags |= BB_DIRTY;
1611 /* Recompute the frequencies and counts of outgoing edges. */
1612 FOR_EACH_EDGE (s, ei, redirect_to->succs)
1614 edge s2;
1615 edge_iterator ei;
1616 basic_block d = s->dest;
1618 if (FORWARDER_BLOCK_P (d))
1619 d = EDGE_SUCC (d, 0)->dest;
1621 FOR_EACH_EDGE (s2, ei, src1->succs)
1623 basic_block d2 = s2->dest;
1624 if (FORWARDER_BLOCK_P (d2))
1625 d2 = EDGE_SUCC (d2, 0)->dest;
1626 if (d == d2)
1627 break;
1630 s->count += s2->count;
1632 /* Take care to update possible forwarder blocks. We verified
1633 that there is no more than one in the chain, so we can't run
1634          into an infinite loop. */
1635 if (FORWARDER_BLOCK_P (s->dest))
1637 EDGE_SUCC (s->dest, 0)->count += s2->count;
1638 s->dest->count += s2->count;
1639 s->dest->frequency += EDGE_FREQUENCY (s);
1642 if (FORWARDER_BLOCK_P (s2->dest))
1644 EDGE_SUCC (s2->dest, 0)->count -= s2->count;
1645 if (EDGE_SUCC (s2->dest, 0)->count < 0)
1646 EDGE_SUCC (s2->dest, 0)->count = 0;
1647 s2->dest->count -= s2->count;
1648 s2->dest->frequency -= EDGE_FREQUENCY (s);
1649 if (s2->dest->frequency < 0)
1650 s2->dest->frequency = 0;
1651 if (s2->dest->count < 0)
1652 s2->dest->count = 0;
1655 if (!redirect_to->frequency && !src1->frequency)
1656 s->probability = (s->probability + s2->probability) / 2;
1657 else
1658 s->probability
1659 = ((s->probability * redirect_to->frequency +
1660 s2->probability * src1->frequency)
1661 / (redirect_to->frequency + src1->frequency));
1664 update_br_prob_note (redirect_to);
1666 /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */
1668 /* Skip possible basic block header. */
1669 if (LABEL_P (newpos1))
1670 newpos1 = NEXT_INSN (newpos1);
1672 if (NOTE_P (newpos1))
1673 newpos1 = NEXT_INSN (newpos1);
1675 redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
1676 to_remove = EDGE_SUCC (redirect_from, 0)->dest;
1678 redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
1679 delete_basic_block (to_remove);
1681 update_forwarder_flag (redirect_from);
1683 return true;
1686 /* Search the predecessors of BB for common insn sequences. When found,
1687 share code between them by redirecting control flow. Return true if
1688 any changes made. */
1690 static bool
1691 try_crossjump_bb (int mode, basic_block bb)
1693 edge e, e2, fallthru;
1694 bool changed;
1695 unsigned max, ix, ix2;
1696 basic_block ev, ev2;
1697 edge_iterator ei;
1699   /* Nothing to do if there are not at least two incoming edges. */
1700 if (EDGE_COUNT (bb->preds) < 2)
1701 return false;
1703 /* If we are partitioning hot/cold basic blocks, we don't want to
1704 mess up unconditional or indirect jumps that cross between hot
1705 and cold sections.
1707 Basic block partitioning may result in some jumps that appear to
1708 be optimizable (or blocks that appear to be mergeable), but which really
1709 must be left untouched (they are required to make it safely across
1710 partition boundaries). See the comments at the top of
1711 bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
1713 if (flag_reorder_blocks_and_partition
1714 && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
1715 || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
1716 return false;
1718 /* It is always cheapest to redirect a block that ends in a branch to
1719 a block that falls through into BB, as that adds no branches to the
1720 program. We'll try that combination first. */
1721 fallthru = NULL;
1722 max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
1724 if (EDGE_COUNT (bb->preds) > max)
1725 return false;
1727 FOR_EACH_EDGE (e, ei, bb->preds)
1729 if (e->flags & EDGE_FALLTHRU)
1730 fallthru = e;
1733 changed = false;
1734 for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
1736 e = EDGE_PRED (ev, ix);
1737 ix++;
1739 /* As noted above, first try with the fallthru predecessor. */
1740 if (fallthru)
1742 /* Don't combine the fallthru edge into anything else.
1743 If there is a match, we'll do it the other way around. */
1744 if (e == fallthru)
1745 continue;
1746 /* If nothing changed since the last attempt, there is nothing
1747 we can do. */
1748 if (!first_pass
1749 && (!(e->src->flags & BB_DIRTY)
1750 && !(fallthru->src->flags & BB_DIRTY)))
1751 continue;
1753 if (try_crossjump_to_edge (mode, e, fallthru))
1755 changed = true;
1756 ix = 0;
1757 ev = bb;
1758 continue;
1762 /* Non-obvious work limiting check: Recognize that we're going
1763 to call try_crossjump_bb on every basic block. So if we have
1764 two blocks with lots of outgoing edges (a switch) and they
1765 share lots of common destinations, then we would do the
1766 cross-jump check once for each common destination.
1768 Now, if the blocks actually are cross-jump candidates, then
1769 all of their destinations will be shared. Which means that
1770 we only need check them for cross-jump candidacy once. We
1771 can eliminate redundant checks of crossjump(A,B) by arbitrarily
1772 choosing to do the check from the block for which the edge
1773 in question is the first successor of A. */
1774 if (EDGE_SUCC (e->src, 0) != e)
1775 continue;
1777 for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
1779 e2 = EDGE_PRED (ev2, ix2);
1780 ix2++;
1782 if (e2 == e)
1783 continue;
1785 /* We've already checked the fallthru edge above. */
1786 if (e2 == fallthru)
1787 continue;
1789 /* The "first successor" check above only prevents multiple
1790 checks of crossjump(A,B). In order to prevent redundant
1791 checks of crossjump(B,A), require that A be the block
1792 with the lowest index. */
1793 if (e->src->index > e2->src->index)
1794 continue;
1796 /* If nothing changed since the last attempt, there is nothing
1797 we can do. */
1798 if (!first_pass
1799 && (!(e->src->flags & BB_DIRTY)
1800 && !(e2->src->flags & BB_DIRTY)))
1801 continue;
1803 if (try_crossjump_to_edge (mode, e, e2))
1805 changed = true;
1806 ev2 = bb;
1807 ix = 0;
1808 break;
1813 return changed;
1816 /* Do simple CFG optimizations - basic block merging, simplification of jump
1817    instructions, etc. Return nonzero if changes were made. */
1819 static bool
1820 try_optimize_cfg (int mode)
1822 bool changed_overall = false;
1823 bool changed;
1824 int iterations = 0;
1825 basic_block bb, b, next;
1827 if (mode & CLEANUP_CROSSJUMP)
1828 add_noreturn_fake_exit_edges ();
1830 FOR_EACH_BB (bb)
1831 update_forwarder_flag (bb);
1833 if (mode & (CLEANUP_UPDATE_LIFE | CLEANUP_CROSSJUMP | CLEANUP_THREADING))
1834 clear_bb_flags ();
1836 if (! targetm.cannot_modify_jumps_p ())
1838 first_pass = true;
1839 /* Attempt to merge blocks as made possible by edge removal. If
1840 a block has only one successor, and the successor has only
1841 one predecessor, they may be combined. */
1844 changed = false;
1845 iterations++;
1847 if (dump_file)
1848 fprintf (dump_file,
1849 "\n\ntry_optimize_cfg iteration %i\n\n",
1850 iterations);
1852 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
1854 basic_block c;
1855 edge s;
1856 bool changed_here = false;
1858 /* Delete trivially dead basic blocks. */
1859 while (EDGE_COUNT (b->preds) == 0)
1861 c = b->prev_bb;
1862 if (dump_file)
1863 fprintf (dump_file, "Deleting block %i.\n",
1864 b->index);
1866 delete_basic_block (b);
1867 if (!(mode & CLEANUP_CFGLAYOUT))
1868 changed = true;
1869 b = c;
1872 /* Remove code labels no longer used. */
1873 if (EDGE_COUNT (b->preds) == 1
1874 && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
1875 && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
1876 && LABEL_P (BB_HEAD (b))
1877 /* If the previous block ends with a branch to this
1878 block, we can't delete the label. Normally this
1879 is a condjump that is yet to be simplified, but
1880 if CASE_DROPS_THRU, this can be a tablejump with
1881 some element going to the same place as the
1882 default (fallthru). */
1883 && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
1884 || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
1885 || ! label_is_jump_target_p (BB_HEAD (b),
1886 BB_END (EDGE_PRED (b, 0)->src))))
1888 rtx label = BB_HEAD (b);
1890 delete_insn_chain (label, label);
1891               /* In case the label is undeletable, move it after the
1892 BASIC_BLOCK note. */
1893 if (NOTE_LINE_NUMBER (BB_HEAD (b)) == NOTE_INSN_DELETED_LABEL)
1895 rtx bb_note = NEXT_INSN (BB_HEAD (b));
1897 reorder_insns_nobb (label, label, bb_note);
1898 BB_HEAD (b) = bb_note;
1900 if (dump_file)
1901 fprintf (dump_file, "Deleted label in block %i.\n",
1902 b->index);
1905 /* If we fall through an empty block, we can remove it. */
1906 if (!(mode & CLEANUP_CFGLAYOUT)
1907 && EDGE_COUNT (b->preds) == 1
1908 && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
1909 && !LABEL_P (BB_HEAD (b))
1910 && FORWARDER_BLOCK_P (b)
1911 /* Note that forwarder_block_p true ensures that
1912 there is a successor for this block. */
1913 && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
1914 && n_basic_blocks > 1)
1916 if (dump_file)
1917 fprintf (dump_file,
1918 "Deleting fallthru block %i.\n",
1919 b->index);
1921 c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
1922 redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
1923 delete_basic_block (b);
1924 changed = true;
1925 b = c;
1928 if (EDGE_COUNT (b->succs) == 1
1929 && (s = EDGE_SUCC (b, 0))
1930 && !(s->flags & EDGE_COMPLEX)
1931 && (c = s->dest) != EXIT_BLOCK_PTR
1932 && EDGE_COUNT (c->preds) == 1
1933 && b != c)
1935               /* When not in cfg_layout mode, use code aware of reordering
1936                  INSNs. This code possibly creates new basic blocks, so it
1937                  does not fit the merge_blocks interface and is kept here in
1938                  the hope that it will become useless once more of the compiler
1939                  is transformed to use cfg_layout mode. */
1941 if ((mode & CLEANUP_CFGLAYOUT)
1942 && can_merge_blocks_p (b, c))
1944 merge_blocks (b, c);
1945 update_forwarder_flag (b);
1946 changed_here = true;
1948 else if (!(mode & CLEANUP_CFGLAYOUT)
1949 /* If the jump insn has side effects,
1950 we can't kill the edge. */
1951 && (!JUMP_P (BB_END (b))
1952 || (reload_completed
1953 ? simplejump_p (BB_END (b))
1954 : (onlyjump_p (BB_END (b))
1955 && !tablejump_p (BB_END (b),
1956 NULL, NULL))))
1957 && (next = merge_blocks_move (s, b, c, mode)))
1959 b = next;
1960 changed_here = true;
1964 /* Simplify branch over branch. */
1965 if ((mode & CLEANUP_EXPENSIVE)
1966 && !(mode & CLEANUP_CFGLAYOUT)
1967 && try_simplify_condjump (b))
1968 changed_here = true;
1970 /* If B has a single outgoing edge, but uses a
1971 non-trivial jump instruction without side-effects, we
1972 can either delete the jump entirely, or replace it
1973 with a simple unconditional jump. */
1974 if (EDGE_COUNT (b->succs) == 1
1975 && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
1976 && onlyjump_p (BB_END (b))
1977 && !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
1978 && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
1979 (mode & CLEANUP_CFGLAYOUT) != 0))
1981 update_forwarder_flag (b);
1982 changed_here = true;
1985 /* Simplify branch to branch. */
1986 if (try_forward_edges (mode, b))
1987 changed_here = true;
1989 /* Look for shared code between blocks. */
1990 if ((mode & CLEANUP_CROSSJUMP)
1991 && try_crossjump_bb (mode, b))
1992 changed_here = true;
1994 /* Don't get confused by the index shift caused by
1995 deleting blocks. */
1996 if (!changed_here)
1997 b = b->next_bb;
1998 else
1999 changed = true;
2002 if ((mode & CLEANUP_CROSSJUMP)
2003 && try_crossjump_bb (mode, EXIT_BLOCK_PTR))
2004 changed = true;
2006 #ifdef ENABLE_CHECKING
2007 if (changed)
2008 verify_flow_info ();
2009 #endif
2011 changed_overall |= changed;
2012 first_pass = false;
2014 while (changed);
2017 if (mode & CLEANUP_CROSSJUMP)
2018 remove_fake_exit_edges ();
2020 clear_aux_for_blocks ();
2022 return changed_overall;
2025 /* Delete all unreachable basic blocks. */
2027 bool
2028 delete_unreachable_blocks (void)
2030 bool changed = false;
2031 basic_block b, next_bb;
2033 find_unreachable_blocks ();
2035 /* Delete all unreachable basic blocks. */
2037 for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
2039 next_bb = b->next_bb;
2041 if (!(b->flags & BB_REACHABLE))
2043 delete_basic_block (b);
2044 changed = true;
2048 if (changed)
2049 tidy_fallthru_edges ();
2050 return changed;
2053 /* Merges sequential blocks if possible. */
2055 bool
2056 merge_seq_blocks (void)
2058 basic_block bb;
2059 bool changed = false;
2061 for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
2063 if (EDGE_COUNT (bb->succs) == 1
2064 && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
2066 /* Merge the blocks and retry. */
2067 merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
2068 changed = true;
2069 continue;
2072 bb = bb->next_bb;
2075 return changed;
2078 /* Tidy the CFG by deleting unreachable code and whatnot. */
2080 bool
2081 cleanup_cfg (int mode)
2083 bool changed = false;
2085 timevar_push (TV_CLEANUP_CFG);
2086 if (delete_unreachable_blocks ())
2088 changed = true;
2089       /* We've possibly created trivially dead code. Clean it up right
2090 now to introduce more opportunities for try_optimize_cfg. */
2091 if (!(mode & (CLEANUP_NO_INSN_DEL | CLEANUP_UPDATE_LIFE))
2092 && !reload_completed)
2093 delete_trivially_dead_insns (get_insns(), max_reg_num ());
2096 compact_blocks ();
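  /* Iterate: each successful pass of try_optimize_cfg may expose further
     unreachable blocks and trivially dead insns.  */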
2098 while (try_optimize_cfg (mode))
2100 delete_unreachable_blocks (), changed = true;
2101 if (mode & CLEANUP_UPDATE_LIFE)
2103       /* Cleaning up the CFG introduces more opportunities for dead code
2104          removal, which in turn may introduce more opportunities for
2105          cleaning up the CFG. */
2106 if (!update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
2107 PROP_DEATH_NOTES
2108 | PROP_SCAN_DEAD_CODE
2109 | PROP_KILL_DEAD_CODE
2110 | ((mode & CLEANUP_LOG_LINKS)
2111 ? PROP_LOG_LINKS : 0)))
2112 break;
2114 else if (!(mode & CLEANUP_NO_INSN_DEL)
2115 && (mode & CLEANUP_EXPENSIVE)
2116 && !reload_completed)
2118 if (!delete_trivially_dead_insns (get_insns(), max_reg_num ()))
2119 break;
2121 else
2122 break;
2123 delete_dead_jumptables ();
2126 /* Kill the data we won't maintain. */
2127 free_EXPR_LIST_list (&label_value_list);
2128 timevar_pop (TV_CLEANUP_CFG);
2130 return changed;