/* gcc/tree-cfgcleanup.c — extracted from a gitweb view; the commit-subject,
   path, and blob-hash lines that preceded the file body have been removed.  */
1 /* CFG cleanup for trees.
2 Copyright (C) 2001-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "diagnostic-core.h"
31 #include "fold-const.h"
32 #include "cfganal.h"
33 #include "cfgcleanup.h"
34 #include "tree-eh.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-manip.h"
39 #include "tree-dfa.h"
40 #include "tree-ssa.h"
41 #include "cfgloop.h"
42 #include "tree-scalar-evolution.h"
43 #include "gimple-match.h"
44 #include "gimple-fold.h"
/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.

   Blocks recorded here are revisited by the cleanup worklist loop in
   cleanup_tree_cfg_1; the bitmap is allocated/freed there per invocation.  */
bitmap cfgcleanup_altered_bbs;
56 /* Remove any fallthru edge from EV. Return true if an edge was removed. */
58 static bool
59 remove_fallthru_edge (vec<edge, va_gc> *ev)
61 edge_iterator ei;
62 edge e;
64 FOR_EACH_EDGE (e, ei, ev)
65 if ((e->flags & EDGE_FALLTHRU) != 0)
67 if (e->flags & EDGE_COMPLEX)
68 e->flags &= ~EDGE_FALLTHRU;
69 else
70 remove_edge_and_dominated_blocks (e);
71 return true;
73 return false;
/* Disconnect an unreachable block in the control expression starting
   at block BB.  GSI points at the control statement (GIMPLE_COND or
   GIMPLE_SWITCH) ending BB; it is removed and the surviving edge is
   turned into a fallthru.  Returns true if any edge was removed.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Folding below may emit overflow warnings; defer them so they
	 are only reported if an edge is actually removed.  */
      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
	{
	case GIMPLE_COND:
	  {
	    /* Try to constant-fold the condition without following SSA
	       edges, so only truly trivial conditions are resolved.  */
	    code_helper rcode;
	    tree ops[3] = {};
	    if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
				 no_follow_ssa_edges)
		&& rcode == INTEGER_CST)
	      val = ops[0];
	  }
	  break;

	case GIMPLE_SWITCH:
	  val = gimple_switch_index (as_a <gswitch *> (stmt));
	  break;

	default:
	  ;
	}
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      if (!warned)
		{
		  /* Emit the deferred warnings only once, now that we know
		     the simplification takes effect.  */
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      /* Fold the removed edge's profile into the surviving edge.  */
	      taken_edge->probability += e->probability;
	      taken_edge->count += e->count;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
      /* Accumulated probabilities may exceed the base; clamp.  */
      if (taken_edge->probability > REG_BR_PROB_BASE)
	taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
158 /* Cleanup the GF_CALL_CTRL_ALTERING flag according to
159 to updated gimple_call_flags. */
161 static void
162 cleanup_call_ctrl_altering_flag (gimple *bb_end)
164 if (!is_gimple_call (bb_end)
165 || !gimple_call_ctrl_altering_p (bb_end))
166 return;
168 int flags = gimple_call_flags (bb_end);
169 if (((flags & (ECF_CONST | ECF_PURE))
170 && !(flags & ECF_LOOPING_CONST_OR_PURE))
171 || (flags & ECF_LEAF))
172 gimple_call_set_ctrl_altering (bb_end, false);
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  Handles three cases: a foldable COND/SWITCH
   at the end of BB, a computed goto with a compile-time-known LABEL_DECL
   destination, and a call newly discovered to be noreturn.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt)
	   && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   If PHI_WANTED is false, BB must not contain PHI nodes; otherwise
   it must contain them (used by the mergephi pass).

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
	 Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
	return false;
      /* If goto_locus of any of the edges differs, prevent removing
	 the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
	return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_LABEL:
	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
	    return false;
	  /* At -O0 also keep the block if a label's location differs
	     from the outgoing edge's, to preserve debug locations.  */
	  if (optimize == 0 && gimple_location (stmt) != locus)
	    return false;
	  break;

	  /* ??? For now, hope there's a corresponding debug
	     assignment at the destination.  */
	case GIMPLE_DEBUG:
	  break;

	default:
	  return false;
	}
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb->loop_father->header == bb)
	return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
	{
	  if (bb->loop_father == dest->loop_father)
	    {
	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
		return false;
	      /* If bb doesn't have a single predecessor we'd make this
		 loop have multiple latches.  Don't do that if that
		 would in turn require disambiguating them.  */
	      return (single_pred_p (bb)
		      || loops_state_satisfies_p
			   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
	    }
	  else if (bb->loop_father == loop_outer (dest->loop_father))
	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
	  /* Always preserve other edges into loop headers that are
	     not simple latches or preheaders.  */
	  return false;
	}
    }

  return true;
}
354 /* If all the PHI nodes in DEST have alternatives for E1 and E2 and
355 those alternatives are equal in each of the PHI nodes, then return
356 true, else return false. */
358 static bool
359 phi_alternatives_equal (basic_block dest, edge e1, edge e2)
361 int n1 = e1->dest_idx;
362 int n2 = e2->dest_idx;
363 gphi_iterator gsi;
365 for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
367 gphi *phi = gsi.phi ();
368 tree val1 = gimple_phi_arg_def (phi, n1);
369 tree val2 = gimple_phi_arg_def (phi, n2);
371 gcc_assert (val1 != NULL_TREE);
372 gcc_assert (val2 != NULL_TREE);
374 if (!operand_equal_for_phi_arg_p (val1, val2))
375 return false;
378 return true;
/* Removes forwarder block BB.  Returns false if this failed.
   All incoming edges of BB are redirected to its single successor DEST;
   labels and (when safe) debug statements migrate to DEST, dominators
   and loop latch information are updated, and BB is deleted.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
	return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  /* Debug stmts can only move when DEST has a unique predecessor,
     otherwise their bindings could become wrong on other paths.  */
  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  /* Record BB's single predecessor before edges are redirected, for
     the latch fixup below.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  */
	  for (gphi_iterator psi = gsi_start_phis (dest);
	       !gsi_end_p (psi);
	       gsi_next (&psi))
	    {
	      gphi *phi = psi.phi ();
	      source_location l = gimple_phi_arg_location_from_edge (phi, succ);
	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	      add_phi_arg (phi, unshare_expr (def), s, l);
	    }
	}
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
	break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
	  || DECL_NONLOCAL (decl)
	  || FORCED_LABEL (decl)
	  || !DECL_ARTIFICIAL (decl))
	{
	  gsi_remove (&gsi, false);
	  gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
	}
      else
	gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
	{
	  gimple *debug = gsi_stmt (gsi);
	  if (!is_gimple_debug (debug))
	    break;
	  gsi_remove (&gsi, false);
	  gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
	}
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  /* __builtin_return is special; leave it alone.  */
  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
	{
	  /* Don't split if there are only debug stmts
	     after stmt, that can result in -fcompare-debug
	     failures.  Remove the debug stmts instead,
	     they should be all unreachable anyway.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
	    gsi_remove (&gsi, true);
	}
      else
	{
	  split_block (bb, stmt);
	  changed = true;
	}
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (lhs))) == INTEGER_CST)
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  /* Turn the now-undefined SSA name into a default definition
	     of a fresh temporary so the SSA verifier stays happy.  */
	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
	  set_ssa_default_def (cfun, new_var, lhs);
	}

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  Applies, in order: control-flow simplification, forwarder
   block removal, and merging of BB into its single successor.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  bool retval = cleanup_control_flow_bb (bb);

  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      /* If there is a merge opportunity with the predecessor
	 do nothing now but wait until we process the predecessor.
	 This happens when we visit BBs in a non-optimal order and
	 avoids quadratic behavior with adjusting stmts BB pointer.  */
      if (single_pred_p (bb)
	  && can_merge_blocks_p (single_pred (bb), bb))
	;  /* Deliberately empty: defer to the predecessor's visit.  */
      else
	{
	  merge_blocks (bb, single_succ (bb));
	  return true;
	}
    }

  return retval;
}
/* Iterate the cfg cleanups, while anything changes.  Does one sweep over
   all blocks, then drains the CFGCLEANUP_ALTERED_BBS worklist that the
   sweep (and each cleanup) keeps refilling.  Returns true if anything
   changed.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Start by iterating over all basic blocks.  We cannot use FOR_EACH_BB_FN,
     since the basic blocks may get removed.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      /* Entry/exit blocks are never cleaned.  */
      if (i < NUM_FIXED_BLOCKS)
	continue;

      /* The block may have been removed since it was queued.  */
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  /* CFG changes may have invalidated the loop tree; ask for a fixup.  */
  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}
/* Repairs loop structures.  Recomputes dominators, fixes the loop tree,
   and if loop-closed SSA is in effect rewrites the affected (or, when
   new loops appeared, all) blocks back into loop-closed SSA form.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
				  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  /* Scalar evolutions cached against the old loop tree are stale.  */
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}
766 /* Cleanup cfg and repair loop structures. */
768 bool
769 cleanup_tree_cfg (void)
771 bool changed = cleanup_tree_cfg_noloop ();
773 if (current_loops != NULL
774 && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
775 repair_loop_structures ();
777 return changed;
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  Each incoming edge of BB is redirected to
   the successor DEST; PHI arguments are transferred, splitting edges
   where the existing PHI alternatives disagree.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}
/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi
/* Collect forwarder blocks whose PHIs can be merged into their successor,
   then remove them in reverse collection order.  Always returns 0.  */

unsigned int
pass_merge_phi::execute (function *fun)
{
  /* Worklist of candidate blocks; CURRENT is the push pointer.  */
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}
1049 } // anon namespace
1051 gimple_opt_pass *
1052 make_pass_merge_phi (gcc::context *ctxt)
1054 return new pass_merge_phi (ctxt);
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  Also emits the final-insns dump used by
   -fcompare-debug when requested.  Returns the TODO flags.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      /* Cleanup already happened; request an SSA update instead.  */
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  /* Dump with numbering/addresses suppressed so the two
	     -fcompare-debug compilations produce comparable output.  */
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}
namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

/* Pass wrapper around execute_cleanup_cfg_post_optimizing; runs as the
   "optimized" pass just before RTL expansion.  */

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing
1136 } // anon namespace
1138 gimple_opt_pass *
1139 make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
1141 return new pass_cleanup_cfg_post_optimizing (ctxt);