/* CFG cleanup for trees.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}

/* Convert a SWTCH with a single non-default case to a gcond and replace it
   at GSI.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = label_to_block_fn (cfun, default_label);
  basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
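      /* The case label covers a range [LOW, HIGH]: generate_range_test
         emits the index adjustment and returns LHS and RHS such that
         the single comparison LHS <= RHS tests range membership.  */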
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
                              fold_convert (TREE_TYPE (index), low),
                              NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi,
                            bool first_p)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
         a GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
          && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
        stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          /* During a first iteration on the CFG only remove trivially
             dead edges but mark other conditions for re-evaluation.  */
          if (first_p)
            {
              val = const_binop (gimple_cond_code (stmt), boolean_type_node,
                                 gimple_cond_lhs (stmt),
                                 gimple_cond_rhs (stmt));
              if (! val)
                bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
            }
          else
            {
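              /* On later iterations, re-evaluate the condition through
                 gimple_simplify without following SSA edges, to see
                 whether it has now folded to a constant.  */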
              code_helper rcode;
              tree ops[3] = {};
              if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
                                   no_follow_ssa_edges)
                  && rcode == INTEGER_CST)
                val = ops[0];
            }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          ;
        }

      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Cleanup the GF_CALL_CTRL_ALTERING flag according to
   the updated gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;
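
  /* A const or pure call that is known not to loop forever, or a leaf
     call, can no longer alter control flow in a way that requires it
     to end its basic block, so the flag can be cleared.  */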
  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb, bool first_p)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi, first_p);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

        /* ??? For now, hope there's a corresponding debug
           assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple *debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
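          /* Make LHS the default definition of a fresh dummy variable,
             so the SSA name remains well-formed even though its former
             defining statement no longer assigns to it.  */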
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}

/* Return true if we want to merge BB1 and BB2 into a single block.  */

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
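  /* BB1 ends in a statement that may terminate the block early, so the
     two blocks can legitimately have different execution counts; merge
     only when the profile counts are consistent enough to combine.  */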
  return bb1->count.ok_for_merging (bb2->count);
}

/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
           && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks looking for edge removal
     opportunities.  Do this first because incoming SSA form may be
     invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_control_flow_bb (bb, true);
    }

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_control_flow_bb (bb, false);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
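
/* Edge predicate for make_forwarder_block: return true unless E is a
   latch edge, i.e. unless E's source is dominated by E's destination.
   When a loop header with multiple entries is split below, the latch
   edges thus stay with the loop header while the entry edges are left
   on the new forwarder block.  */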
static bool
mfb_keep_latches (edge e)
{
  return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
        if (loop && loop->header)
          {
            basic_block bb = loop->header;
            edge_iterator ei;
            edge e;
            bool found_latch = false;
            bool any_abnormal = false;
            unsigned n = 0;
            /* We are only interested in preserving existing loops, but
               we need to check whether they are still real and of course
               if we need to add a preheader at all.  */
            FOR_EACH_EDGE (e, ei, bb->preds)
              {
                if (e->flags & EDGE_ABNORMAL)
                  {
                    any_abnormal = true;
                    break;
                  }
                if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
                  {
                    found_latch = true;
                    continue;
                  }
                n++;
              }
            /* If we have more than one entry to the loop header
               create a forwarder.  */
            if (found_latch && ! any_abnormal && n > 1)
              {
                edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
                                                      NULL);
                loop->header = fallthru->dest;
                if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
                  {
                    /* The loop updating from the CFG hook is incomplete
                       when we have multiple latches, fixup manually.  */
                    remove_bb_from_loops (fallthru->src);
                    loop_p cloop = loop;
                    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
                      cloop = find_common_loop (cloop, e->src->loop_father);
                    add_bb_to_loop (fallthru->src, cloop);
                  }
              }
          }
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
         to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }
      else
        {
          /* If we merge the forwarder into a loop header verify if we
             are creating another loop latch edge.  If so, reset
             number of iteration information of the loop.  */
          if (dest->loop_father->header == dest
              && dominated_by_p (CDI_DOMINATORS, e->src, dest))
            {
              dest->loop_father->any_upper_bound = false;
              dest->loop_father->any_likely_upper_bound = false;
              free_numbers_of_iterations_estimates (dest->loop_father);
            }
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
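
  /* For -fcompare-debug, append the final decls to the dump file with
     addresses and statement numbers suppressed, so that the dumps from
     the debug and non-debug compilations remain comparable.  */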
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}