contrib/gcc-8.0/gcc/tree-cfgcleanup.c

/* CFG cleanup for trees.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
#include "tree-cfgcleanup.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}

/* Convert a SWTCH with a single non-default case to a gcond and replace
   it at GSI.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = label_to_block_fn (cfun, default_label);
  basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
                              fold_convert (TREE_TYPE (index), low),
                              NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update edges.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}
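
/* A minimal sketch of the transform above, on hypothetical GIMPLE (the
   names i_1, <L1> and <L2> are invented for illustration):

     switch (i_1) <default: <L2>, case 5: <L1>>

   becomes

     if (i_1 == 5) goto <L1>; else goto <L2>;

   For a case range such as "case 5 ... 7", generate_range_test instead
   builds an unsigned range check roughly of the form "i_1 - 5 <= 2".  */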

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
         GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
          && convert_single_case_switch (as_a<gswitch *> (stmt), gsi))
        stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            code_helper rcode;
            tree ops[3] = {};
            if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
                                 no_follow_ssa_edges)
                && rcode == INTEGER_CST)
              val = ops[0];
          }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          break;
        }

      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
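
/* Illustration (hypothetical GIMPLE): if gimple_simplify above folds the
   predicate of "if (0 != 0) goto <L1>; else goto <L2>;" down to the
   constant 0, find_taken_edge returns the false edge; the true edge and
   any blocks reachable only through it are removed, the gcond itself is
   deleted, and the block then falls through unconditionally to <L2>.  */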

/* Clean up the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}
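
/* Sketch of the computed-goto case above (invented names): once a
   destination such as "goto *&&lab;" is known at compile time, every
   outgoing edge except the one to lab's block is removed, the surviving
   edge is turned into a plain fallthru, and the goto statement itself is
   deleted, since the CFG edge now carries all the needed information.  */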

/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}
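
/* Sketch of a block the predicate above accepts for PHI_WANTED == false
   (block numbers invented):

     <bb 5>:
     goto <bb 8>;

   i.e. nothing but artificial labels and debug stmts, ending in a single
   normal outgoing edge that is not abnormal, not into the exit block,
   and not critical to the recorded loop structure.  */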

/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
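
/* Example (hypothetical): given "x_4 = PHI <a_2(E1), a_2(E2), b_3(E3)>"
   in DEST, the alternatives for E1 and E2 are equal, so redirecting one
   of those two edges onto the other cannot change the value x_4 merges.  */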

/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at
     out-of-SSA time due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
        break;

      /* Forwarder blocks can only contain labels and debug stmts, and
         labels must come first, so if we get to this point, we know
         we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        gsi_move_before (&gsi, &gsi_to);
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
        {
          gimple *debug = gsi_stmt (gsi);
          gcc_assert (is_gimple_debug (debug));
          gsi_move_before (&gsi, &gsi_to);
        }
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
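
/* Note on the debug-statement handling above: debug stmts are moved into
   DEST only when DEST has a single predecessor (the CAN_MOVE_DEBUG_STMTS
   condition), because at a merge point the recorded variable bindings
   would not generally hold on the other incoming paths.  */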

/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
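
/* Minimal sketch (invented names): after "x_5 = call_me ();" is
   discovered noreturn, trailing non-debug statements are split off into
   a now-unreachable block, the LHS is dropped so the call reads
   "call_me ();", and x_5 is re-pointed at a fresh default definition of
   a new temporary so SSA verification still passes.  */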

/* Return true if we want to merge BB1 and BB2 into a single block.  */

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
  return bb1->count.ok_for_merging (bb2->count);
}
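
/* Gloss on the above: when BB1's last statement cannot terminate the
   block early, merging is always acceptable; otherwise the two blocks'
   profile counts must also be compatible (profile_count::ok_for_merging)
   so the merge does not degrade profile quality.  */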

/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
           && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Do cleanup_control_flow_bb in PRE order.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && ! bitmap_bit_p (visited, dest->index))
        {
          bitmap_set_bit (visited, dest->index);
          retval |= cleanup_control_flow_bb (dest);
          if (EDGE_COUNT (dest->succs) > 0)
            stack.quick_push (ei_start (dest->succs));
        }
      else
        {
          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  return retval;
}
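
/* Gloss: visiting blocks in DFS pre-order means a block's control
   statement is cleaned up before its successors are looked at, so
   successors that become unreachable in the process are never entered;
   the visited sbitmap keeps the walk linear in the number of edges.  */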

/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (unsigned ssa_update_flags)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  retval |= cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */
  if (ssa_update_flags)
    update_ssa (ssa_update_flags);

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_control_flow_bb (bb);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}

static bool
mfb_keep_latches (edge e)
{
  return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
}
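
/* The predicate above is handed to make_forwarder_block below.  It
   returns true for the non-latch entries of a loop header (edges whose
   source is not dominated by the destination); the intended effect is
   that those entry edges funnel through the new forwarder block while
   latch edges keep pointing directly at the header.  */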

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (unsigned ssa_update_flags)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
        if (loop && loop->header)
          {
            basic_block bb = loop->header;
            edge_iterator ei;
            edge e;
            bool found_latch = false;
            bool any_abnormal = false;
            unsigned n = 0;
            /* We are only interested in preserving existing loops, but
               we need to check whether they are still real and of course
               if we need to add a preheader at all.  */
            FOR_EACH_EDGE (e, ei, bb->preds)
              {
                if (e->flags & EDGE_ABNORMAL)
                  {
                    any_abnormal = true;
                    break;
                  }
                if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
                  {
                    found_latch = true;
                    continue;
                  }
                n++;
              }
            /* If we have more than one entry to the loop header
               create a forwarder.  */
            if (found_latch && ! any_abnormal && n > 1)
              {
                edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
                                                      NULL);
                loop->header = fallthru->dest;
                if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
                  {
                    /* The loop updating from the CFG hook is incomplete
                       when we have multiple latches, fixup manually.  */
                    remove_bb_from_loops (fallthru->src);
                    loop_p cloop = loop;
                    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
                      cloop = find_common_loop (cloop, e->src->loop_father);
                    add_bb_to_loop (fallthru->src, cloop);
                  }
              }
          }
    }

  changed |= cleanup_tree_cfg_1 (ssa_update_flags);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
         to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (unsigned ssa_update_flags)
{
  bool changed = cleanup_tree_cfg_noloop (ssa_update_flags);

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }
      else
        {
          /* If we merge the forwarder into a loop header verify if we
             are creating another loop latch edge.  If so, reset
             number of iteration information of the loop.  */
          if (dest->loop_father->header == dest
              && dominated_by_p (CDI_DOMINATORS, e->src, dest))
            {
              dest->loop_father->any_upper_bound = false;
              dest->loop_father->any_likely_upper_bound = false;
              free_numbers_of_iterations_estimates (dest->loop_father);
            }
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output,
                                 dump_flags | TDF_SLIM | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}