/* CFG cleanup for trees.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
	if (e->flags & EDGE_COMPLEX)
	  e->flags &= ~EDGE_FALLTHRU;
	else
	  remove_edge_and_dominated_blocks (e);
	return true;
      }
  return false;
}

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi,
			    bool first_p)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
	{
	case GIMPLE_COND:
	  /* During a first iteration on the CFG only remove trivially
	     dead edges but mark other conditions for re-evaluation.  */
	  if (first_p)
	    {
	      val = const_binop (gimple_cond_code (stmt), boolean_type_node,
				 gimple_cond_lhs (stmt),
				 gimple_cond_rhs (stmt));
	      if (! val)
		bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
	    }
	  else
	    {
	      code_helper rcode;
	      tree ops[3] = {};
	      if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
				   no_follow_ssa_edges)
		  && rcode == INTEGER_CST)
		val = ops[0];
	    }
	  break;

	case GIMPLE_SWITCH:
	  val = gimple_switch_index (as_a <gswitch *> (stmt));
	  break;

	default:
	  break;
	}
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return false;
	}

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e != taken_edge)
	    {
	      if (!warned)
		{
		  fold_undefer_overflow_warnings
		    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
		  warned = true;
		}

	      taken_edge->probability += e->probability;
	      taken_edge->count += e->count;
	      remove_edge_and_dominated_blocks (e);
	      retval = true;
	    }
	  else
	    ei_next (&ei);
	}
      if (!warned)
	fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
	taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

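/* For illustration (a sketch, not taken from a real dump): a block
   ending in

     if (i_1 > i_1) goto <bb 3>; else goto <bb 4>;

   simplifies to a known false value, so find_taken_edge returns the
   edge to <bb 4>.  The edge to <bb 3>, and any blocks dominated only
   by it, are removed, the GIMPLE_COND itself is deleted, and the
   surviving edge is turned into a plain fallthru.  */
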
/* Cleanup the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

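/* Illustrative scenario for the above (not from a real dump): if an
   indirect call is later resolved to a function that is ECF_CONST and
   not ECF_LOOPING_CONST_OR_PURE, it can no longer throw or loop
   forever, so there is no reason for it to stay control-altering and
   keep ending its basic block.  */
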
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb, bool first_p)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi, first_p);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
	   && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
	   && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
	       == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
	 destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
	 edges which do not go to the right block.  For the one
	 edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
	return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
	{
	  if (e->dest != target_block)
	    remove_edge_and_dominated_blocks (e);
	  else
	    {
	      /* Turn off the EDGE_ABNORMAL flag.  */
	      e->flags &= ~EDGE_ABNORMAL;

	      /* And set EDGE_FALLTHRU.  */
	      e->flags |= EDGE_FALLTHRU;
	      ei_next (&ei);
	    }
	}

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
	 relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
	   && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
	 now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
	gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
	retval = true;
    }

  return retval;
}

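/* Sketch of the computed-goto case above (illustrative, not from a
   real dump): a statement of the form

     goto &L;

   where L is a local LABEL_DECL is really just a direct "goto L;",
   so every successor edge except the one to L's block is deleted,
   the surviving edge becomes a normal fallthru, and the goto itself
   is removed since the CFG now carries all the information.  */
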
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
	 Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
	return false;
      /* If goto_locus of any of the edges differs, prevent removing
	 the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
	return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
	{
	case GIMPLE_LABEL:
	  if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
	    return false;
	  if (optimize == 0 && gimple_location (stmt) != locus)
	    return false;
	  break;

	  /* ??? For now, hope there's a corresponding debug
	     assignment at the destination.  */
	case GIMPLE_DEBUG:
	  break;

	default:
	  return false;
	}
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
	return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
	{
	  if (bb->loop_father == dest->loop_father)
	    {
	      if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
		return false;
	      /* If bb doesn't have a single predecessor we'd make this
		 loop have multiple latches.  Don't do that if that
		 would in turn require disambiguating them.  */
	      return (single_pred_p (bb)
		      || loops_state_satisfies_p
			   (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
	    }
	  else if (bb->loop_father == loop_outer (dest->loop_father))
	    return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
	  /* Always preserve other edges into loop headers that are
	     not simple latches or preheaders.  */
	  return false;
	}
    }

  return true;
}

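/* Sketch of a forwarder block in the above sense (illustrative
   GIMPLE, not from a real dump):

   <bb 5>:
   L3:
     goto <bb 7>;

   Only labels (and possibly debug stmts) precede the single
   unconditional successor edge, so control merely passes through.  */
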
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
	return false;
    }

  return true;
}

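/* E.g. given the PHI node (an illustrative example, not from a dump)

     # x_1 = PHI <a_2(E1), a_2(E2), b_3(E3)>

   the alternatives for E1 and E2 are equal, whereas asking about E1
   and E3 would make the function return false.  */
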
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
	  || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
	return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur when the PHI node is removed during
     out-of-SSA translation due to overlapping live ranges of
     registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
	  || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  s = find_edge (e->src, dest);
	  if (!s)
	    continue;

	  if (!phi_alternatives_equal (dest, succ, s))
	    return false;
	}
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
	{
	  /* If there is an abnormal edge, redirect it anyway, and
	     move the labels to the new block to make it legal.  */
	  s = redirect_edge_succ_nodup (e, dest);
	}
      else
	s = redirect_edge_and_branch (e, dest);

      if (s == e)
	{
	  /* Create arguments for the phi nodes, since the edge was not
	     here before.  */
	  for (gphi_iterator psi = gsi_start_phis (dest);
	       !gsi_end_p (psi);
	       gsi_next (&psi))
	    {
	      gphi *phi = psi.phi ();
	      source_location l = gimple_phi_arg_location_from_edge (phi, succ);
	      tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	      add_phi_arg (phi, unshare_expr (def), s, l);
	    }
	}
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
	break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
	  || DECL_NONLOCAL (decl)
	  || FORCED_LABEL (decl)
	  || !DECL_ARTIFICIAL (decl))
	{
	  gsi_remove (&gsi, false);
	  gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
	}
      else
	gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
	{
	  gimple *debug = gsi_stmt (gsi);
	  if (!is_gimple_debug (debug))
	    break;
	  gsi_remove (&gsi, false);
	  gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
	}
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
	{
	  /* Shortcut to avoid calling (relatively expensive)
	     nearest_common_dominator unless necessary.  */
	  dom = dombb;
	}
      else
	dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

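/* Illustrative before/after for remove_forwarder_block (a sketch,
   not from a real dump):

   before:  <bb 2> -> <bb 5> (forwarder, labels only) -> <bb 7>
	    <bb 3> -> <bb 5>

   after:   <bb 2> -> <bb 7>
	    <bb 3> -> <bb 7>

   with <bb 5>'s non-artificial labels moved into <bb 7> and PHI
   arguments at <bb 7> duplicated for the newly incoming edges.  */
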
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
	{
	  /* Don't split if there are only debug stmts
	     after stmt, that can result in -fcompare-debug
	     failures.  Remove the debug stmts instead,
	     they should be all unreachable anyway.  */
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  for (gsi_next (&gsi); !gsi_end_p (gsi); )
	    gsi_remove (&gsi, true);
	}
      else
	{
	  split_block (bb, stmt);
	  changed = true;
	}
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
	  || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
	{
	  tree new_var = create_tmp_reg (TREE_TYPE (lhs));
	  SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
	  SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
	  set_ssa_default_def (cfun, new_var, lhs);
	}

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}

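/* For example (a sketch with made-up SSA names): once a call
   statement "x_5 = fn_3 ();" is discovered to invoke a noreturn
   function, everything after it in the block is unreachable, so the
   block is split (or trailing debug stmts are dropped), the LHS x_5
   is removed, and x_5 is rewired to a fresh default definition so
   SSA verification stays happy.  */
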
/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor,
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior when adjusting the stmts' BB pointers.  */
  if (single_pred_p (bb)
      && can_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
	   && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks looking for edge removal
     opportunities.  Do this first because incoming SSA form may be
     invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	retval |= cleanup_control_flow_bb (bb, true);
    }

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
	retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
	continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
	continue;

      retval |= cleanup_control_flow_bb (bb, false);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}

/* Return true if E is not a latch edge, i.e. its source is not
   dominated by its destination.  Used with make_forwarder_block to
   keep latch edges pointing at the loop header.  */

static bool
mfb_keep_latches (edge e)
{
  return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
	if (loop && loop->header)
	  {
	    basic_block bb = loop->header;
	    edge_iterator ei;
	    edge e;
	    bool found_latch = false;
	    bool any_abnormal = false;
	    unsigned n = 0;
	    /* We are only interested in preserving existing loops, but
	       we need to check whether they are still real and of course
	       if we need to add a preheader at all.  */
	    FOR_EACH_EDGE (e, ei, bb->preds)
	      {
		if (e->flags & EDGE_ABNORMAL)
		  {
		    any_abnormal = true;
		    break;
		  }
		if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
		  {
		    found_latch = true;
		    continue;
		  }
		n++;
	      }
	    /* If we have more than one entry to the loop header
	       create a forwarder.  */
	    if (found_latch && ! any_abnormal && n > 1)
	      {
		edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
						      NULL);
		loop->header = fallthru->dest;
		if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
		  {
		    /* The loop updating from the CFG hook is incomplete
		       when we have multiple latches, fixup manually.  */
		    remove_bb_from_loops (fallthru->src);
		    loop_p cloop = loop;
		    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
		      cloop = find_common_loop (cloop, e->src->loop_father);
		    add_bb_to_loop (fallthru->src, cloop);
		  }
	      }
	  }
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));
  compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
	 to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

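/* Sketch of the header-splitting above (illustrative, not from a
   real dump): for a loop header H with entry edges E1 and E2 plus a
   latch edge L, make_forwarder_block yields

     E1, E2 -> F (new forwarder) -> H' (new header) <- L

   so the loop regains a single entry and the subsequent loop fixup
   cannot associate it with a neighboring region.  */
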
/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
				  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
	return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
	{
	  /* We already have an edge S from E->src to DEST.  If S and
	     E->dest's sole successor edge have the same PHI arguments
	     at DEST, redirect S to DEST.  */
	  if (phi_alternatives_equal (dest, s, succ))
	    {
	      e = redirect_edge_and_branch (e, dest);
	      redirect_edge_var_map_clear (e);
	      continue;
	    }

	  /* PHI arguments are different.  Create a forwarder block by
	     splitting E so that we can merge PHI arguments on E to
	     DEST.  */
	  e = single_succ_edge (split_edge (e));
	}
      else
	{
	  /* If we merge the forwarder into a loop header verify if we
	     are creating another loop latch edge.  If so, reset
	     number of iteration information of the loop.  */
	  if (dest->loop_father->header == dest
	      && dominated_by_p (CDI_DOMINATORS, e->src, dest))
	    {
	      dest->loop_father->any_upper_bound = false;
	      dest->loop_father->any_likely_upper_bound = false;
	      free_numbers_of_iterations_estimates (dest->loop_father);
	    }
	}

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
	 destination of E.  */
      for (gsi = gsi_start_phis (dest);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree def = gimple_phi_arg_def (phi, succ->dest_idx);
	  source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

	  if (TREE_CODE (def) == SSA_NAME)
	    {
	      /* If DEF is one of the results of PHI nodes removed during
		 redirection, replace it with the PHI argument that used
		 to be on E.  */
	      vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
	      size_t length = head ? head->length () : 0;
	      for (size_t i = 0; i < length; i++)
		{
		  edge_var_map *vm = &(*head)[i];
		  tree old_arg = redirect_edge_var_map_result (vm);
		  tree new_arg = redirect_edge_var_map_def (vm);

		  if (def == old_arg)
		    {
		      def = new_arg;
		      locus = redirect_edge_var_map_location (vm);
		      break;
		    }
		}
	    }

	  add_phi_arg (phi, def, s, locus);
	}

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
	 nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
	continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
	 nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
	  /* We don't want to deal with a basic block with
	     abnormal edges.  */
	  || bb_has_abnormal_pred (bb))
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
	{
	  /* If BB does not dominate DEST, then the PHI nodes at
	     DEST must be the only users of the results of the PHI
	     nodes at BB.  */
	  *current++ = bb;
	}
      else
	{
	  gphi_iterator gsi;
	  unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

	  /* BB dominates DEST.  There may be many users of the PHI
	     nodes in BB.  However, there is still a trivial case we
	     can handle.  If the result of every PHI in BB is used
	     only by a PHI in DEST, then we can trivially merge the
	     PHI nodes from BB into DEST.  */
	  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	       gsi_next (&gsi))
	    {
	      gphi *phi = gsi.phi ();
	      tree result = gimple_phi_result (phi);
	      use_operand_p imm_use;
	      gimple *use_stmt;

	      /* If the PHI's result is never used, then we can just
		 ignore it.  */
	      if (has_zero_uses (result))
		continue;

	      /* Get the single use of the result of this PHI node.  */
	      if (!single_imm_use (result, &imm_use, &use_stmt)
		  || gimple_code (use_stmt) != GIMPLE_PHI
		  || gimple_bb (use_stmt) != dest
		  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
		break;
	    }

	  /* If the loop above iterated through all the PHI nodes
	     in BB, then we can merge the PHIs from BB into DEST.  */
	  if (gsi_end_p (gsi))
	    *current++ = bb;
	}
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

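/* Like other GIMPLE passes, pass_merge_phi is scheduled by the pass
   manager from its entry in passes.def; this factory function is the
   hook the pass manager calls to instantiate it.  */
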
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
	{
	  error ("could not open final insn dump file %qs: %m",
		 flag_dump_final_insns);
	  flag_dump_final_insns = NULL;
	}
      else
	{
	  int save_unnumbered = flag_dump_unnumbered;
	  int save_noaddr = flag_dump_noaddr;

	  flag_dump_noaddr = flag_dump_unnumbered = 1;
	  fprintf (final_output, "\n");
	  dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
	  flag_dump_noaddr = save_noaddr;
	  flag_dump_unnumbered = save_unnumbered;
	  if (fclose (final_output))
	    {
	      error ("could not close final insn dump file %qs: %m",
		     flag_dump_final_insns);
	      flag_dump_final_insns = NULL;
	    }
	}
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}