/* CFG cleanup for trees.
   Copyright (C) 2001-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfgcleanup.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-manip.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
   what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;
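
/* A usage sketch (summarizing how the rest of this file uses the bitmap;
   it adds no new functionality): cleanup_tree_cfg_1 allocates it, every
   cleanup records a changed block with

     bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);

   and the main loop keeps draining it, e.g.

     while (!bitmap_empty_p (cfgcleanup_altered_bbs))
       {
         i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
         bitmap_clear_bit (cfgcleanup_altered_bbs, i);
         ...
       }

   until no altered blocks remain.  */
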
/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}

/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi,
                            bool first_p)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          /* During a first iteration on the CFG only remove trivially
             dead edges but mark other conditions for re-evaluation.  */
          if (first_p)
            {
              val = const_binop (gimple_cond_code (stmt), boolean_type_node,
                                 gimple_cond_lhs (stmt),
                                 gimple_cond_rhs (stmt));
              if (! val)
                bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
            }
          else
            {
              code_helper rcode;
              tree ops[3] = {};
              if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
                                   no_follow_ssa_edges)
                  && rcode == INTEGER_CST)
                val = ops[0];
            }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          ;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
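
/* As an illustration (a hand-written sketch, not taken from a testcase):
   if an earlier transformation leaves

     if (0 != 0)
       goto <bb 3>;
     else
       goto <bb 4>;

   at the end of a block, the condition folds to a constant,
   find_taken_edge returns the edge to <bb 4>, the edge to <bb 3> (and any
   blocks dominated only by it) is removed, and the GIMPLE_COND itself is
   deleted, leaving a plain fallthru edge.  */
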
/* Cleanup the GF_CALL_CTRL_ALTERING flag according to the
   updated gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}
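
/* Example (sketch): a call that used to be marked control altering, e.g.
   because it was believed to possibly throw or not return, loses the
   GF_CALL_CTRL_ALTERING flag here once its flags show it is const, pure
   (and not looping) or leaf, so it no longer has to terminate its basic
   block.  */
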
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb, bool first_p)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune the CFG.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi, first_p);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}

/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB not be equal to the
   entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;
  edge_iterator ei;
  edge e;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
      return false;
    /* If goto_locus of any of the edges differs, prevent removing
       the forwarder block for -O0.  */
    else if (optimize == 0 && e->goto_locus != locus)
      return false;

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}
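
/* Illustration (hand-written sketch): a typical forwarder block is simply

     <bb 5> :
     goto <bb 7>;

   i.e. it contains nothing but artificial labels (and, when PHI_WANTED
   is true, PHI nodes) and unconditionally passes control to a single
   successor; such blocks are what remove_forwarder_block and the
   mergephi pass below try to eliminate.  */
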
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
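
/* Illustration (hand-written sketch): given

     # x_1 = PHI <a_2 (E1), a_2 (E2), b_3 (E3)>
     # y_4 = PHI <c_5 (E1), c_5 (E2), d_6 (E3)>

   in DEST, the function returns true for the edge pair (E1, E2), because
   every PHI carries the same argument on both edges, and false for any
   pair involving E3.  */
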
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is a
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code would not cope with that.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             redirectable here.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple *debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
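
/* Illustration (hand-written sketch): if analysis discovers that foo
   cannot return in

     tmp_1 = foo (x_2);
     bar (tmp_1);

   the block is split right after the call, the now-unused LHS tmp_1 is
   dropped (its SSA name is rewired to a default definition of a fresh
   temporary), and the call is marked control altering so that CFG
   cleanup can prune the unreachable tail.  */
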
/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && can_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
           && can_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks looking for edge removal
     opportunities.  Do this first because incoming SSA form may be
     invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_control_flow_bb (bb, true);
    }

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_control_flow_bb (bb, false);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}
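
/* Usage sketch (hypothetical caller, not part of this file): a GIMPLE pass
   that rewires edges can either return TODO_cleanup_cfg from its execute
   hook or call cleanup_tree_cfg () directly; a true return value means the
   flowgraph changed, in which case SSA form may need updating (compare
   execute_cleanup_cfg_post_optimizing below).  */
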
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }
      else
        {
          /* If we merge the forwarder into a loop header verify if we
             are creating another loop latch edge.  If so, reset
             number of iteration information of the loop.  */
          if (dest->loop_father->header == dest
              && dominated_by_p (CDI_DOMINATORS, e->src, dest))
            {
              dest->loop_father->any_upper_bound = false;
              dest->loop_father->any_likely_upper_bound = false;
              free_numbers_of_iterations_estimates_loop (dest->loop_father);
            }
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  # tem_6 = PHI <tem_17(8), tem_23(7)>;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;

   Then we merge the first PHI node into the second one like so:

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;  */

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes in BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}