/* CFG cleanup for trees.
   Copyright (C) 2001-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfgcleanup.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-manip.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;
/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        /* A complex (e.g. abnormal) edge cannot simply be removed;
           just drop its fallthru-ness.  */
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}
/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            code_helper rcode;
            tree ops[3] = {};
            if (gimple_simplify (stmt, &rcode, ops, NULL, no_follow_ssa_edges,
                                 no_follow_ssa_edges)
                && rcode == INTEGER_CST)
              val = ops[0];
            break;
          }

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          ;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              taken_edge->count += e->count;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
      if (taken_edge->probability > REG_BR_PROB_BASE)
        taken_edge->probability = REG_BR_PROB_BASE;
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
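/* For illustration, a hypothetical GIMPLE fragment (not taken from a real
   dump): if the condition ending a block folds to a constant, e.g.

     if (0 != 0) goto <bb 3>; else goto <bb 4>;

   then find_taken_edge returns the edge to <bb 4>; the edge to <bb 3> is
   removed together with the blocks it dominates, the GIMPLE_COND itself
   is deleted, and the surviving edge becomes a plain fallthru.  */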
/* Cleanup the GF_CALL_CTRL_ALTERING flag according to
   the updated gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}
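/* Example (a sketch of the intent, not from a real dump): a call that was
   conservatively marked control-altering may later be discovered to be
   ECF_CONST or ECF_LEAF.  A non-looping const/pure or leaf call cannot
   loop forever or transfer control abnormally, so the stale flag is
   dropped, which in turn lets CFG cleanup simplify the block.  */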
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl-altering flag for the call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    retval |= cleanup_control_expr_graph (bb, gsi);
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt)
           && remove_fallthru_edge (bb->succs))
    retval = true;

  return retval;
}
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (!single_succ_p (bb)
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;

      /* Protect loop headers.  */
      if (bb->loop_father->header == bb)
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}
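/* For illustration, a forwarder block in a hypothetical dump looks like

     <bb 5>:
     goto <bb 7>;

   i.e. it contains nothing but (ignorable) labels and debug stmts and
   passes control to a single successor over an ordinary edge.  */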
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *label;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at
     out-of-ssa time due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like that.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      tree decl;
      label = gsi_stmt (gsi);
      if (is_gimple_debug (label))
        break;
      decl = gimple_label_label (as_a <glabel *> (label));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        {
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, label, GSI_SAME_STMT);
        }
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts)
    {
      gsi_to = gsi_after_labels (dest);
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); )
        {
          gimple *debug = gsi_stmt (gsi);
          if (!is_gimple_debug (debug))
            break;
          gsi_remove (&gsi, false);
          gsi_insert_before (&gsi_to, debug, GSI_SAME_STMT);
        }
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
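/* Sketch of the effect on a hypothetical CFG: for a forwarder BB with
   predecessors P1 and P2,

     P1 -> BB -> DEST                P1 -> DEST
     P2 -> BB            becomes     P2 -> DEST

   after the incoming edges are redirected, the labels and movable debug
   stmts are salvaged into DEST, and BB itself is deleted.  */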
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (lhs))) == INTEGER_CST)
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
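/* For illustration (hypothetical GIMPLE): given

     x_1 = foo ();
     bar (x_1);

   once foo is discovered noreturn, the block is split after the call,
   the dead LHS x_1 is detached (its SSA name gets a fresh default def),
   and the call is marked control-altering so that the following CFG
   cleanup removes the unreachable fallthru path containing bar.  */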
/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  if (single_succ_p (bb)
      && can_merge_blocks_p (bb, single_succ (bb)))
    {
      /* If there is a merge opportunity with the predecessor
         do nothing now but wait until we process the predecessor.
         This happens when we visit BBs in a non-optimal order and
         avoids quadratic behavior with adjusting stmts BB pointer.  */
      if (single_pred_p (bb)
          && can_merge_blocks_p (single_pred (bb), bb))
        return false;

      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}
/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edges to CASE_LABEL_EXPRs.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks looking for edge removal
     opportunities.  Do this first because incoming SSA form may be
     invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_control_flow_bb (bb);
    }

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_control_flow_bb (bb);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return changed;
}
/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}
/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}
/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/
const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi
unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}
gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  group_case_labels ();
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output, dump_flags | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}
const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};
class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}