/* CFG cleanup for trees.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"
#include "cgraph.h"

/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;

/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        /* Complex (abnormal/EH) edges cannot simply be removed, so only
           drop the fallthru bit on them.  */
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}

/* Convert a SWTCH with single non-default case to gcond and replace it
   at GSI.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = gimple_switch_default_bb (cfun, swtch);
  basic_block case_bb = label_to_block (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
                              fold_convert (TREE_TYPE (index), low),
                              NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update the edge flags.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}

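/* Worked example for convert_single_case_switch above (hypothetical GIMPLE,
   an editor's sketch rather than compiler output): a switch with one case
   range,

     switch (i_1) <default: <L2>, case 3 ... 7: <L1>>

   is rewritten via generate_range_test into a subtract performed in an
   unsigned type, so the whole range check becomes one LE_EXPR comparison,
   roughly

     _2 = i_1 - 3;   values below 3 wrap around and compare high
     if (_2 <= 4) goto <L1>; else goto <L2>;

   while a case without CASE_HIGH collapses to a single EQ_EXPR test.  */
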
/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
         GIMPLE condition.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
          && convert_single_case_switch (as_a <gswitch *> (stmt), gsi))
        stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            gimple_match_op res_op;
            if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
                                 no_follow_ssa_edges)
                && res_op.code == INTEGER_CST)
              val = res_op.ops[0];
          }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          break;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}

/* Cleanup the GF_CALL_CTRL_ALTERING flag according to the
   updated gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}

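/* Example of the situation handled above (an editor's illustration): a call
   may have been conservatively marked control-altering, e.g. while it was
   still indirect and could have been noreturn.  Once later optimization
   proves it a non-looping const/pure or leaf function, it can neither hang
   nor transfer control abnormally, so the flag can be dropped and its block
   becomes mergeable with the following one.  */
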
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to clean up the ctrl-altering flag for the call which ends BB.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (cfun, label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}

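/* Sketch of the computed-goto case handled above (hypothetical GIMPLE):
   once propagation turns

     goto ptr_1;

   into

     goto &L5;

   the destination is known at compile time, so every outgoing edge except
   the one to L5's block is removed, EDGE_ABNORMAL is cleared on the
   survivor, and the goto itself is deleted in favor of a plain fallthru
   edge.  */
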
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block when not optimizing.  */
      else if (!optimize
               && (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
                   || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
               && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (!optimize
              && (gimple_has_location (stmt)
                  || LOCATION_LOCUS (locus) != UNKNOWN_LOCATION)
              && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}

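/* Shape recognized by tree_forwarder_block_p above when PHI_WANTED is
   false (hypothetical GIMPLE, an editor's sketch): a block whose only
   effect is to pass control on, e.g.

     <bb 5> :
       goto <bb 7>;

   possibly preceded by artificial labels or debug stmts.  Such a block can
   be bypassed by redirecting its predecessors straight to bb 7.  */
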
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}

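/* Example: if DEST has PHI <x_1(E1), x_1(E2), y_3(E4)>, the alternatives
   for E1 and E2 are equal, so a predecessor reaching DEST through either
   edge sees the same value and the edges can be merged; with
   PHI <x_1(E1), y_3(E2)> they differ and the callers below must keep the
   edges apart.  */
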
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is a
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and rest of exception handling code
     does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              location_t l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
        break;

      /* Forwarder blocks can only contain labels and debug stmts, and
         labels must come first, so if we get to this point, we know
         we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        gsi_move_before (&gsi, &gsi_to);
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
        {
          gimple *debug = gsi_stmt (gsi);
          gcc_assert (is_gimple_debug (debug));
          gsi_move_before (&gsi, &gsi_to);
        }
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}

/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}

/* Return true if we want to merge BB1 and BB2 into a single block.  */

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
  return bb1->count.ok_for_merging (bb2->count);
}

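/* Note on the count check above: only when BB1 ends in a statement that can
   terminate the block early (e.g. a call that can throw) may BB2 execute
   less often than BB1.  Merging then gives both statements a single profile
   count, so ok_for_merging only permits it when the two counts are close
   enough not to distort the profile.  */
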
/* Tries to cleanup cfg in basic block BB by merging blocks.  Returns
   true if anything changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
           && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}

/* Do cleanup_control_flow_bb in PRE order.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  /* We want remove_edge_and_dominated_blocks to only remove edges,
     not dominated blocks which it does when dom info isn't available.
     Pretend that dom info is not available here.  */
  dom_state saved_state = dom_info_state (CDI_DOMINATORS);
  set_dom_info_availability (CDI_DOMINATORS, DOM_NONE);

  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && ! bitmap_bit_p (visited, dest->index))
        {
          bitmap_set_bit (visited, dest->index);
          /* We only possibly remove edges from DEST here, leaving
             possibly unreachable code in the IL.  */
          retval |= cleanup_control_flow_bb (dest);
          if (EDGE_COUNT (dest->succs) > 0)
            stack.quick_push (ei_start (dest->succs));
        }
      else
        {
          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  set_dom_info_availability (CDI_DOMINATORS, saved_state);

  /* We are deleting BBs in non-reverse dominator order, make sure
     insert_debug_temps_for_defs is prepared for that.  */
  if (retval)
    free_dominance_info (CDI_DOMINATORS);

  /* Remove all now (and previously) unreachable blocks.  */
  for (int i = NUM_FIXED_BLOCKS; i < last_basic_block_for_fn (cfun); ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb && !bitmap_bit_p (visited, bb->index))
        {
          if (!retval)
            free_dominance_info (CDI_DOMINATORS);
          delete_basic_block (bb);
          retval = true;
        }
    }

  return retval;
}

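/* Predicate for the make_forwarder_block call in cleanup_tree_cfg_noloop
   below: it answers false exactly for latch edges, recognized either via
   dominance (the edge source is dominated by its destination, the loop
   header) or via EDGE_DFS_BACK marks, so splitting the header separates
   the entry edges from the latches.  */
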
static bool
mfb_keep_latches (edge e)
{
  return !((dom_info_available_p (CDI_DOMINATORS)
            && dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
           || (e->flags & EDGE_DFS_BACK));
}

/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      /* This needs backedges or dominators.  */
      if (!dom_info_available_p (CDI_DOMINATORS))
        mark_dfs_back_edges ();

      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
        if (loop && loop->header)
          {
            basic_block bb = loop->header;
            edge_iterator ei;
            edge e;
            unsigned n = 0;
            bool found_latch = false;
            bool any_abnormal = false;

            /* We are only interested in preserving existing loops, but
               we need to check whether they are still real and of course
               if we need to add a preheader at all.  */
            FOR_EACH_EDGE (e, ei, bb->preds)
              {
                if (e->flags & EDGE_ABNORMAL)
                  {
                    any_abnormal = true;
                    break;
                  }
                if ((dom_info_available_p (CDI_DOMINATORS)
                     && dominated_by_p (CDI_DOMINATORS, e->src, bb))
                    || (e->flags & EDGE_DFS_BACK))
                  {
                    found_latch = true;
                    continue;
                  }
                n++;
              }
            /* If we have more than one entry to the loop header
               create a forwarder.  */
            if (found_latch && ! any_abnormal && n > 1)
              {
                edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
                                                      NULL);
                loop->header = fallthru->dest;
                if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
                  {
                    /* The loop updating from the CFG hook is incomplete
                       when we have multiple latches, fixup manually.  */
                    remove_bb_from_loops (fallthru->src);
                    loop_p cloop = loop;
                    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
                      cloop = find_common_loop (cloop, e->src->loop_father);
                    add_bb_to_loop (fallthru->src, cloop);
                  }
              }
          }
    }

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  This
     also gets rid of unreachable blocks.  */
  bool changed = cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Compute dominator info which we need for the iterative process below.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    calculate_dominance_info (CDI_DOMINATORS);
  else
    checking_verify_dominators (CDI_DOMINATORS);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  We cannot use FOR_EACH_BB_FN for the BB iteration
     since the basic blocks may get removed.  */
  unsigned n = last_basic_block_for_fn (cfun);
  for (unsigned i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        changed |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      unsigned i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      /* BB merging done by cleanup_tree_cfg_bb can end up propagating
         out single-argument PHIs which in turn can expose
         cleanup_control_flow_bb opportunities so we have to repeat
         that here.  */
      changed |= cleanup_control_flow_bb (bb);
      changed |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
         to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}

/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}

/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}

/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }
      else
        {
          /* If we merge the forwarder into a loop header verify if we
             are creating another loop latch edge.  If so, reset
             number of iteration information of the loop.  */
          if (dest->loop_father->header == dest
              && dominated_by_p (CDI_DOMINATORS, e->src, dest))
            {
              dest->loop_father->any_upper_bound = false;
              dest->loop_father->any_likely_upper_bound = false;
              free_numbers_of_iterations_estimates (dest->loop_father);
            }
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          location_t locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not killing the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}

/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/

namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi

unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}

/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output,
                                 dump_flags | TDF_SLIM | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}

namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}