/* CFG cleanup for trees.
   Copyright (C) 2001-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "cfgloop.h"
#include "tree-scalar-evolution.h"
#include "gimple-match.h"
#include "gimple-fold.h"
#include "tree-ssa-loop-niter.h"
/* The set of blocks in which at least one of the following changes happened:
   -- the statement at the end of the block was changed
   -- the block was newly created
   -- the set of the predecessors of the block changed
   -- the set of the successors of the block changed
   ??? Maybe we could track these changes separately, since they determine
       what cleanups it makes sense to try on the block.  */
bitmap cfgcleanup_altered_bbs;
/* Remove any fallthru edge from EV.  Return true if an edge was removed.  */

static bool
remove_fallthru_edge (vec<edge, va_gc> *ev)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, ev)
    if ((e->flags & EDGE_FALLTHRU) != 0)
      {
        /* A complex (EH or abnormal) edge cannot simply be removed;
           just clear the fallthru bit.  */
        if (e->flags & EDGE_COMPLEX)
          e->flags &= ~EDGE_FALLTHRU;
        else
          remove_edge_and_dominated_blocks (e);
        return true;
      }
  return false;
}
/* Convert a SWTCH with a single non-default case to a gcond and replace it
   at GSI.  */

static bool
convert_single_case_switch (gswitch *swtch, gimple_stmt_iterator &gsi)
{
  if (gimple_switch_num_labels (swtch) != 2)
    return false;

  tree index = gimple_switch_index (swtch);
  tree default_label = CASE_LABEL (gimple_switch_default_label (swtch));
  tree label = gimple_switch_label (swtch, 1);
  tree low = CASE_LOW (label);
  tree high = CASE_HIGH (label);

  basic_block default_bb = label_to_block_fn (cfun, default_label);
  basic_block case_bb = label_to_block_fn (cfun, CASE_LABEL (label));

  basic_block bb = gimple_bb (swtch);
  gcond *cond;

  /* Replace switch statement with condition statement.  */
  if (high)
    {
      tree lhs, rhs;
      generate_range_test (bb, index, low, high, &lhs, &rhs);
      cond = gimple_build_cond (LE_EXPR, lhs, rhs, NULL_TREE, NULL_TREE);
    }
  else
    cond = gimple_build_cond (EQ_EXPR, index,
                              fold_convert (TREE_TYPE (index), low),
                              NULL_TREE, NULL_TREE);

  gsi_replace (&gsi, cond, true);

  /* Update cfg.  */
  edge case_edge = find_edge (bb, case_bb);
  edge default_edge = find_edge (bb, default_bb);

  case_edge->flags |= EDGE_TRUE_VALUE;
  default_edge->flags |= EDGE_FALSE_VALUE;
  return true;
}
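
/* For illustration (a sketch; exact GIMPLE dump syntax may vary), a
   two-label switch such as

     switch (i_1) <default: <D.2>, case 42: <D.3>>

   is replaced by

     if (i_1 == 42) goto <D.3>; else goto <D.2>;

   while a range case like "case 40 ... 42" instead uses the LE_EXPR
   comparison built by generate_range_test.  */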
/* Disconnect an unreachable block in the control expression starting
   at block BB.  */

static bool
cleanup_control_expr_graph (basic_block bb, gimple_stmt_iterator gsi)
{
  edge taken_edge;
  bool retval = false;
  gimple *stmt = gsi_stmt (gsi);

  if (!single_succ_p (bb))
    {
      edge e;
      edge_iterator ei;
      bool warned;
      tree val = NULL_TREE;

      /* Try to convert a switch with just a single non-default case to
         a conditional.  */
      if (gimple_code (stmt) == GIMPLE_SWITCH
          && convert_single_case_switch (as_a <gswitch *> (stmt), gsi))
        stmt = gsi_stmt (gsi);

      fold_defer_overflow_warnings ();
      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            gimple_match_op res_op;
            if (gimple_simplify (stmt, &res_op, NULL, no_follow_ssa_edges,
                                 no_follow_ssa_edges)
                && res_op.code == INTEGER_CST)
              val = res_op.ops[0];
          }
          break;

        case GIMPLE_SWITCH:
          val = gimple_switch_index (as_a <gswitch *> (stmt));
          break;

        default:
          break;
        }
      taken_edge = find_taken_edge (bb, val);
      if (!taken_edge)
        {
          fold_undefer_and_ignore_overflow_warnings ();
          return false;
        }

      /* Remove all the edges except the one that is always executed.  */
      warned = false;
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e != taken_edge)
            {
              if (!warned)
                {
                  fold_undefer_overflow_warnings
                    (true, stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);
                  warned = true;
                }

              taken_edge->probability += e->probability;
              remove_edge_and_dominated_blocks (e);
              retval = true;
            }
          else
            ei_next (&ei);
        }
      if (!warned)
        fold_undefer_and_ignore_overflow_warnings ();
    }
  else
    taken_edge = single_succ_edge (bb);

  bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
  gsi_remove (&gsi, true);
  taken_edge->flags = EDGE_FALLTHRU;

  return retval;
}
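
/* As an example (hypothetical GIMPLE, for illustration only): once
   earlier folding leaves a block ending in

     if (0 != 0) goto <bb 4>; else goto <bb 5>;

   find_taken_edge reports the edge to <bb 5>, the edge to <bb 4> is
   removed together with any blocks reachable only through it, and the
   now-redundant condition is deleted, leaving a fallthru edge.  */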
/* Cleanup the GF_CALL_CTRL_ALTERING flag according to the updated
   gimple_call_flags.  */

static void
cleanup_call_ctrl_altering_flag (gimple *bb_end)
{
  if (!is_gimple_call (bb_end)
      || !gimple_call_ctrl_altering_p (bb_end))
    return;

  int flags = gimple_call_flags (bb_end);
  if (((flags & (ECF_CONST | ECF_PURE))
       && !(flags & ECF_LOOPING_CONST_OR_PURE))
      || (flags & ECF_LEAF))
    gimple_call_set_ctrl_altering (bb_end, false);
}
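
/* E.g. a call later discovered to be non-looping const or pure, or a
   leaf call, cannot transfer control abnormally back into this
   function, so the control-altering flag set when the call was first
   seen can be dropped.  (Restating the test above for clarity.)  */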
/* Try to remove superfluous control structures in basic block BB.  Returns
   true if anything changes.  */

static bool
cleanup_control_flow_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  bool retval = false;
  gimple *stmt;

  /* If the last statement of the block could throw and now cannot,
     we need to prune cfg.  */
  retval |= gimple_purge_dead_eh_edges (bb);

  gsi = gsi_last_nondebug_bb (bb);
  if (gsi_end_p (gsi))
    return retval;

  stmt = gsi_stmt (gsi);

  /* Try to cleanup ctrl altering flag for call which ends bb.  */
  cleanup_call_ctrl_altering_flag (stmt);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      retval |= cleanup_control_expr_graph (bb, gsi);
    }
  else if (gimple_code (stmt) == GIMPLE_GOTO
           && TREE_CODE (gimple_goto_dest (stmt)) == ADDR_EXPR
           && (TREE_CODE (TREE_OPERAND (gimple_goto_dest (stmt), 0))
               == LABEL_DECL))
    {
      /* If we had a computed goto which has a compile-time determinable
         destination, then we can eliminate the goto.  */
      edge e;
      tree label;
      edge_iterator ei;
      basic_block target_block;

      gcc_checking_assert (gsi_stmt (gsi_last_bb (bb)) == stmt);
      /* First look at all the outgoing edges.  Delete any outgoing
         edges which do not go to the right block.  For the one
         edge which goes to the right block, fix up its flags.  */
      label = TREE_OPERAND (gimple_goto_dest (stmt), 0);
      if (DECL_CONTEXT (label) != cfun->decl)
        return retval;
      target_block = label_to_block (label);
      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
        {
          if (e->dest != target_block)
            remove_edge_and_dominated_blocks (e);
          else
            {
              /* Turn off the EDGE_ABNORMAL flag.  */
              e->flags &= ~EDGE_ABNORMAL;

              /* And set EDGE_FALLTHRU.  */
              e->flags |= EDGE_FALLTHRU;
              ei_next (&ei);
            }
        }

      bitmap_set_bit (cfgcleanup_altered_bbs, bb->index);
      bitmap_set_bit (cfgcleanup_altered_bbs, target_block->index);

      /* Remove the GOTO_EXPR as it is not needed.  The CFG has all the
         relevant information we need.  */
      gsi_remove (&gsi, true);
      retval = true;
    }

  /* Check for indirect calls that have been turned into
     noreturn calls.  */
  else if (is_gimple_call (stmt)
           && gimple_call_noreturn_p (stmt))
    {
      /* If there are debug stmts after the noreturn call, remove them
         now, they should be all unreachable anyway.  */
      for (gsi_next (&gsi); !gsi_end_p (gsi); )
        gsi_remove (&gsi, true);
      if (remove_fallthru_edge (bb->succs))
        retval = true;
    }

  return retval;
}
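
/* Illustration (a sketch): a computed goto whose destination operand
   has folded to the address of a specific label, conceptually

     goto &L;

   names its target at compile time, so every outgoing edge except the
   one to L's block is deleted and the goto itself becomes redundant.  */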
/* Return true if basic block BB does nothing except pass control
   flow to another block and that we can safely insert a label at
   the start of the successor block.

   As a precondition, we require that BB be not equal to
   the entry block.  */

static bool
tree_forwarder_block_p (basic_block bb, bool phi_wanted)
{
  gimple_stmt_iterator gsi;
  location_t locus;

  /* BB must have a single outgoing edge.  */
  if (single_succ_p (bb) != 1
      /* If PHI_WANTED is false, BB must not have any PHI nodes.
         Otherwise, BB must have PHI nodes.  */
      || gimple_seq_empty_p (phi_nodes (bb)) == phi_wanted
      /* BB may not be a predecessor of the exit block.  */
      || single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun)
      /* Nor should this be an infinite loop.  */
      || single_succ (bb) == bb
      /* BB may not have an abnormal outgoing edge.  */
      || (single_succ_edge (bb)->flags & EDGE_ABNORMAL))
    return false;

  gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun));

  locus = single_succ_edge (bb)->goto_locus;

  /* There should not be an edge coming from entry, or an EH edge.  */
  {
    edge_iterator ei;
    edge e;

    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || (e->flags & EDGE_EH))
        return false;
      /* If goto_locus of any of the edges differs, prevent removing
         the forwarder block for -O0.  */
      else if (optimize == 0 && e->goto_locus != locus)
        return false;
  }

  /* Now walk through the statements backward.  We can ignore labels,
     anything else means this is not a forwarder block.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      switch (gimple_code (stmt))
        {
        case GIMPLE_LABEL:
          if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt))))
            return false;
          if (optimize == 0 && gimple_location (stmt) != locus)
            return false;
          break;

          /* ??? For now, hope there's a corresponding debug
             assignment at the destination.  */
        case GIMPLE_DEBUG:
          break;

        default:
          return false;
        }
    }

  if (current_loops)
    {
      basic_block dest;
      /* Protect loop headers.  */
      if (bb_loop_header_p (bb))
        return false;

      dest = EDGE_SUCC (bb, 0)->dest;
      /* Protect loop preheaders and latches if requested.  */
      if (dest->loop_father->header == dest)
        {
          if (bb->loop_father == dest->loop_father)
            {
              if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
                return false;
              /* If bb doesn't have a single predecessor we'd make this
                 loop have multiple latches.  Don't do that if that
                 would in turn require disambiguating them.  */
              return (single_pred_p (bb)
                      || loops_state_satisfies_p
                           (LOOPS_MAY_HAVE_MULTIPLE_LATCHES));
            }
          else if (bb->loop_father == loop_outer (dest->loop_father))
            return !loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS);
          /* Always preserve other edges into loop headers that are
             not simple latches or preheaders.  */
          return false;
        }
    }

  return true;
}
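
/* Shape of a forwarder block, for reference (schematic dump): with
   PHI_WANTED == false it is essentially

     <bb 5> :
     goto <bb 7>;

   possibly preceded by labels and debug stmts only; with PHI_WANTED
   == true the block additionally starts with PHI nodes, which is the
   shape the mergephi pass below looks for.  */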
/* If all the PHI nodes in DEST have alternatives for E1 and E2 and
   those alternatives are equal in each of the PHI nodes, then return
   true, else return false.  */

static bool
phi_alternatives_equal (basic_block dest, edge e1, edge e2)
{
  int n1 = e1->dest_idx;
  int n2 = e2->dest_idx;
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree val1 = gimple_phi_arg_def (phi, n1);
      tree val2 = gimple_phi_arg_def (phi, n2);

      gcc_assert (val1 != NULL_TREE);
      gcc_assert (val2 != NULL_TREE);

      if (!operand_equal_for_phi_arg_p (val1, val2))
        return false;
    }

  return true;
}
/* Removes forwarder block BB.  Returns false if this failed.  */

static bool
remove_forwarder_block (basic_block bb)
{
  edge succ = single_succ_edge (bb), e, s;
  basic_block dest = succ->dest;
  gimple *stmt;
  edge_iterator ei;
  gimple_stmt_iterator gsi, gsi_to;
  bool can_move_debug_stmts;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* If the destination block consists of a nonlocal label or is an
     EH landing pad, do not merge it.  */
  stmt = first_stmt (dest);
  if (stmt)
    if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt))
          || EH_LANDING_PAD_NR (gimple_label_label (label_stmt)) != 0)
        return false;

  /* If there is an abnormal edge to basic block BB, but not into
     dest, problems might occur during removal of the phi node at out
     of ssa due to overlapping live ranges of registers.

     If there is an abnormal edge in DEST, the problems would occur
     anyway since cleanup_dead_labels would then merge the labels for
     two different eh regions, and the rest of the exception handling
     code does not like it.

     So if there is an abnormal edge to BB, proceed only if there is
     no abnormal edge to DEST and there are no phi nodes in DEST.  */
  if (bb_has_abnormal_pred (bb)
      && (bb_has_abnormal_pred (dest)
          || !gimple_seq_empty_p (phi_nodes (dest))))
    return false;

  /* If there are phi nodes in DEST, and some of the blocks that are
     predecessors of BB are also predecessors of DEST, check that the
     phi node arguments match.  */
  if (!gimple_seq_empty_p (phi_nodes (dest)))
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          s = find_edge (e->src, dest);
          if (!s)
            continue;

          if (!phi_alternatives_equal (dest, succ, s))
            return false;
        }
    }

  can_move_debug_stmts = MAY_HAVE_DEBUG_STMTS && single_pred_p (dest);

  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect the edges.  */
  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);

      if (e->flags & EDGE_ABNORMAL)
        {
          /* If there is an abnormal edge, redirect it anyway, and
             move the labels to the new block to make it legal.  */
          s = redirect_edge_succ_nodup (e, dest);
        }
      else
        s = redirect_edge_and_branch (e, dest);

      if (s == e)
        {
          /* Create arguments for the phi nodes, since the edge was not
             here before.  */
          for (gphi_iterator psi = gsi_start_phis (dest);
               !gsi_end_p (psi);
               gsi_next (&psi))
            {
              gphi *phi = psi.phi ();
              source_location l = gimple_phi_arg_location_from_edge (phi, succ);
              tree def = gimple_phi_arg_def (phi, succ->dest_idx);
              add_phi_arg (phi, unshare_expr (def), s, l);
            }
        }
    }

  /* Move nonlocal labels and computed goto targets as well as user
     defined labels and labels with an EH landing pad number to the
     new block, so that the redirection of the abnormal edges works,
     jump targets end up in a sane place and debug information for
     labels is retained.  */
  gsi_to = gsi_start_bb (dest);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
        break;

      /* Forwarder blocks can only contain labels and debug stmts, and
         labels must come first, so if we get to this point, we know
         we're looking at a label.  */
      tree decl = gimple_label_label (as_a <glabel *> (stmt));
      if (EH_LANDING_PAD_NR (decl) != 0
          || DECL_NONLOCAL (decl)
          || FORCED_LABEL (decl)
          || !DECL_ARTIFICIAL (decl))
        gsi_move_before (&gsi, &gsi_to);
      else
        gsi_next (&gsi);
    }

  /* Move debug statements if the destination has a single predecessor.  */
  if (can_move_debug_stmts && !gsi_end_p (gsi))
    {
      gsi_to = gsi_after_labels (dest);
      do
        {
          gimple *debug = gsi_stmt (gsi);
          gcc_assert (is_gimple_debug (debug));
          gsi_move_before (&gsi, &gsi_to);
        }
      while (!gsi_end_p (gsi));
    }

  bitmap_set_bit (cfgcleanup_altered_bbs, dest->index);

  /* Update the dominators.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      basic_block dom, dombb, domdest;

      dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
      domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
      if (domdest == bb)
        {
          /* Shortcut to avoid calling (relatively expensive)
             nearest_common_dominator unless necessary.  */
          dom = dombb;
        }
      else
        dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

      set_immediate_dominator (CDI_DOMINATORS, dest, dom);
    }

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* And kill the forwarder block.  */
  delete_basic_block (bb);

  return true;
}
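
/* Sketch of the redirection step above: for a forwarder B containing
   only "goto D", each predecessor edge P->B is redirected to P->D.
   When that creates a genuinely new edge into D, every PHI in D gets
   a copy of the argument it previously received on the B->D edge, so
   the PHI webs stay complete.  */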
/* STMT is a call that has been discovered noreturn.  Split the
   block to prepare fixing up the CFG and remove LHS.
   Return true if cleanup-cfg needs to run.  */

bool
fixup_noreturn_call (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  bool changed = false;

  if (gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
    return false;

  /* First split basic block if stmt is not last.  */
  if (stmt != gsi_stmt (gsi_last_bb (bb)))
    {
      if (stmt == gsi_stmt (gsi_last_nondebug_bb (bb)))
        {
          /* Don't split if there are only debug stmts
             after stmt, that can result in -fcompare-debug
             failures.  Remove the debug stmts instead,
             they should be all unreachable anyway.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
          for (gsi_next (&gsi); !gsi_end_p (gsi); )
            gsi_remove (&gsi, true);
        }
      else
        {
          split_block (bb, stmt);
          changed = true;
        }
    }

  /* If there is an LHS, remove it, but only if its type has fixed size.
     The LHS will need to be recreated during RTL expansion and creating
     temporaries of variable-sized types is not supported.  Also don't
     do this with TREE_ADDRESSABLE types, as assign_temp will abort.
     Drop LHS regardless of TREE_ADDRESSABLE, if the function call
     has been changed into a call that does not return a value, like
     __builtin_unreachable or __cxa_pure_virtual.  */
  tree lhs = gimple_call_lhs (stmt);
  if (lhs
      && (should_remove_lhs_p (lhs)
          || VOID_TYPE_P (TREE_TYPE (gimple_call_fntype (stmt)))))
    {
      gimple_call_set_lhs (stmt, NULL_TREE);

      /* We need to fix up the SSA name to avoid checking errors.  */
      if (TREE_CODE (lhs) == SSA_NAME)
        {
          tree new_var = create_tmp_reg (TREE_TYPE (lhs));
          SET_SSA_NAME_VAR_OR_IDENTIFIER (lhs, new_var);
          SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
          set_ssa_default_def (cfun, new_var, lhs);
        }

      update_stmt (stmt);
    }

  /* Mark the call as altering control flow.  */
  if (!gimple_call_ctrl_altering_p (stmt))
    {
      gimple_call_set_ctrl_altering (stmt, true);
      changed = true;
    }

  return changed;
}
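
/* Example (a sketch): if  x_1 = foo ();  is discovered noreturn, the
   statements after it are split into a now-unreachable block (or the
   trailing debug stmts deleted), and the dangling definition of x_1
   is replaced by making x_1 the default definition of a fresh
   temporary, so the SSA verifier stays happy.  */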
/* Return true if we want to merge BB1 and BB2 into a single block.  */

static bool
want_merge_blocks_p (basic_block bb1, basic_block bb2)
{
  if (!can_merge_blocks_p (bb1, bb2))
    return false;
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb1);
  if (gsi_end_p (gsi) || !stmt_can_terminate_bb_p (gsi_stmt (gsi)))
    return true;
  return bb1->count.ok_for_merging (bb2->count);
}
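
/* Interpretation: when BB1's last statement can terminate the block
   (e.g. a call that may not return), BB1 and BB2 can legitimately have
   different profile counts; ok_for_merging then refuses merges that
   would force a single count onto both and distort the profile.  */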
/* Tries to cleanup cfg in basic block BB.  Returns true if anything
   changes.  */

static bool
cleanup_tree_cfg_bb (basic_block bb)
{
  if (tree_forwarder_block_p (bb, false)
      && remove_forwarder_block (bb))
    return true;

  /* If there is a merge opportunity with the predecessor
     do nothing now but wait until we process the predecessor.
     This happens when we visit BBs in a non-optimal order and
     avoids quadratic behavior with adjusting stmts BB pointer.  */
  if (single_pred_p (bb)
      && want_merge_blocks_p (single_pred (bb), bb))
    /* But make sure we _do_ visit it.  When we remove unreachable paths
       ending in a backedge we fail to mark the destination's predecessors
       as changed.  */
    bitmap_set_bit (cfgcleanup_altered_bbs, single_pred (bb)->index);

  /* Merging the blocks may create new opportunities for folding
     conditional branches (due to the elimination of single-valued PHI
     nodes).  */
  else if (single_succ_p (bb)
           && want_merge_blocks_p (bb, single_succ (bb)))
    {
      merge_blocks (bb, single_succ (bb));
      return true;
    }

  return false;
}
/* Do cleanup_control_flow_bb in PRE order.  */

static bool
cleanup_control_flow_pre ()
{
  bool retval = false;

  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);
  auto_sbitmap visited (last_basic_block_for_fn (cfun));
  bitmap_clear (visited);

  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (! stack.is_empty ())
    {
      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      basic_block dest = ei_edge (ei)->dest;

      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && ! bitmap_bit_p (visited, dest->index))
        {
          bitmap_set_bit (visited, dest->index);
          retval |= cleanup_control_flow_bb (dest);
          if (EDGE_COUNT (dest->succs) > 0)
            stack.quick_push (ei_start (dest->succs));
        }
      else
        {
          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  return retval;
}
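
/* Note on the traversal (interpretation of the code above): the
   explicit stack of edge iterators is an iterative depth-first walk
   from the entry block, so each block is cleaned before the blocks it
   reaches.  Since cleanup_control_flow_bb may delete edges and the
   blocks they alone dominated, visiting dominators first avoids
   spending work on blocks that are about to disappear.  */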
/* Iterate the cfg cleanups, while anything changes.  */

static bool
cleanup_tree_cfg_1 (void)
{
  bool retval = false;
  basic_block bb;
  unsigned i, n;

  /* Prepare the worklists of altered blocks.  */
  cfgcleanup_altered_bbs = BITMAP_ALLOC (NULL);

  /* During forwarder block cleanup, we may redirect edges out of
     SWITCH_EXPRs, which can get expensive.  So we want to enable
     recording of edge to CASE_LABEL_EXPR.  */
  start_recording_case_labels ();

  /* We cannot use FOR_EACH_BB_FN for the BB iterations below
     since the basic blocks may get removed.  */

  /* Start by iterating over all basic blocks in PRE order looking for
     edge removal opportunities.  Do this first because incoming SSA form
     may be invalid and we want to avoid performing SSA related tasks such
     as propagating out a PHI node during BB merging in that state.  */
  retval |= cleanup_control_flow_pre ();

  /* After doing the above SSA form should be valid (or an update SSA
     should be required).  */

  /* Continue by iterating over all basic blocks looking for BB merging
     opportunities.  */
  n = last_basic_block_for_fn (cfun);
  for (i = NUM_FIXED_BLOCKS; i < n; i++)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        retval |= cleanup_tree_cfg_bb (bb);
    }

  /* Now process the altered blocks, as long as any are available.  */
  while (!bitmap_empty_p (cfgcleanup_altered_bbs))
    {
      i = bitmap_first_set_bit (cfgcleanup_altered_bbs);
      bitmap_clear_bit (cfgcleanup_altered_bbs, i);
      if (i < NUM_FIXED_BLOCKS)
        continue;

      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (!bb)
        continue;

      retval |= cleanup_control_flow_bb (bb);
      retval |= cleanup_tree_cfg_bb (bb);
    }

  end_recording_case_labels ();
  BITMAP_FREE (cfgcleanup_altered_bbs);
  return retval;
}
/* Edge predicate for make_forwarder_block below.  An edge whose source
   is dominated by its destination is a latch edge of the loop headed
   by the destination; return true for all other (entry) edges so the
   latch edges stay attached to the loop header.  */

static bool
mfb_keep_latches (edge e)
{
  return ! dominated_by_p (CDI_DOMINATORS, e->src, e->dest);
}
/* Remove unreachable blocks and other miscellaneous clean up work.
   Return true if the flowgraph was modified, false otherwise.  */

static bool
cleanup_tree_cfg_noloop (void)
{
  bool changed;

  timevar_push (TV_TREE_CLEANUP_CFG);

  /* Iterate until there are no more cleanups left to do.  If any
     iteration changed the flowgraph, set CHANGED to true.

     If dominance information is available, there cannot be any unreachable
     blocks.  */
  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      changed = delete_unreachable_blocks ();
      calculate_dominance_info (CDI_DOMINATORS);
    }
  else
    {
      checking_verify_dominators (CDI_DOMINATORS);
      changed = false;
    }

  /* Ensure that we have single entries into loop headers.  Otherwise
     if one of the entries is becoming a latch due to CFG cleanup
     (from formerly being part of an irreducible region) then we mess
     up loop fixup and associate the old loop with a different region
     which makes niter upper bounds invalid.  See for example PR80549.
     This needs to be done before we remove trivially dead edges as
     we need to capture the dominance state before the pending transform.  */
  if (current_loops)
    {
      loop_p loop;
      unsigned i;
      FOR_EACH_VEC_ELT (*get_loops (cfun), i, loop)
        if (loop && loop->header)
          {
            basic_block bb = loop->header;
            edge_iterator ei;
            edge e;
            bool found_latch = false;
            bool any_abnormal = false;
            unsigned n = 0;
            /* We are only interested in preserving existing loops, but
               we need to check whether they are still real and of course
               if we need to add a preheader at all.  */
            FOR_EACH_EDGE (e, ei, bb->preds)
              {
                if (e->flags & EDGE_ABNORMAL)
                  {
                    any_abnormal = true;
                    break;
                  }
                if (dominated_by_p (CDI_DOMINATORS, e->src, bb))
                  {
                    found_latch = true;
                    continue;
                  }
                n++;
              }
            /* If we have more than one entry to the loop header
               create a forwarder.  */
            if (found_latch && ! any_abnormal && n > 1)
              {
                edge fallthru = make_forwarder_block (bb, mfb_keep_latches,
                                                      NULL);
                loop->header = fallthru->dest;
                if (! loops_state_satisfies_p (LOOPS_NEED_FIXUP))
                  {
                    /* The loop updating from the CFG hook is incomplete
                       when we have multiple latches, fixup manually.  */
                    remove_bb_from_loops (fallthru->src);
                    loop_p cloop = loop;
                    FOR_EACH_EDGE (e, ei, fallthru->src->preds)
                      cloop = find_common_loop (cloop, e->src->loop_father);
                    add_bb_to_loop (fallthru->src, cloop);
                  }
              }
          }
    }

  changed |= cleanup_tree_cfg_1 ();

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Do not renumber blocks if the SCEV cache is active, it is indexed by
     basic-block numbers.  */
  if (! scev_initialized_p ())
    compact_blocks ();

  checking_verify_flow_info ();

  timevar_pop (TV_TREE_CLEANUP_CFG);

  if (changed && current_loops)
    {
      /* Removing edges and/or blocks may make recorded bounds refer
         to stale GIMPLE stmts now, so clear them.  */
      free_numbers_of_iterations_estimates (cfun);
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  return changed;
}
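
/* Sketch of the transform above: a loop header H with entry edges E1
   and E2 plus a latch L gets a new forwarder block F; E1 and E2 now
   enter F, a single edge from F reaches the header, and
   mfb_keep_latches ensures L still reaches the header directly, so
   the loop keeps its single latch and gains a unique entry.  */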
/* Repairs loop structures.  */

static void
repair_loop_structures (void)
{
  bitmap changed_bbs;
  unsigned n_new_loops;

  calculate_dominance_info (CDI_DOMINATORS);

  timevar_push (TV_REPAIR_LOOPS);
  changed_bbs = BITMAP_ALLOC (NULL);
  n_new_loops = fix_loop_structure (changed_bbs);

  /* This usually does nothing.  But sometimes parts of cfg that originally
     were inside a loop get out of it due to edge removal (since they
     become unreachable by back edges from latch).  Also a former
     irreducible loop can become reducible - in this case force a full
     rewrite into loop-closed SSA form.  */
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    rewrite_into_loop_closed_ssa (n_new_loops ? NULL : changed_bbs,
                                  TODO_update_ssa);

  BITMAP_FREE (changed_bbs);

  checking_verify_loop_structure ();
  scev_reset ();

  timevar_pop (TV_REPAIR_LOOPS);
}
/* Cleanup cfg and repair loop structures.  */

bool
cleanup_tree_cfg (void)
{
  bool changed = cleanup_tree_cfg_noloop ();

  if (current_loops != NULL
      && loops_state_satisfies_p (LOOPS_NEED_FIXUP))
    repair_loop_structures ();

  return changed;
}
/* Tries to merge the PHI nodes at BB into those at BB's sole successor.
   Returns true if successful.  */

static bool
remove_forwarder_block_with_phi (basic_block bb)
{
  edge succ = single_succ_edge (bb);
  basic_block dest = succ->dest;
  gimple *label;
  basic_block dombb, domdest, dom;

  /* We check for infinite loops already in tree_forwarder_block_p.
     However it may happen that the infinite loop is created
     afterwards due to removal of forwarders.  */
  if (dest == bb)
    return false;

  /* Removal of forwarders may expose new natural loops and thus
     a block may turn into a loop header.  */
  if (current_loops && bb_loop_header_p (bb))
    return false;

  /* If the destination block consists of a nonlocal label, do not
     merge it.  */
  label = first_stmt (dest);
  if (label)
    if (glabel *label_stmt = dyn_cast <glabel *> (label))
      if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
        return false;

  /* Record BB's single pred in case we need to update the father
     loop's latch information later.  */
  basic_block pred = NULL;
  if (single_pred_p (bb))
    pred = single_pred (bb);

  /* Redirect each incoming edge to BB to DEST.  */
  while (EDGE_COUNT (bb->preds) > 0)
    {
      edge e = EDGE_PRED (bb, 0), s;
      gphi_iterator gsi;

      s = find_edge (e->src, dest);
      if (s)
        {
          /* We already have an edge S from E->src to DEST.  If S and
             E->dest's sole successor edge have the same PHI arguments
             at DEST, redirect S to DEST.  */
          if (phi_alternatives_equal (dest, s, succ))
            {
              e = redirect_edge_and_branch (e, dest);
              redirect_edge_var_map_clear (e);
              continue;
            }

          /* PHI arguments are different.  Create a forwarder block by
             splitting E so that we can merge PHI arguments on E to
             DEST.  */
          e = single_succ_edge (split_edge (e));
        }
      else
        {
          /* If we merge the forwarder into a loop header verify if we
             are creating another loop latch edge.  If so, reset
             number of iteration information of the loop.  */
          if (dest->loop_father->header == dest
              && dominated_by_p (CDI_DOMINATORS, e->src, dest))
            {
              dest->loop_father->any_upper_bound = false;
              dest->loop_father->any_likely_upper_bound = false;
              free_numbers_of_iterations_estimates (dest->loop_father);
            }
        }

      s = redirect_edge_and_branch (e, dest);

      /* redirect_edge_and_branch must not create a new edge.  */
      gcc_assert (s == e);

      /* Add to the PHI nodes at DEST each PHI argument removed at the
         destination of E.  */
      for (gsi = gsi_start_phis (dest);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree def = gimple_phi_arg_def (phi, succ->dest_idx);
          source_location locus = gimple_phi_arg_location_from_edge (phi, succ);

          if (TREE_CODE (def) == SSA_NAME)
            {
              /* If DEF is one of the results of PHI nodes removed during
                 redirection, replace it with the PHI argument that used
                 to be on E.  */
              vec<edge_var_map> *head = redirect_edge_var_map_vector (e);
              size_t length = head ? head->length () : 0;
              for (size_t i = 0; i < length; i++)
                {
                  edge_var_map *vm = &(*head)[i];
                  tree old_arg = redirect_edge_var_map_result (vm);
                  tree new_arg = redirect_edge_var_map_def (vm);

                  if (def == old_arg)
                    {
                      def = new_arg;
                      locus = redirect_edge_var_map_location (vm);
                      break;
                    }
                }
            }

          add_phi_arg (phi, def, s, locus);
        }

      redirect_edge_var_map_clear (e);
    }

  /* Update the dominators.  */
  dombb = get_immediate_dominator (CDI_DOMINATORS, bb);
  domdest = get_immediate_dominator (CDI_DOMINATORS, dest);
  if (domdest == bb)
    {
      /* Shortcut to avoid calling (relatively expensive)
         nearest_common_dominator unless necessary.  */
      dom = dombb;
    }
  else
    dom = nearest_common_dominator (CDI_DOMINATORS, domdest, dombb);

  set_immediate_dominator (CDI_DOMINATORS, dest, dom);

  /* Adjust latch information of BB's parent loop as otherwise
     the cfg hook has a hard time not to kill the loop.  */
  if (current_loops && bb->loop_father->latch == bb)
    bb->loop_father->latch = pred;

  /* Remove BB since all of BB's incoming edges have been redirected
     to DEST.  */
  delete_basic_block (bb);

  return true;
}
/* This pass merges PHI nodes if one feeds into another.  For example,
   suppose we have the following:

  goto <bb 9> (<L9>);

<L8>:;
  tem_17 = foo ();

  # tem_6 = PHI <tem_17(8), tem_23(7)>;
<L9>:;

  # tem_3 = PHI <tem_6(9), tem_2(5)>;
<L10>:;

  Then we merge the first PHI node into the second one like so:

  goto <bb 9> (<L10>);

<L8>:;
  tem_17 = foo ();

  # tem_3 = PHI <tem_23(7), tem_2(5), tem_17(8)>;
<L10>:;
*/
namespace {

const pass_data pass_data_merge_phi =
{
  GIMPLE_PASS, /* type */
  "mergephi", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_MERGE_PHI, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_merge_phi : public gimple_opt_pass
{
public:
  pass_merge_phi (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_merge_phi, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_merge_phi (m_ctxt); }
  virtual unsigned int execute (function *);

}; // class pass_merge_phi
unsigned int
pass_merge_phi::execute (function *fun)
{
  basic_block *worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (fun));
  basic_block *current = worklist;
  basic_block bb;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Find all PHI nodes that we may be able to merge.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      basic_block dest;

      /* Look for a forwarder block with PHI nodes.  */
      if (!tree_forwarder_block_p (bb, true))
        continue;

      dest = single_succ (bb);

      /* We have to feed into another basic block with PHI
         nodes.  */
      if (gimple_seq_empty_p (phi_nodes (dest))
          /* We don't want to deal with a basic block with
             abnormal edges.  */
          || bb_has_abnormal_pred (bb))
        continue;

      if (!dominated_by_p (CDI_DOMINATORS, dest, bb))
        {
          /* If BB does not dominate DEST, then the PHI nodes at
             DEST must be the only users of the results of the PHI
             nodes at BB.  */
          *current++ = bb;
        }
      else
        {
          gphi_iterator gsi;
          unsigned int dest_idx = single_succ_edge (bb)->dest_idx;

          /* BB dominates DEST.  There may be many users of the PHI
             nodes in BB.  However, there is still a trivial case we
             can handle.  If the result of every PHI in BB is used
             only by a PHI in DEST, then we can trivially merge the
             PHI nodes from BB into DEST.  */
          for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
               gsi_next (&gsi))
            {
              gphi *phi = gsi.phi ();
              tree result = gimple_phi_result (phi);
              use_operand_p imm_use;
              gimple *use_stmt;

              /* If the PHI's result is never used, then we can just
                 ignore it.  */
              if (has_zero_uses (result))
                continue;

              /* Get the single use of the result of this PHI node.  */
              if (!single_imm_use (result, &imm_use, &use_stmt)
                  || gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_bb (use_stmt) != dest
                  || gimple_phi_arg_def (use_stmt, dest_idx) != result)
                break;
            }

          /* If the loop above iterated through all the PHI nodes
             in BB, then we can merge the PHIs from BB into DEST.  */
          if (gsi_end_p (gsi))
            *current++ = bb;
        }
    }

  /* Now let's drain WORKLIST.  */
  bool changed = false;
  while (current != worklist)
    {
      bb = *--current;
      changed |= remove_forwarder_block_with_phi (bb);
    }
  free (worklist);

  /* Removing forwarder blocks can cause formerly irreducible loops
     to become reducible if we merged two entry blocks.  */
  if (changed
      && current_loops)
    loops_state_set (LOOPS_NEED_FIXUP);

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_merge_phi (gcc::context *ctxt)
{
  return new pass_merge_phi (ctxt);
}
/* Pass: cleanup the CFG just before expanding trees to RTL.
   This is just a round of label cleanups and case node grouping
   because after the tree optimizers have run such cleanups may
   be necessary.  */

static unsigned int
execute_cleanup_cfg_post_optimizing (void)
{
  unsigned int todo = execute_fixup_cfg ();
  if (cleanup_tree_cfg ())
    {
      todo &= ~TODO_cleanup_cfg;
      todo |= TODO_update_ssa;
    }
  maybe_remove_unreachable_handlers ();
  cleanup_dead_labels ();
  if (group_case_labels ())
    todo |= TODO_cleanup_cfg;
  if ((flag_compare_debug_opt || flag_compare_debug)
      && flag_dump_final_insns)
    {
      FILE *final_output = fopen (flag_dump_final_insns, "a");

      if (!final_output)
        {
          error ("could not open final insn dump file %qs: %m",
                 flag_dump_final_insns);
          flag_dump_final_insns = NULL;
        }
      else
        {
          int save_unnumbered = flag_dump_unnumbered;
          int save_noaddr = flag_dump_noaddr;

          flag_dump_noaddr = flag_dump_unnumbered = 1;
          fprintf (final_output, "\n");
          dump_enumerated_decls (final_output,
                                 dump_flags | TDF_SLIM | TDF_NOUID);
          flag_dump_noaddr = save_noaddr;
          flag_dump_unnumbered = save_unnumbered;
          if (fclose (final_output))
            {
              error ("could not close final insn dump file %qs: %m",
                     flag_dump_final_insns);
              flag_dump_final_insns = NULL;
            }
        }
    }
  return todo;
}
namespace {

const pass_data pass_data_cleanup_cfg_post_optimizing =
{
  GIMPLE_PASS, /* type */
  "optimized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CLEANUP_CFG, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_remove_unused_locals, /* todo_flags_finish */
};

class pass_cleanup_cfg_post_optimizing : public gimple_opt_pass
{
public:
  pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_cfg_post_optimizing, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *)
    {
      return execute_cleanup_cfg_post_optimizing ();
    }

}; // class pass_cleanup_cfg_post_optimizing

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_cfg_post_optimizing (gcc::context *ctxt)
{
  return new pass_cleanup_cfg_post_optimizing (ctxt);
}