/* Code sinking for trees
   Copyright (C) 2001-2020 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
/* TODO:
   1. Sinking stores using only scalar promotion (i.e. without moving the
      RHS); see the illustrative sketch after this comment.  Store copy
      propagation will then take care of eliminating the original store.

   2. Sinking using Partial Dead Code Elimination.  */
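
/* An illustrative sketch (not taken verbatim from the original sources) of
   the scalar-promotion idea above; sinktemp is a made-up temporary.

     *q = p;
     p = p + 1;
     if (something)
       *q = other;
     else
       y = *q;

   becomes

     sinktemp = p;
     p = p + 1;
     if (something)
       *q = other;
     else
       {
	 *q = sinktemp;
	 y = *q;
       }

   so the store to *q is executed only on the path that actually needs the
   old value, and store copy propagation can clean up afterwards.  */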
/* Statistics kept by the pass; both counters are reported through
   statistics_counter_event at the end of the pass.  */
static struct
{
  /* The number of statements sunk down the flowgraph by code sinking.  */
  int sunk;

  /* The number of stores commoned and sunk down by store commoning.  */
  int commoned;
} sink_stats;
/* Given a PHI, and one of its arguments (DEF), find the edge for
   that argument and return it.  If the argument occurs twice in the PHI node,
   we return NULL.  */

static basic_block
find_bb_for_arg (gphi *phi, tree def)
{
  size_t i;
  bool foundone = false;
  basic_block result = NULL;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (PHI_ARG_DEF (phi, i) == def)
      {
	/* A second occurrence means there is no unique edge to return.  */
	if (foundone)
	  return NULL;
	foundone = true;
	result = gimple_phi_arg_edge (phi, i)->src;
      }

  return result;
}
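
/* For illustration (not from the original sources): given the PHI

     x_3 = PHI <x_1 (bb 2), x_2 (bb 4)>

   find_bb_for_arg (phi, x_1) returns bb 2, the source block of the edge
   carrying x_1.  If x_1 flowed in along two different edges the result
   would be NULL, since there is no single block to sink to.  */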
/* When the first immediate use is in a statement, then return true if all
   immediate uses in IMM are in the same statement.
   We could also do the case where the first immediate use is in a phi node,
   and all the other uses are in phis in the same basic block, but this
   requires some expensive checking later (you have to make sure no def/vdef
   in the statement occurs for multiple edges in the various phi nodes it's
   used in, so that you only have one place you can sink it to).  */

static bool
all_immediate_uses_same_place (def_operand_p def_p)
{
  tree var = DEF_FROM_PTR (def_p);
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gimple *firstuse = NULL;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
	continue;
      if (firstuse == NULL)
	firstuse = USE_STMT (use_p);
      else if (firstuse != USE_STMT (use_p))
	return false;
    }

  return true;
}
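
/* For illustration (not from the original sources): with

     a_1 = b_2 + c_3;
     d_4 = a_1 * a_1;

   both immediate uses of a_1 are operands of the single statement defining
   d_4, so the function returns true and a_1's definition can simply be sunk
   to just before that statement.  */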
/* Find the nearest common dominator of all of the immediate uses in IMM.  */

static basic_block
nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts)
{
  tree var = DEF_FROM_PTR (def_p);
  auto_bitmap blocks;
  basic_block commondom;
  unsigned int j;
  bitmap_iterator bi;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      gimple *usestmt = USE_STMT (use_p);
      basic_block useblock;

      if (gphi *phi = dyn_cast <gphi *> (usestmt))
	{
	  int idx = PHI_ARG_INDEX_FROM_USE (use_p);

	  useblock = gimple_phi_arg_edge (phi, idx)->src;
	}
      else if (is_gimple_debug (usestmt))
	{
	  *debug_stmts = true;
	  continue;
	}
      else
	useblock = gimple_bb (usestmt);

      /* Short circuit.  Nothing dominates the entry block.  */
      if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	return NULL;

      bitmap_set_bit (blocks, useblock->index);
    }

  commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
    commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
					  BASIC_BLOCK_FOR_FN (cfun, j));
  return commondom;
}
/* Given EARLY_BB and LATE_BB, two blocks in a path through the dominator
   tree, return the best basic block between them (inclusive) to place
   statements.

   We want the most control dependent block in the shallowest loop nest.

   If the resulting block is in a shallower loop nest, then use it.  Else
   only use the resulting block if it has significantly lower execution
   frequency than EARLY_BB to avoid gratuitous statement movement.  We
   consider statements with VOPS more desirable to move.

   This pass would obviously benefit from PDO as it utilizes block
   frequencies.  It would also benefit from recomputing frequencies
   if profile data is not available since frequencies often get out
   of sync with reality.  */
static basic_block
select_best_block (basic_block early_bb,
		   basic_block late_bb,
		   gimple *stmt)
{
  basic_block best_bb = late_bb;
  basic_block temp_bb = late_bb;
  int threshold;

  while (temp_bb != early_bb)
    {
      /* If we've moved into a lower loop nest, then that becomes
	 our best block.  */
      if (bb_loop_depth (temp_bb) < bb_loop_depth (best_bb))
	best_bb = temp_bb;

      /* Walk up the dominator tree, hopefully we'll find a shallower
	 loop nest.  */
      temp_bb = get_immediate_dominator (CDI_DOMINATORS, temp_bb);
    }

  /* If we found a shallower loop nest, then we always consider that
     a win.  This will always give us the most control dependent block
     within that loop nest.  */
  if (bb_loop_depth (best_bb) < bb_loop_depth (early_bb))
    return best_bb;

  /* Get the sinking threshold.  If the statement to be moved has memory
     operands, then increase the threshold by 7% as those are even more
     profitable to avoid, clamping at 100%.  */
  threshold = param_sink_frequency_threshold;
  if (gimple_vuse (stmt) || gimple_vdef (stmt))
    {
      threshold += 7;
      if (threshold > 100)
	threshold = 100;
    }

  /* If BEST_BB is at the same nesting level, then require it to have
     significantly lower execution frequency to avoid gratuitous movement.  */
  if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
      /* If result of comparison is unknown, prefer EARLY_BB.
	 Thus use !(...>=..) rather than (...<...)  */
      && !(best_bb->count.apply_scale (100, 1)
	   >= early_bb->count.apply_scale (threshold, 1)))
    return best_bb;

  /* No better block found, so return EARLY_BB, which happens to be the
     statement's original block.  */
  return early_bb;
}
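
/* A worked example of the frequency heuristic above (illustrative numbers
   only; it assumes the default value of param_sink_frequency_threshold,
   75 at the time of writing): for a statement with virtual operands the
   threshold becomes 75 + 7 = 82, so a candidate block at the same loop
   depth is chosen only when

     best_bb->count * 100 < early_bb->count * 82

   i.e. the sink location must execute noticeably less often than the
   statement's current block; otherwise the statement stays put.  */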
/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
   determine the location to sink the statement to, if any.
   Returns true if there is such a location; in that case, TOGSI points to
   the statement before which STMT should be moved.  */
static bool
statement_sink_location (gimple *stmt, basic_block frombb,
			 gimple_stmt_iterator *togsi, bool *zero_uses_p)
{
  gimple *use = NULL;
  use_operand_p one_use = NULL_USE_OPERAND_P;
  basic_block sinkbb;
  use_operand_p use_p;
  def_operand_p def_p;
  ssa_op_iter iter;
  imm_use_iterator imm_iter;

  *zero_uses_p = false;

  /* We only can sink assignments and non-looping const/pure calls.  */
  int cf;
  if (!is_gimple_assign (stmt)
      && (!is_gimple_call (stmt)
	  || !((cf = gimple_call_flags (stmt)) & (ECF_CONST|ECF_PURE))
	  || (cf & ECF_LOOPING_CONST_OR_PURE)))
    return false;

  /* We only can sink stmts with a single definition.  */
  def_p = single_ssa_def_operand (stmt, SSA_OP_ALL_DEFS);
  if (def_p == NULL_DEF_OPERAND_P)
    return false;
  /* There are a few classes of things we can't or don't move, some because we
     don't have code to handle it, some because it's not profitable and some
     because it's not legal.

     We can't sink things that may be global stores, at least not without
     calculating a lot more information, because we may cause it to no longer
     be seen by an external routine that needs it depending on where it gets
     moved to.

     We can't sink statements that end basic blocks without splitting the
     incoming edge for the sink location to place it there.

     We can't sink statements that have volatile operands.

     We don't want to sink dead code, so anything with 0 immediate uses is not
     sunk.

     Don't sink BLKmode assignments if current function has any local explicit
     register variables, as BLKmode assignments may involve memcpy or memset
     calls or, on some targets, inline expansion thereof that sometimes need
     to use specific hard registers.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_side_effects (stmt)
      || (cfun->has_local_explicit_reg_vars
	  && TYPE_MODE (TREE_TYPE (gimple_get_lhs (stmt))) == BLKmode))
    return false;

  /* Return if there are no immediate uses of this stmt.  */
  if (has_zero_uses (DEF_FROM_PTR (def_p)))
    {
      *zero_uses_p = true;
      return false;
    }

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (DEF_FROM_PTR (def_p)))
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
    {
      tree use = USE_FROM_PTR (use_p);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
	return false;
    }
  /* If stmt is a store the one and only use needs to be the VOP
     merging PHI node.  */
  if (virtual_operand_p (DEF_FROM_PTR (def_p)))
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  gimple *use_stmt = USE_STMT (use_p);

	  /* A killing definition is not a use.  */
	  if ((gimple_has_lhs (use_stmt)
	       && operand_equal_p (gimple_get_lhs (stmt),
				   gimple_get_lhs (use_stmt), 0))
	      || stmt_kills_ref_p (use_stmt, gimple_get_lhs (stmt)))
	    {
	      /* If use_stmt is or might be a nop assignment then USE_STMT
		 acts as a use as well as definition.  */
	      if (stmt != use_stmt
		  && ref_maybe_used_by_stmt_p (use_stmt,
					       gimple_get_lhs (stmt)))
		return false;
	      continue;
	    }

	  if (gimple_code (use_stmt) != GIMPLE_PHI)
	    return false;

	  /* Require a single merging PHI use of the VDEF.  */
	  if (use && use != use_stmt)
	    return false;

	  use = use_stmt;
	}
      if (!use)
	return false;
    }
  /* If all the immediate uses are not in the same place, find the nearest
     common dominator of all the immediate uses.  For PHI nodes, we have to
     find the nearest common dominator of all of the predecessor blocks, since
     that is where insertion would have to take place.  */
  else if (gimple_vuse (stmt)
	   || !all_immediate_uses_same_place (def_p))
    {
      bool debug_stmts = false;
      basic_block commondom = nearest_common_dominator_of_uses (def_p,
								 &debug_stmts);

      if (commondom == frombb)
	return false;
      /* If this is a load then do not sink past any stores.
	 ??? This is overly simple but cheap.  We basically look
	 for an existing load with the same VUSE in the path to one
	 of the sink candidate blocks and we adjust commondom to the
	 nearest to commondom.  */
      if (gimple_vuse (stmt))
	{
	  /* Do not sink loads from hard registers.  */
	  if (gimple_assign_single_p (stmt)
	      && TREE_CODE (gimple_assign_rhs1 (stmt)) == VAR_DECL
	      && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt)))
	    return false;

	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  basic_block found = NULL;
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_vuse (stmt))
	    {
	      gimple *use_stmt = USE_STMT (use_p);
	      basic_block bb = gimple_bb (use_stmt);
	      /* For PHI nodes the block we know something about
		 is the incoming block with the use.  */
	      if (gimple_code (use_stmt) == GIMPLE_PHI)
		bb = EDGE_PRED (bb, PHI_ARG_INDEX_FROM_USE (use_p))->src;
	      /* Any dominator of commondom would be ok with
		 adjusting commondom to that block.  */
	      bb = nearest_common_dominator (CDI_DOMINATORS, bb, commondom);
	      if (!found)
		found = bb;
	      else if (dominated_by_p (CDI_DOMINATORS, bb, found))
		found = bb;
	      /* If we can't improve, stop.  */
	      if (found == commondom)
		break;
	    }
	  commondom = found;
	  if (commondom == frombb)
	    return false;
	}
      /* Our common dominator has to be dominated by frombb in order to be a
	 trivially safe place to put this statement, since it has multiple
	 uses.  */
      if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
	return false;

      commondom = select_best_block (frombb, commondom, stmt);

      if (commondom == frombb)
	return false;

      *togsi = gsi_after_labels (commondom);

      return true;
    }
  else
    {
      FOR_EACH_IMM_USE_FAST (one_use, imm_iter, DEF_FROM_PTR (def_p))
	{
	  if (is_gimple_debug (USE_STMT (one_use)))
	    continue;
	  break;
	}
      use = USE_STMT (one_use);
      if (gimple_code (use) != GIMPLE_PHI)
	{
	  sinkbb = select_best_block (frombb, gimple_bb (use), stmt);

	  if (sinkbb == frombb)
	    return false;

	  if (sinkbb == gimple_bb (use))
	    *togsi = gsi_for_stmt (use);
	  else
	    *togsi = gsi_after_labels (sinkbb);

	  return true;
	}
    }
  sinkbb = find_bb_for_arg (as_a <gphi *> (use), DEF_FROM_PTR (def_p));

  /* This can happen if there are multiple uses in a PHI.  */
  if (!sinkbb)
    return false;

  sinkbb = select_best_block (frombb, sinkbb, stmt);
  if (!sinkbb || sinkbb == frombb)
    return false;

  /* If the latch block is empty, don't make it non-empty by sinking
     something into it.  */
  if (sinkbb == frombb->loop_father->latch
      && empty_block_p (sinkbb))
    return false;

  *togsi = gsi_after_labels (sinkbb);

  return true;
}
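
/* An illustrative example (not from the original sources) of the PHI case
   handled last above:

     <bb 2>:
       x_1 = a_2 * b_3;
       if (cond_4) goto <bb 3>; else goto <bb 4>;
     ...
     <bb 5>:
       x_6 = PHI <x_1 (bb 3), y_5 (bb 4)>

   The only use of x_1 is the PHI argument flowing in from bb 3, so
   find_bb_for_arg returns bb 3 and, if select_best_block agrees, the
   multiplication is sunk into bb 3, keeping it off the path through
   bb 4.  */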
/* Very simplistic code to sink common stores from the predecessor through
   our virtual PHI.  We do this before sinking stmts from BB as it might
   expose sinking opportunities of the merged stores.
   Once we have partial dead code elimination through something like SSU-PRE
   this should be moved there.  */
static unsigned
sink_common_stores_to_bb (basic_block bb)
{
  unsigned todo = 0;
  gphi *phi;

  if (EDGE_COUNT (bb->preds) > 1
      && (phi = get_virtual_phi (bb)))
    {
      /* Repeat until no more common stores are found.  */
      while (1)
	{
	  gimple *first_store = NULL;
	  auto_vec <tree, 5> vdefs;
	  gimple_stmt_iterator gsi;

	  /* Search for common stores defined by all virtual PHI args.
	     ??? Common stores not present in all predecessors could
	     be handled by inserting a forwarder to sink to.  Generally
	     this involves deciding which stores to do this for if
	     multiple common stores are present for different sets of
	     predecessors.  See PR11832 for an interesting case.  */
	  for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
	    {
	      tree arg = gimple_phi_arg_def (phi, i);
	      gimple *def = SSA_NAME_DEF_STMT (arg);
	      if (! is_gimple_assign (def)
		  || stmt_can_throw_internal (cfun, def)
		  || (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL))
		{
		  /* ??? We could handle some cascading with the def being
		     another PHI.  We'd have to insert multiple PHIs for
		     the rhs then though (if they are not all equal).  */
		  first_store = NULL;
		  break;
		}
	      /* ??? Do not try to do anything fancy with aliasing, thus
		 do not sink across non-aliased loads (or even stores,
		 so different store order will make the sinking fail).  */
	      bool all_uses_on_phi = true;
	      imm_use_iterator iter;
	      use_operand_p use_p;
	      FOR_EACH_IMM_USE_FAST (use_p, iter, arg)
		if (USE_STMT (use_p) != phi)
		  {
		    all_uses_on_phi = false;
		    break;
		  }
	      if (! all_uses_on_phi)
		{
		  first_store = NULL;
		  break;
		}
	      /* Check all stores are to the same LHS.  */
	      if (! first_store)
		first_store = def;
	      /* ??? We could handle differing SSA uses in the LHS by inserting
		 PHIs for them.  */
	      else if (! operand_equal_p (gimple_assign_lhs (first_store),
					  gimple_assign_lhs (def), 0)
		       || (gimple_clobber_p (first_store)
			   != gimple_clobber_p (def)))
		{
		  first_store = NULL;
		  break;
		}
	      vdefs.safe_push (arg);
	    }
	  if (! first_store)
	    break;
	  /* Check if we need a PHI node to merge the stored values.  */
	  bool allsame = true;
	  if (!gimple_clobber_p (first_store))
	    for (unsigned i = 1; i < vdefs.length (); ++i)
	      {
		gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		if (! operand_equal_p (gimple_assign_rhs1 (first_store),
				       gimple_assign_rhs1 (def), 0))
		  {
		    allsame = false;
		    break;
		  }
	      }

	  /* We cannot handle aggregate values if we need to merge them.  */
	  tree type = TREE_TYPE (gimple_assign_lhs (first_store));
	  if (! allsame
	      && ! is_gimple_reg_type (type))
	    break;

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
			       first_store,
			       "sinking common stores %sto ",
			       allsame ? "with same value " : "");
	      dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM,
				 gimple_assign_lhs (first_store));
	      dump_printf (MSG_OPTIMIZED_LOCATIONS, "\n");
	    }
	  /* Insert a PHI to merge differing stored values if necessary.
	     Note that in general inserting PHIs isn't a very good idea as
	     it makes the job of coalescing and register allocation harder.
	     Even common SSA uses on the rhs/lhs might extend their lifetime
	     across multiple edges by this code motion which makes
	     register allocation harder.  */
	  tree from;
	  if (! allsame)
	    {
	      from = make_ssa_name (type);
	      gphi *newphi = create_phi_node (from, bb);
	      for (unsigned i = 0; i < vdefs.length (); ++i)
		{
		  gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		  add_phi_arg (newphi, gimple_assign_rhs1 (def),
			       EDGE_PRED (bb, i), UNKNOWN_LOCATION);
		}
	    }
	  else
	    from = gimple_assign_rhs1 (first_store);
	  /* Remove all stores.  */
	  for (unsigned i = 0; i < vdefs.length (); ++i)
	    TREE_VISITED (vdefs[i]) = 1;
	  for (unsigned i = 0; i < vdefs.length (); ++i)
	    /* If we have more than one use of a VDEF on the PHI make sure
	       we remove the defining stmt only once.  */
	    if (TREE_VISITED (vdefs[i]))
	      {
		TREE_VISITED (vdefs[i]) = 0;
		gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
		gsi = gsi_for_stmt (def);
		unlink_stmt_vdef (def);
		gsi_remove (&gsi, true);
	      }

	  /* Insert the first store at the beginning of the merge BB.  */
	  gimple_set_vdef (first_store, gimple_phi_result (phi));
	  SSA_NAME_DEF_STMT (gimple_vdef (first_store)) = first_store;
	  gimple_phi_set_result (phi, make_ssa_name (gimple_vop (cfun)));
	  gimple_set_vuse (first_store, gimple_phi_result (phi));
	  gimple_assign_set_rhs1 (first_store, from);
	  /* ??? Should we reset first_store's location?  */
	  gsi = gsi_after_labels (bb);
	  gsi_insert_before (&gsi, first_store, GSI_SAME_STMT);
	  sink_stats.commoned++;

	  todo |= TODO_cleanup_cfg;
	}

      /* We could now have empty predecessors that we could remove,
	 forming a proper CFG for further sinking.  Note that even
	 CFG cleanup doesn't do this fully at the moment and it
	 doesn't preserve post-dominators in the process either.
	 The mergephi pass might do it though.  gcc.dg/tree-ssa/ssa-sink-13.c
	 shows this nicely if you disable tail merging or (same effect)
	 make the stored values unequal.  */
    }

  return todo;
}
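
/* An illustrative example (not from the original sources) of the store
   commoning done above.  Given a merge block whose virtual PHI joins two
   stores to the same location,

     if (cond_1)
       *p_2 = a_3;
     else
       *p_2 = b_4;
     # .MEM_7 = PHI <.MEM_5, .MEM_6>

   the two stores are deleted and a single store is inserted at the start
   of the merge block, with a new PHI merging the stored values when they
   differ:

     val_8 = PHI <a_3, b_4>
     *p_2 = val_8;

   If both arms stored the same value, no value PHI is needed.  */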
/* Perform code sinking on BB */

static unsigned
sink_code_in_bb (basic_block bb)
{
  basic_block son;
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e;
  bool last = true;
  unsigned todo = 0;

  /* Sink common stores from the predecessor through our virtual PHI.  */
  todo |= sink_common_stores_to_bb (bb);

  /* If this block doesn't dominate anything, there can't be any place to sink
     the statements to.  */
  if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
    goto earlyout;

  /* We can't move things across abnormal edges, so don't try.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_ABNORMAL)
      goto earlyout;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_stmt_iterator togsi;
      bool zero_uses_p;

      if (!statement_sink_location (stmt, bb, &togsi, &zero_uses_p))
	{
	  gimple_stmt_iterator saved = gsi;
	  if (!gsi_end_p (gsi))
	    gsi_prev (&gsi);
	  /* If we face a dead stmt remove it as it possibly blocks
	     sinking of uses.  */
	  if (zero_uses_p
	      && ! gimple_vdef (stmt))
	    gsi_remove (&saved, true);
	  else
	    last = false;
	  continue;
	}
      if (dump_file)
	{
	  fprintf (dump_file, "Sinking ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
	  fprintf (dump_file, " from bb %d to bb %d\n",
		   bb->index, (gsi_bb (togsi))->index);
	}

      /* Update virtual operands of statements in the path we
	 do not sink to not have their virtual operands killed.  */
      if (gimple_vdef (stmt))
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *vuse_stmt;

	  FOR_EACH_IMM_USE_STMT (vuse_stmt, iter, gimple_vdef (stmt))
	    if (gimple_code (vuse_stmt) != GIMPLE_PHI)
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, gimple_vuse (stmt));
	}

      /* If this is the end of the basic block, we need to insert at the end
	 of the basic block.  */
      if (gsi_end_p (togsi))
	gsi_move_to_bb_end (&gsi, gsi_bb (togsi));
      else
	gsi_move_before (&gsi, &togsi);

      sink_stats.sunk++;

      /* If we've just removed the last statement of the BB, the
	 gsi_end_p() test below would fail, but gsi_prev() would have
	 succeeded, and we want it to succeed.  So we keep track of
	 whether we're at the last statement and pick up the new last
	 statement.  */
      if (last)
	{
	  gsi = gsi_last_bb (bb);
	  last = false;
	}
      else if (!gsi_end_p (gsi))
	gsi_prev (&gsi);
    }

 earlyout:
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    todo |= sink_code_in_bb (son);

  return todo;
}
/* Perform code sinking.
   This moves code down the flowgraph when we know it would be
   profitable to do so, or it wouldn't increase the number of
   executions of the statement.

   IE given

   a_1 = b + c;
   if (<something>)
     {
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   we'll transform this into:

   if (<something>)
     {
       a_1 = b + c;
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   Note that this reduces the number of computations of a = b + c to 1
   when we take the else edge, instead of 2.  */
const pass_data pass_data_sink_code =
{
  GIMPLE_PASS, /* type */
  "sink", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINK, /* tv_id */
  /* PROP_no_crit_edges is ensured by running split_edges_for_insertion in
     pass_data_sink_code::execute ().  */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
class pass_sink_code : public gimple_opt_pass
{
public:
  pass_sink_code (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_sink_code, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_sink != 0; }
  virtual unsigned int execute (function *);

}; // class pass_sink_code
unsigned int
pass_sink_code::execute (function *fun)
{
  loop_optimizer_init (LOOPS_NORMAL);
  split_edges_for_insertion ();
  connect_infinite_loops_to_exit ();
  memset (&sink_stats, 0, sizeof (sink_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  unsigned todo = sink_code_in_bb (EXIT_BLOCK_PTR_FOR_FN (fun));
  statistics_counter_event (fun, "Sunk statements", sink_stats.sunk);
  statistics_counter_event (fun, "Commoned stores", sink_stats.commoned);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
  loop_optimizer_finalize ();

  return todo;
}
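
/* Usage note (not part of the original sources): the pass is gated on
   flag_tree_sink, i.e. it is controlled by -ftree-sink, and since the pass
   is named "sink" its work can be inspected with the tree dump option
   -fdump-tree-sink-details, for example

     gcc -O2 -fdump-tree-sink-details foo.c

   which records the "Sinking ... from bb N to bb M" messages printed by
   sink_code_in_bb above in the generated .sink dump file.  */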
gimple_opt_pass *
make_pass_sink_code (gcc::context *ctxt)
{
  return new pass_sink_code (ctxt);
}