/* Code sinking for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-inline.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "hashtab.h"
#include "tree-iterator.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "flags.h"
#include "cfgloop.h"
#include "params.h"

/* TODO:
   1. Sinking a store using only scalar promotion (i.e. without moving the
      RHS):

      *q = p;
      p = p + 1;
      if (something)
        *q = <not p>;
      else
        y = *q;

      should become

      sinktemp = p;
      p = p + 1;
      if (something)
        *q = <not p>;
      else
        {
          *q = sinktemp;
          y = *q;
        }

      Store copy propagation will take care of the store elimination above.

   2. Sinking using Partial Dead Code Elimination.  */

static struct
{
  /* The number of statements sunk down the flowgraph by code sinking.  */
  int sunk;
} sink_stats;

/* Given a PHI, and one of its arguments (DEF), find the edge for that
   argument, and return the edge's source block.  If the argument occurs
   twice in the PHI node, we return NULL.  */
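
/* For example, given the PHI node a_3 = PHI <a_1 (bb2), a_2 (bb4)>
   (illustrative names), asking for DEF a_2 yields bb4, while asking for
   an SSA name that appears as the argument on two different edges
   yields NULL.  */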

static basic_block
find_bb_for_arg (gimple phi, tree def)
{
  size_t i;
  bool foundone = false;
  basic_block result = NULL;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (PHI_ARG_DEF (phi, i) == def)
      {
        if (foundone)
          return NULL;
        foundone = true;
        result = gimple_phi_arg_edge (phi, i)->src;
      }
  return result;
}

/* Return true if all immediate uses of the defs of STMT are in the same
   statement, given that the first immediate use is in a statement.
   We could also do the case where the first immediate use is in a phi node,
   and all the other uses are in phis in the same basic block, but this
   requires some expensive checking later (you have to make sure no def/vdef
   in the statement occurs for multiple edges in the various phi nodes it's
   used in, so that you only have one place you can sink it to).  */
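
/* For instance (illustrative GIMPLE), if a_1 = b_2 + c_3 is used only
   by d_4 = a_1 * a_1, both uses resolve to the same USE_STMT and we
   return true; a second use in a different statement makes us return
   false.  */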

static bool
all_immediate_uses_same_place (gimple stmt)
{
  gimple firstuse = NULL;
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  tree var;

  FOR_EACH_SSA_TREE_OPERAND (var, stmt, op_iter, SSA_OP_ALL_DEFS)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
        {
          if (is_gimple_debug (USE_STMT (use_p)))
            continue;
          if (firstuse == NULL)
            firstuse = USE_STMT (use_p);
          else
            if (firstuse != USE_STMT (use_p))
              return false;
        }
    }

  return true;
}

/* Find the nearest common dominator of all of the immediate uses of the
   defs of STMT.  */

static basic_block
nearest_common_dominator_of_uses (gimple stmt, bool *debug_stmts)
{
  bitmap blocks = BITMAP_ALLOC (NULL);
  basic_block commondom;
  unsigned int j;
  bitmap_iterator bi;
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  tree var;

  bitmap_clear (blocks);
  FOR_EACH_SSA_TREE_OPERAND (var, stmt, op_iter, SSA_OP_ALL_DEFS)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
        {
          gimple usestmt = USE_STMT (use_p);
          basic_block useblock;

          if (gimple_code (usestmt) == GIMPLE_PHI)
            {
              int idx = PHI_ARG_INDEX_FROM_USE (use_p);

              useblock = gimple_phi_arg_edge (usestmt, idx)->src;
            }
          else if (is_gimple_debug (usestmt))
            {
              *debug_stmts = true;
              continue;
            }
          else
            {
              useblock = gimple_bb (usestmt);
            }

          /* Short circuit.  Nothing dominates the entry block.  */
          if (useblock == ENTRY_BLOCK_PTR)
            {
              BITMAP_FREE (blocks);
              return NULL;
            }
          bitmap_set_bit (blocks, useblock->index);
        }
    }
  commondom = BASIC_BLOCK (bitmap_first_set_bit (blocks));
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
    commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
                                          BASIC_BLOCK (j));
  BITMAP_FREE (blocks);
  return commondom;
}

/* Given EARLY_BB and LATE_BB, two blocks in a path through the dominator
   tree, return the best basic block between them (inclusive) to place
   statements.

   We want the most control dependent block in the shallowest loop nest.

   If the resulting block is in a shallower loop nest, then use it.  Else
   only use the resulting block if it has significantly lower execution
   frequency than EARLY_BB to avoid gratuitous statement movement.  We
   consider statements with VOPS more desirable to move.

   This pass would obviously benefit from PDO as it utilizes block
   frequencies.  It would also benefit from recomputing frequencies
   if profile data is not available since frequencies often get out
   of sync with reality.  */

static basic_block
select_best_block (basic_block early_bb,
                   basic_block late_bb,
                   gimple stmt)
{
  basic_block best_bb = late_bb;
  basic_block temp_bb = late_bb;
  int threshold;

  while (temp_bb != early_bb)
    {
      /* If we've moved into a lower loop nest, then that becomes
         our best block.  */
      if (bb_loop_depth (temp_bb) < bb_loop_depth (best_bb))
        best_bb = temp_bb;

      /* Walk up the dominator tree, hopefully we'll find a shallower
         loop nest.  */
      temp_bb = get_immediate_dominator (CDI_DOMINATORS, temp_bb);
    }

  /* If we found a shallower loop nest, then we always consider that
     a win.  This will always give us the most control dependent block
     within that loop nest.  */
  if (bb_loop_depth (best_bb) < bb_loop_depth (early_bb))
    return best_bb;

  /* Get the sinking threshold.  If the statement to be moved has memory
     operands, then increase the threshold by 7% as those are even more
     profitable to avoid, clamping at 100%.  */
  threshold = PARAM_VALUE (PARAM_SINK_FREQUENCY_THRESHOLD);
  if (gimple_vuse (stmt) || gimple_vdef (stmt))
    {
      threshold += 7;
      if (threshold > 100)
        threshold = 100;
    }
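
  /* For example, assuming the default value of 75 for
     --param sink-frequency-threshold, a statement with virtual operands
     gets an effective threshold of 82: it may be moved to a same-depth
     block only if that block's frequency is below 82% of EARLY_BB's.  */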

  /* If BEST_BB is at the same nesting level, then require it to have
     significantly lower execution frequency to avoid gratuitous movement.  */
  if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
      && best_bb->frequency < (early_bb->frequency * threshold / 100.0))
    return best_bb;

  /* No better block found, so return EARLY_BB, which happens to be the
     statement's original block.  */
  return early_bb;
}

/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
   determine the location to sink the statement to, if any.
   Return true if there is such a location; in that case, TOGSI points to
   the statement before which STMT should be moved.  */

static bool
statement_sink_location (gimple stmt, basic_block frombb,
                         gimple_stmt_iterator *togsi)
{
  gimple use;
  use_operand_p one_use = NULL_USE_OPERAND_P;
  basic_block sinkbb;
  use_operand_p use_p;
  def_operand_p def_p;
  ssa_op_iter iter;
  imm_use_iterator imm_iter;

  /* We can only sink assignments.  */
  if (!is_gimple_assign (stmt))
    return false;

  /* We can only sink stmts with a single definition.  */
  def_p = single_ssa_def_operand (stmt, SSA_OP_ALL_DEFS);
  if (def_p == NULL_DEF_OPERAND_P)
    return false;

  /* Return false if there are no immediate uses of this stmt.  */
  if (has_zero_uses (DEF_FROM_PTR (def_p)))
    return false;

  /* There are a few classes of things we can't or don't move, some because we
     don't have code to handle it, some because it's not profitable and some
     because it's not legal.

     We can't sink things that may be global stores, at least not without
     calculating a lot more information, because we may cause it to no longer
     be seen by an external routine that needs it depending on where it gets
     moved to.

     We don't want to sink loads from memory.

     We can't sink statements that end basic blocks without splitting the
     incoming edge for the sink location to place it there.

     We can't sink statements that have volatile operands.

     We don't want to sink dead code, so anything with 0 immediate uses is not
     sunk.

     Don't sink BLKmode assignments if the current function has any local
     explicit register variables, as BLKmode assignments may involve memcpy or
     memset calls or, on some targets, inline expansion thereof that sometimes
     needs to use specific hard registers.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_side_effects (stmt)
      || gimple_has_volatile_ops (stmt)
      || (gimple_vuse (stmt) && !gimple_vdef (stmt))
      || (cfun->has_local_explicit_reg_vars
          && TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))) == BLKmode))
    return false;

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (DEF_FROM_PTR (def_p)))
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
    {
      tree use = USE_FROM_PTR (use_p);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
        return false;
    }

  use = NULL;

  /* If STMT is a store, the one and only use needs to be the VOP
     merging PHI node.  */
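
  /* A sketch (in illustrative GIMPLE; the virtual operand names are
     made up) of the only shape we accept for a store:

        # .MEM_3 = VDEF <.MEM_1>
        *p_2 = x_4;                                  <-- STMT
        ...
        # .MEM_5 = PHI <.MEM_3(3), .MEM_4(4)>        <-- its only use

     Any other use of .MEM_3, e.g. an aliased load, pins STMT in place;
     killing definitions are the exception, see below.  */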
  if (gimple_vdef (stmt))
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          gimple use_stmt = USE_STMT (use_p);

          /* A killing definition is not a use.  */
          if ((gimple_has_lhs (use_stmt)
               && operand_equal_p (gimple_assign_lhs (stmt),
                                   gimple_get_lhs (use_stmt), 0))
              || stmt_kills_ref_p (use_stmt, gimple_assign_lhs (stmt)))
            {
              /* If USE_STMT is or might be a nop assignment then USE_STMT
                 acts as a use as well as a definition.  */
              if (stmt != use_stmt
                  && ref_maybe_used_by_stmt_p (use_stmt,
                                               gimple_assign_lhs (stmt)))
                return false;
              continue;
            }

          if (gimple_code (use_stmt) != GIMPLE_PHI)
            return false;

          if (use
              && use != use_stmt)
            return false;

          use = use_stmt;
        }
      if (!use)
        return false;
    }
  /* If all the immediate uses are not in the same place, find the nearest
     common dominator of all the immediate uses.  For PHI nodes, we have to
     find the nearest common dominator of all of the predecessor blocks, since
     that is where insertion would have to take place.  */
  else if (!all_immediate_uses_same_place (stmt))
    {
      bool debug_stmts = false;
      basic_block commondom = nearest_common_dominator_of_uses (stmt,
                                                                &debug_stmts);

      if (commondom == frombb)
        return false;

      /* Our common dominator has to be dominated by frombb in order to be a
         trivially safe place to put this statement, since it has multiple
         uses.  */
      if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
        return false;

      commondom = select_best_block (frombb, commondom, stmt);

      if (commondom == frombb)
        return false;

      *togsi = gsi_after_labels (commondom);

      return true;
    }
  else
    {
      FOR_EACH_IMM_USE_FAST (one_use, imm_iter, DEF_FROM_PTR (def_p))
        {
          if (is_gimple_debug (USE_STMT (one_use)))
            continue;
          break;
        }
      use = USE_STMT (one_use);
      if (gimple_code (use) != GIMPLE_PHI)
        {
          sinkbb = gimple_bb (use);
          sinkbb = select_best_block (frombb, gimple_bb (use), stmt);

          if (sinkbb == frombb)
            return false;

          *togsi = gsi_for_stmt (use);

          return true;
        }
    }

  sinkbb = find_bb_for_arg (use, DEF_FROM_PTR (def_p));

  /* This can happen if there are multiple uses in a PHI.  */
  if (!sinkbb)
    return false;

  sinkbb = select_best_block (frombb, sinkbb, stmt);
  if (!sinkbb || sinkbb == frombb)
    return false;

  /* If the latch block is empty, don't make it non-empty by sinking
     something into it.  */
  if (sinkbb == frombb->loop_father->latch
      && empty_block_p (sinkbb))
    return false;

  *togsi = gsi_after_labels (sinkbb);

  return true;
}

/* Perform code sinking on BB.  */

static void
sink_code_in_bb (basic_block bb)
{
  basic_block son;
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e;
  bool last = true;

  /* If this block doesn't dominate anything, there can't be any place to sink
     the statements to.  */
  if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
    goto earlyout;

  /* We can't move things across abnormal edges, so don't try.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_ABNORMAL)
      goto earlyout;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_stmt_iterator togsi;

      if (!statement_sink_location (stmt, bb, &togsi))
        {
          if (!gsi_end_p (gsi))
            gsi_prev (&gsi);
          last = false;
          continue;
        }

      if (dump_file)
        {
          fprintf (dump_file, "Sinking ");
          print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
          fprintf (dump_file, " from bb %d to bb %d\n",
                   bb->index, (gsi_bb (togsi))->index);
        }

      /* Update virtual operands of statements in the path we
         do not sink to.  */
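      /* For example (with illustrative names), when the store
         "# .MEM_3 = VDEF <.MEM_1>  *p_2 = x_4;" is sunk, a non-PHI
         consumer left on the path, such as "# VUSE <.MEM_3>  y_5 = *q_6;",
         is rewired to use .MEM_1, the store's incoming virtual operand.  */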
      if (gimple_vdef (stmt))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple vuse_stmt;

          FOR_EACH_IMM_USE_STMT (vuse_stmt, iter, gimple_vdef (stmt))
            if (gimple_code (vuse_stmt) != GIMPLE_PHI)
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, gimple_vuse (stmt));
        }

      /* If this is the end of the basic block, we need to insert at the end
         of the basic block.  */
      if (gsi_end_p (togsi))
        gsi_move_to_bb_end (&gsi, gsi_bb (togsi));
      else
        gsi_move_before (&gsi, &togsi);

      sink_stats.sunk++;

      /* If we've just removed the last statement of the BB, the
         gsi_end_p() test below would fail, but gsi_prev() would have
         succeeded, and we want it to succeed.  So we keep track of
         whether we're at the last statement and pick up the new last
         statement.  */
      if (last)
        {
          gsi = gsi_last_bb (bb);
          continue;
        }

      last = false;
      if (!gsi_end_p (gsi))
        gsi_prev (&gsi);
    }

 earlyout:
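  /* Walk the post-dominator tree.  The fake edges added in
     execute_sink_code make EXIT reachable from every block, so the
     recursion rooted there visits each reachable block exactly once.  */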
  for (son = first_dom_son (CDI_POST_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_POST_DOMINATORS, son))
    sink_code_in_bb (son);
}

/* Perform code sinking.
   This moves code down the flowgraph when we know it would be
   profitable to do so, or it wouldn't increase the number of
   executions of the statement.

   I.e. given

   a_1 = b + c;
   if (<something>)
     {
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   we'll transform this into:

   if (<something>)
     {
       a_1 = b + c;
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   Note that this reduces the number of computations of a = b + c to 1
   when we take the else edge, instead of 2.  */

static void
execute_sink_code (void)
{
  loop_optimizer_init (LOOPS_NORMAL);
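
  /* Add fake edges from any infinite loops to the exit block; this
     guarantees that every basic block appears in the post-dominator
     tree walked by sink_code_in_bb.  */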
  connect_infinite_loops_to_exit ();
  memset (&sink_stats, 0, sizeof (sink_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  sink_code_in_bb (EXIT_BLOCK_PTR);
  statistics_counter_event (cfun, "Sunk statements", sink_stats.sunk);
  free_dominance_info (CDI_POST_DOMINATORS);
  remove_fake_exit_edges ();
  loop_optimizer_finalize ();
}

/* Gate and execute functions for the code sinking pass.  */

static unsigned int
do_sink (void)
{
  execute_sink_code ();
  return 0;
}

static bool
gate_sink (void)
{
  return flag_tree_sink != 0;
}

namespace {

const pass_data pass_data_sink_code =
{
  GIMPLE_PASS, /* type */
  "sink", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SINK, /* tv_id */
  ( PROP_no_crit_edges | PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_update_ssa | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_sink_code : public gimple_opt_pass
{
public:
  pass_sink_code (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_sink_code, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_sink (); }
  unsigned int execute () { return do_sink (); }

}; // class pass_sink_code

} // anon namespace

gimple_opt_pass *
make_pass_sink_code (gcc::context *ctxt)
{
  return new pass_sink_code (ctxt);
}