/* SSA Jump Threading
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "predict.h"
#include "tree.h"
#include "gimple.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-loop.h"
#include "cfganal.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
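
/* This file implements the FSM jump threader: starting from a block that
   ends in a control statement, it walks the SSA definition chain of the
   controlling value backwards through PHI nodes, looking for paths along
   which that value is a compile-time constant.  The motivating case
   (an illustrative, simplified example) is a switch-based state machine:

     int state = 0;
     while (more_input ())
       switch (state)
         {
         case 0: ... state = 2; break;
         case 1: ... state = 0; break;
         case 2: ... state = 1; break;
         }

   On the path from an assignment such as "state = 2" back around the loop
   to the switch, STATE is known to be 2, so the threader can copy the
   blocks on that path and jump straight to case 2, bypassing the switch
   dispatch.  */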

static int max_threaded_paths;

/* Simple helper to get the last statement from BB, which is assumed
   to be a control statement.  Return NULL if the last statement is
   not a control statement.  */

static gimple *
get_gimple_control_stmt (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  gimple *stmt = gsi_stmt (gsi);
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_COND || code == GIMPLE_SWITCH || code == GIMPLE_GOTO)
    return stmt;
  return NULL;
}

/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.
   Bound the recursion to basic blocks belonging to LOOP.  */

static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
                      vec<basic_block, va_gc> *&path,
                      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
        if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
          {
            vec_safe_push (path, start_bb);
            return true;
          }
    }

  return false;
}
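
/* Note the ordering fsm_find_thread_path produces: blocks are pushed as
   the recursion unwinds, so a successful search from START_BB = A to
   END_BB = C through B leaves PATH = {C, B, A}, i.e. in reverse order
   (illustrative block names).  The callers below rely on this.  */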

/* Examine jump threading path PATH to which we want to add BBI.

   If the resulting path is profitable to thread, then return the
   final taken edge from the path, NULL otherwise.

   NAME is the SSA_NAME of the variable we found to have a constant
   value on PATH.  ARG is the value of that SSA_NAME.

   BBI will be appended to PATH when we have a profitable jump threading
   path.  Callers are responsible for removing BBI from PATH in that case.  */
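
/* As an illustration of that contract (hypothetical GIMPLE, not taken
   from a real dump): given

     if (x_1 == 0) goto bb_true; else goto bb_false;

   a caller that traced NAME = x_1 back to the constant 0 along PATH
   passes ARG = 0 and, when the path is deemed profitable, gets back the
   edge to bb_true.  */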

static edge
profitable_jump_thread_path (vec<basic_block, va_gc> *&path,
                             basic_block bbi, tree name, tree arg)
{
  /* Note BBI is not in the path yet, hence the +1 in the test below
     to make sure BBI is accounted for in the path length test.  */
  int path_length = path->length ();

  /* We can get a length of 0 here when the statement that
     makes a conditional generate a compile-time constant
     result is in the same block as the conditional.

     That's not really a jump threading opportunity, but instead is
     simple cprop & simplification.  We could handle it here if we
     wanted by wiring up all the incoming edges.  If we run this
     early in IPA, that might be worth doing.  For now we just
     reject that case.  */
  if (path_length == 0)
    return NULL;

  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of basic blocks on the path "
                 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
      return NULL;
    }

  if (max_threaded_paths <= 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of previously recorded FSM paths to "
                 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
      return NULL;
    }

  /* Add BBI to the path.
     From this point onward, if we decide the path is not profitable
     to thread, we must remove BBI from the path.  */
  vec_safe_push (path, bbi);
  ++path_length;

  int n_insns = 0;
  gimple_stmt_iterator gsi;
  int j;
  loop_p loop = (*path)[0]->loop_father;
  /* Set if some block on the path belongs to a different loop.  */
  bool path_crosses_loops = false;
  /* Set if the path includes LOOP's latch block.  */
  bool threaded_through_latch = false;
  /* Set if a block other than PATH[0] ends in a multiway branch.  */
  bool multiway_branch_in_path = false;
  /* Set if PATH[0] itself ends in a multiway branch, i.e. the branch
     we will be able to eliminate by threading.  */
  bool threaded_multiway_branch = false;

  /* Count the number of instructions on the path: as these instructions
     will have to be duplicated, we will not record the path if there
     are too many instructions on the path.  Also check that all the
     blocks in the path belong to a single loop.  */
  for (j = 0; j < path_length; j++)
    {
      basic_block bb = (*path)[j];

      /* Remember, blocks in the path are stored in opposite order
         in the PATH array.  The last entry in the array represents
         the block with an outgoing edge that we will redirect to the
         jump threading path.  Thus we don't care about that block's
         loop father, how many statements it contains or whether it
         ends in a multiway branch, because it will not be copied.  */
      if (j < path_length - 1)
        {
          if (bb->loop_father != loop)
            {
              path_crosses_loops = true;
              break;
            }

          /* PHIs in the path will create degenerate PHIS in the
             copied path which will then get propagated away, so
             looking at just the duplicate path the PHIs would
             seem unimportant.

             But those PHIs, because they're assignments to objects
             typically with lives that exist outside the thread path,
             will tend to generate PHIs (or at least new PHI arguments)
             at points where we leave the thread path and rejoin
             the original blocks.  So we do want to account for them.

             We ignore virtual PHIs.  We also ignore cases where BB
             has a single incoming edge.  That's the most common
             degenerate PHI we'll see here.  Finally we ignore PHIs
             that are associated with the value we're tracking as
             that object likely dies.  */
          if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
            {
              for (gphi_iterator gsip = gsi_start_phis (bb);
                   !gsi_end_p (gsip);
                   gsi_next (&gsip))
                {
                  gphi *phi = gsip.phi ();
                  tree dst = gimple_phi_result (phi);

                  /* Note that if both NAME and DST are anonymous
                     SSA_NAMEs, then we do not have enough information
                     to consider them associated.  */
                  if ((SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
                       || !SSA_NAME_VAR (dst))
                      && !virtual_operand_p (dst))
                    ++n_insns;
                }
            }

          for (gsi = gsi_after_labels (bb);
               !gsi_end_p (gsi);
               gsi_next_nondebug (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              /* Do not count empty statements and labels.  */
              if (gimple_code (stmt) != GIMPLE_NOP
                  && !(gimple_code (stmt) == GIMPLE_ASSIGN
                       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
                  && !is_gimple_debug (stmt))
                ++n_insns;
            }

          /* We do not look at the block with the threaded branch
             in this loop.  So if any block with a last statement that
             is a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
             multiway branch on our path.

             The block in PATH[0] is special: it's the block whose
             branch we are going to be able to eliminate.  */
          gimple *last = last_stmt (bb);
          if (last && (gimple_code (last) == GIMPLE_SWITCH
                       || gimple_code (last) == GIMPLE_GOTO))
            {
              if (j == 0)
                threaded_multiway_branch = true;
              else
                multiway_branch_in_path = true;
            }
        }

      /* Note if we thread through the latch; unlike the checks above,
         this one does consider the last entry in the array.  */
      if (loop->latch == bb)
        threaded_through_latch = true;
    }

  /* We are going to remove the control statement at the end of the
     last block in the threading path.  So don't count it against our
     statement count.  */
  n_insns--;

  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
  gcc_assert (stmt);
  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
     we need to substitute, fold and simplify so we can determine
     the edge taken out of the last block.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code cond_code = gimple_cond_code (stmt);

      /* We know the underlying format of the condition.  */
      arg = fold_binary (cond_code, boolean_type_node,
                         arg, gimple_cond_rhs (stmt));
    }
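
  /* Continuing the illustration above: for the conditional
     "if (x_1 == 0)" and ARG = 0, this folds to boolean_true_node, so
     find_taken_edge below selects the edge taken when the condition
     is true.  */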

  /* If this path threaded through the loop latch back into the
     same loop and the destination does not dominate the loop
     latch, then this thread would create an irreducible loop.

     We have to know the outgoing edge to figure this out.  */
  edge taken_edge = find_taken_edge ((*path)[0], arg);

  /* There are cases where we may not be able to extract the
     taken edge.  For example, a computed goto to an absolute
     address.  Handle those cases gracefully.  */
  if (taken_edge == NULL)
    {
      path->pop ();
      return NULL;
    }

  bool creates_irreducible_loop = false;
  if (threaded_through_latch
      && loop == taken_edge->dest->loop_father
      && (determine_bb_domination_status (loop, taken_edge->dest)
          == DOMST_NONDOMINATING))
    creates_irreducible_loop = true;
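
  /* A threaded path that reenters the loop at a block which does not
     dominate the latch gives the loop a second entry point, and a loop
     with multiple entries is irreducible -- most later loop
     optimizations will simply give up on it.  */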

  if (path_crosses_loops)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the path crosses loops.\n");
      path->pop ();
      return NULL;
    }

  if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of instructions on the path "
                 "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
      path->pop ();
      return NULL;
    }

  /* We avoid creating irreducible inner loops unless we thread through
     a multiway branch, in which case we have deemed it worth losing
     other loop optimizations later.

     We also consider it worth creating an irreducible inner loop if
     the number of copied statements is low relative to the length of
     the path -- in that case there's little the traditional loop
     optimizer would have done anyway, so an irreducible loop is not
     so bad.  */
  if (!threaded_multiway_branch && creates_irreducible_loop
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
          > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM would create irreducible loop without threading "
                 "multiway branch.\n");
      path->pop ();
      return NULL;
    }
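
  /* A worked example for the check above, using purely illustrative
     parameter values of 2 for PARAM_FSM_SCALE_PATH_STMTS and 3 for
     PARAM_FSM_SCALE_PATH_BLOCKS: a five-block path that creates an
     irreducible loop is rejected once it copies 8 or more statements,
     since 8 * 2 > 5 * 3.  */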

  /* If this path does not thread through the loop latch, then we are
     using the FSM threader to find old style jump threads.  This
     is good, except the FSM threader does not re-use an existing
     threading path to reduce code duplication.

     So for that case, drastically reduce the number of statements
     we are allowed to copy.  */
  if (!(threaded_through_latch && threaded_multiway_branch)
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
          >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM did not thread around loop and would copy too "
                 "many statements.\n");
      path->pop ();
      return NULL;
    }

  /* When there is a multi-way branch on the path, then threading can
     explode the CFG due to duplicating the edges for that multi-way
     branch.  So like above, only allow a multi-way branch on the path
     if we actually thread a multi-way branch.  */
  if (!threaded_multiway_branch && multiway_branch_in_path)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM Thread through multiway branch without threading "
                 "a multiway branch.\n");
      path->pop ();
      return NULL;
    }

  return taken_edge;
}

/* PATH is a vector of blocks forming a jump threading path in reverse
   order.  TAKEN_EDGE is the edge taken from path[0].

   Convert that path into the form used by register_jump_thread and
   register the path.  */

static void
convert_and_register_jump_thread_path (vec<basic_block, va_gc> *path,
                                       edge taken_edge)
{
  vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();

  /* Record the edges between the blocks in PATH.  */
  for (unsigned int j = 0; j < path->length () - 1; j++)
    {
      basic_block bb1 = (*path)[path->length () - j - 1];
      basic_block bb2 = (*path)[path->length () - j - 2];

      edge e = find_edge (bb1, bb2);
      gcc_assert (e);
      jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
      jump_thread_path->safe_push (x);
    }

  /* Add the edge taken when the control variable has value ARG.  */
  jump_thread_edge *x
    = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
  jump_thread_path->safe_push (x);

  register_jump_thread (jump_thread_path);
  --max_threaded_paths;
}
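
/* Continuing the {C, B, A} illustration from above: the loop in
   convert_and_register_jump_thread_path records the edges A->B and B->C
   as EDGE_FSM_THREAD, then appends TAKEN_EDGE (the edge out of C) as
   EDGE_NO_COPY_SRC_BLOCK, since the block containing the threaded branch
   is not copied.  */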

/* We trace the value of the SSA_NAME NAME back through any phi nodes
   looking for places where it gets a constant value and save the path.
   Stop after having recorded MAX_THREADED_PATHS jump threading paths.  */

static void
fsm_find_control_statement_thread_paths (tree name,
                                         hash_set<basic_block> *visited_bbs,
                                         vec<basic_block, va_gc> *&path,
                                         bool seen_loop_phi)
{
  /* If NAME appears in an abnormal PHI, then don't try to trace its
     value back through PHI nodes.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var_bb == NULL)
    return;

  /* We allow the SSA chain to contain PHIs, simple copies and constant
     initializations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_code (def_stmt) != GIMPLE_ASSIGN)
    return;

  if (gimple_code (def_stmt) == GIMPLE_PHI
      && (gimple_phi_num_args (def_stmt)
          >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
    return;

  if (gimple_code (def_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (def_stmt) != INTEGER_CST
      && gimple_assign_rhs_code (def_stmt) != SSA_NAME)
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (def_stmt)->header == gimple_bb (def_stmt))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
        return;
      seen_loop_phi = true;
    }

  if (bb_loop_depth (last_bb_in_path) > bb_loop_depth (var_bb))
    return;

  /* Following the chain of SSA_NAME definitions, we jumped from a
     definition in LAST_BB_IN_PATH to a definition in VAR_BB.  When these
     basic blocks are different, append to PATH the blocks from
     LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, 10);

      /* When VAR_BB == LAST_BB_IN_PATH, the first block in the path
         will already be in VISITED_BBS.  When they are not equal, we
         must account for that first block ourselves so that we do not
         create bogus jump threading paths.  */
      visited_bbs->add ((*path)[0]);
      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
        {
          hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

          if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
                                    e->src->loop_father))
            ++e_count;

          delete visited_bbs;

          /* If there is more than one path, stop.  */
          if (e_count > 1)
            {
              vec_free (next_path);
              return;
            }
        }

      /* Stop if we have not found a path: this could occur when the
         recursion is stopped by one of the bounds.  */
      if (e_count == 0)
        {
          vec_free (next_path);
          return;
        }

      /* Make sure we haven't already visited any of the nodes in
         NEXT_PATH.  Don't add them here to avoid pollution.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        {
          if (visited_bbs->contains ((*next_path)[i])
              || (bb_loop_depth (last_bb_in_path)
                  > bb_loop_depth ((*next_path)[i])))
            {
              vec_free (next_path);
              return;
            }
        }

      /* Now add the nodes to VISITED_BBS.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        visited_bbs->add ((*next_path)[i]);

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }

  gcc_assert (path->last () == var_bb);

  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  if (gimple_code (def_stmt) == GIMPLE_PHI)
    {
      gphi *phi = as_a <gphi *> (def_stmt);
      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree arg = gimple_phi_arg_def (phi, i);
          basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

          /* Skip edges pointing outside the current loop.  */
          if (!arg || var_bb->loop_father != bbi->loop_father)
            continue;

          if (TREE_CODE (arg) == SSA_NAME)
            {
              vec_safe_push (path, bbi);
              /* Recursively follow SSA_NAMEs looking for a constant
                 definition.  */
              fsm_find_control_statement_thread_paths (arg, visited_bbs,
                                                       path, seen_loop_phi);
              path->pop ();
              continue;
            }

          if (TREE_CODE (arg) != INTEGER_CST)
            continue;

          /* If this is a profitable jump thread path, then convert it
             into the canonical form and register it.  */
          edge taken_edge = profitable_jump_thread_path (path, bbi, name,
                                                         arg);
          if (taken_edge)
            {
              if (bb_loop_depth (taken_edge->src)
                  >= bb_loop_depth (taken_edge->dest))
                convert_and_register_jump_thread_path (path, taken_edge);
              path->pop ();
            }
        }
    }
  else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
    {
      tree arg = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (arg) == SSA_NAME)
        fsm_find_control_statement_thread_paths (arg, visited_bbs,
                                                 path, seen_loop_phi);
      else
        {
          /* profitable_jump_thread_path is going to push the current
             block onto the path.  But the path will always have the
             current block at this point.  So we can just pop it.  */
          path->pop ();

          edge taken_edge = profitable_jump_thread_path (path, var_bb,
                                                         name, arg);
          if (taken_edge)
            {
              if (bb_loop_depth (taken_edge->src)
                  >= bb_loop_depth (taken_edge->dest))
                convert_and_register_jump_thread_path (path, taken_edge);
              path->pop ();
            }

          /* And put the current block back onto the path so that the
             state of the stack is unchanged when we leave.  */
          vec_safe_push (path, var_bb);
        }
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
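
/* To make the recursion in fsm_find_control_statement_thread_paths
   concrete (illustrative SSA names): if the traced name is defined by

     state_1 = PHI <0(bb3), x_7(bb4)>

   then the constant argument 0 triggers a profitability check for the
   path extended by bb3, while the SSA_NAME argument x_7 pushes bb4 onto
   the path and recurses on x_7's definition.  */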

/* Search backwards from BB looking for paths where NAME (an SSA_NAME)
   is a constant.  Record such paths for jump threading.

   It is assumed that BB ends with a control statement and that by
   finding a path where NAME is a constant, we can thread the path.  */

void
find_jump_threads_backwards (basic_block bb)
{
  if (!flag_expensive_optimizations
      || optimize_function_for_size_p (cfun))
    return;

  gimple *stmt = get_gimple_control_stmt (bb);
  if (!stmt)
    return;

  enum gimple_code code = gimple_code (stmt);
  tree name = NULL;
  if (code == GIMPLE_SWITCH)
    name = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    name = gimple_goto_dest (stmt);
  else if (code == GIMPLE_COND)
    {
      if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
          && TREE_CODE (gimple_cond_rhs (stmt)) == INTEGER_CST
          && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
              || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
        name = gimple_cond_lhs (stmt);
    }
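
  /* E.g. a conditional like "if (x_1 != 0)" qualifies, whereas
     "if (x_1 > y_2)" does not: only an SSA_NAME of integral or pointer
     type compared against an integer constant can be traced back to a
     constant value (illustrative names).  */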

  if (!name || TREE_CODE (name) != SSA_NAME)
    return;

  vec<basic_block, va_gc> *bb_path;
  vec_alloc (bb_path, 10);
  vec_safe_push (bb_path, bb);
  hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

  max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
  fsm_find_control_statement_thread_paths (name, visited_bbs, bb_path, false);

  delete visited_bbs;
  vec_free (bb_path);
}

namespace {

const pass_data pass_data_thread_jumps =
{
  GIMPLE_PASS, /* type */
  "thread", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_THREAD_JUMPS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_thread_jumps : public gimple_opt_pass
{
public:
  pass_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};

bool
pass_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return (flag_expensive_optimizations
          && ! optimize_function_for_size_p (cfun));
}

unsigned int
pass_thread_jumps::execute (function *fun)
{
  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
        find_jump_threads_backwards (bb);
    }
  thread_through_all_blocks (true);
  return 0;
}

}	// anon namespace

gimple_opt_pass *
make_pass_thread_jumps (gcc::context *ctxt)
{
  return new pass_thread_jumps (ctxt);
}
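
/* A sketch of how this pass is wired up (shown for illustration; the
   authoritative list lives in passes.def):

     NEXT_PASS (pass_thread_jumps);

   The pass manager calls make_pass_thread_jumps to construct the pass,
   gates it on -fexpensive-optimizations when not optimizing for size,
   and then invokes execute above once per function.  */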