/* SSA Jump Threading
   Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "predict.h"
#include "tree.h"
#include "gimple.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-loop.h"
#include "cfganal.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "tree-inline.h"
#include "tree-vectorizer.h"

static int max_threaded_paths;
/* Simple helper to get the last statement from BB, which is assumed
   to be a control statement.  Return NULL if the last statement is
   not a control statement.  */

static gimple *
get_gimple_control_stmt (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  gimple *stmt = gsi_stmt (gsi);
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_COND || code == GIMPLE_SWITCH || code == GIMPLE_GOTO)
    return stmt;
  return NULL;
}
/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.  */

static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
		      vec<basic_block, va_gc> *&path,
		      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
	if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
	  {
	    vec_safe_push (path, start_bb);
	    return true;
	  }
    }

  return false;
}
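/* A small informal example of the search above (the block names are invented
   for illustration): given a sub-CFG

       bb4 -> bb5 -> bb7
       bb4 -> bb6 -> bb7

   a call fsm_find_thread_path (bb4, bb7, path, visited_bbs, loop) succeeds
   through whichever successor edge is explored first and, because blocks are
   pushed on the way back out of the recursion, leaves PATH containing
   { bb7, bb5, bb4 } (or { bb7, bb6, bb4 }), i.e. END_BB first and START_BB
   last.  */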
/* Examine jump threading path PATH to which we want to add BBI.

   If the resulting path is profitable to thread, then return the
   final taken edge from the path, NULL otherwise.

   NAME is the SSA_NAME of the variable we found to have a constant
   value on PATH.  ARG is the value of that SSA_NAME.

   BBI will be appended to PATH when we have a profitable jump threading
   path.  Callers are responsible for removing BBI from PATH in that case.

   SPEED_P indicates that we could increase code size to improve the
   code path.  */

static edge
profitable_jump_thread_path (vec<basic_block, va_gc> *&path,
			     basic_block bbi, tree name, tree arg,
			     bool speed_p, bool *creates_irreducible_loop)
{
  /* Note BBI is not in the path yet, hence the +1 in the test below
     to make sure BBI is accounted for in the path length test.  */
  int path_length = path->length ();

  /* We can get a length of 0 here when the statement that
     makes a conditional generate a compile-time constant
     result is in the same block as the conditional.

     That's not really a jump threading opportunity, but instead is
     simple cprop & simplification.  We could handle it here if we
     wanted by wiring up all the incoming edges.  If we run this
     early in IPA, that might be worth doing.  For now we just
     reject that case.  */
  if (path_length == 0)
    return NULL;

  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "the number of basic blocks on the path "
		 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
      return NULL;
    }

  if (max_threaded_paths <= 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "the number of previously recorded FSM paths to "
		 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
      return NULL;
    }

  /* Add BBI to the path.
     From this point onward, if we decide the path is not profitable
     to thread, we must remove BBI from the path.  */
  vec_safe_push (path, bbi);
  ++path_length;
  int n_insns = 0;
  gimple_stmt_iterator gsi;
  int j;
  loop_p loop = (*path)[0]->loop_father;
  bool path_crosses_loops = false;
  bool threaded_through_latch = false;
  bool multiway_branch_in_path = false;
  bool threaded_multiway_branch = false;
  bool contains_hot_bb = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Checking profitability of path (backwards): ");
  /* Count the number of instructions on the path: as these instructions
     will have to be duplicated, we will not record the path if there
     are too many instructions on the path.  Also check that all the
     blocks in the path belong to a single loop.  */
  for (j = 0; j < path_length; j++)
    {
      basic_block bb = (*path)[j];

      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " bb:%i", bb->index);
      /* Remember, blocks in the path are stored in opposite order
	 in the PATH array.  The last entry in the array represents
	 the block with an outgoing edge that we will redirect to the
	 jump threading path.  Thus we don't care about that block's
	 loop father, nor how many statements are in that block because
	 it will not be copied or whether or not it ends in a multiway
	 branch.  */
      if (j < path_length - 1)
	{
	  int orig_n_insns = n_insns;
	  if (bb->loop_father != loop)
	    {
	      path_crosses_loops = true;
	      break;
	    }

	  /* PHIs in the path will create degenerate PHIs in the
	     copied path which will then get propagated away, so
	     looking at just the duplicate path the PHIs would
	     seem unimportant.

	     But those PHIs, because they're assignments to objects
	     typically with lives that exist outside the thread path,
	     will tend to generate PHIs (or at least new PHI arguments)
	     at points where we leave the thread path and rejoin
	     the original blocks.  So we do want to account for them.

	     We ignore virtual PHIs.  We also ignore cases where BB
	     has a single incoming edge.  That's the most common
	     degenerate PHI we'll see here.  Finally we ignore PHIs
	     that are associated with the value we're tracking as
	     that object likely dies.  */
	  if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
	    {
	      for (gphi_iterator gsip = gsi_start_phis (bb);
		   !gsi_end_p (gsip);
		   gsi_next (&gsip))
		{
		  gphi *phi = gsip.phi ();
		  tree dst = gimple_phi_result (phi);

		  /* Note that if both NAME and DST are anonymous
		     SSA_NAMEs, then we do not have enough information
		     to consider them associated.  */
		  if (dst != name
		      && (SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
			  || !SSA_NAME_VAR (dst))
		      && !virtual_operand_p (dst))
		    ++n_insns;
		}
	    }

	  if (!contains_hot_bb && speed_p)
	    contains_hot_bb |= optimize_bb_for_speed_p (bb);
	  for (gsi = gsi_after_labels (bb);
	       !gsi_end_p (gsi);
	       gsi_next_nondebug (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      /* Do not count empty statements and labels.  */
	      if (gimple_code (stmt) != GIMPLE_NOP
		  && !(gimple_code (stmt) == GIMPLE_ASSIGN
		       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
		  && !is_gimple_debug (stmt))
		n_insns += estimate_num_insns (stmt, &eni_size_weights);
	    }
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, " (%i insns)", n_insns - orig_n_insns);

	  /* We do not look at the block with the threaded branch
	     in this loop.  So if any block with a last statement that
	     is a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
	     multiway branch on our path.

	     The block in PATH[0] is special, it's the block where we're
	     going to be able to eliminate its branch.  */
	  gimple *last = last_stmt (bb);
	  if (last && (gimple_code (last) == GIMPLE_SWITCH
		       || gimple_code (last) == GIMPLE_GOTO))
	    {
	      if (j == 0)
		threaded_multiway_branch = true;
	      else
		multiway_branch_in_path = true;
	    }
	}

      /* Note if we thread through the latch, we will want to include
	 the last entry in the array when determining if we thread
	 through the loop latch.  */
      if (loop->latch == bb)
	threaded_through_latch = true;
    }
  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
  gcc_assert (stmt);

  /* We are going to remove the control statement at the end of the
     last block in the threading path.  So don't count it against our
     statement count.  */

  int stmt_insns = estimate_num_insns (stmt, &eni_size_weights);
  n_insns -= stmt_insns;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n  Control statement insns: %i\n"
	     "  Overall: %i insns\n",
	     stmt_insns, n_insns);
  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
     we need to substitute, fold and simplify so we can determine
     the edge taken out of the last block.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code cond_code = gimple_cond_code (stmt);

      /* We know the underlying format of the condition.  */
      arg = fold_binary (cond_code, boolean_type_node,
			 arg, gimple_cond_rhs (stmt));
    }

  /* If this path threaded through the loop latch back into the
     same loop and the destination does not dominate the loop
     latch, then this thread would create an irreducible loop.

     We have to know the outgoing edge to figure this out.  */
  edge taken_edge = find_taken_edge ((*path)[0], arg);

  /* There are cases where we may not be able to extract the
     taken edge.  For example, a computed goto to an absolute
     address.  Handle those cases gracefully.  */
  if (taken_edge == NULL)
    {
      path->pop ();
      return NULL;
    }
  *creates_irreducible_loop = false;
  if (threaded_through_latch
      && loop == taken_edge->dest->loop_father
      && (determine_bb_domination_status (loop, taken_edge->dest)
	  == DOMST_NONDOMINATING))
    *creates_irreducible_loop = true;

  if (path_crosses_loops)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "the path crosses loops.\n");
      path->pop ();
      return NULL;
    }
  /* Threading is profitable if the duplicated path is hot, but also in
     the case where we separate the cold path from the hot path and permit
     optimization of the hot path later.  Be on the aggressive side here.
     In some testcases, as in PR 78407, this leads to noticeable
     improvements.  */
  if (speed_p && (optimize_edge_for_speed_p (taken_edge) || contains_hot_bb))
    {
      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "FSM jump-thread path not considered: "
		     "the number of instructions on the path "
		     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
	  path->pop ();
	  return NULL;
	}
    }
  else if (n_insns > 1)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "duplication of %i insns is needed and optimizing for size.\n",
		 n_insns);
      path->pop ();
      return NULL;
    }
  /* We avoid creating irreducible inner loops unless we thread through
     a multiway branch, in which case we have deemed it worth losing
     other loop optimizations later.

     We also consider it worth creating an irreducible inner loop if
     the number of copied statements is low relative to the length of
     the path -- in that case there's little the traditional loop
     optimizer would have done anyway, so an irreducible loop is not
     so bad.  */
  if (!threaded_multiway_branch && *creates_irreducible_loop
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
	  > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FSM would create irreducible loop without threading "
		 "multiway branch.\n");
      path->pop ();
      return NULL;
    }
  /* If this path does not thread through the loop latch, then we are
     using the FSM threader to find old style jump threads.  This
     is good, except the FSM threader does not re-use an existing
     threading path to reduce code duplication.

     So for that case, drastically reduce the number of statements
     we are allowed to copy.  */
  if (!(threaded_through_latch && threaded_multiway_branch)
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
	  >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FSM did not thread around loop and would copy too "
		 "many statements.\n");
      path->pop ();
      return NULL;
    }
  /* When there is a multi-way branch on the path, then threading can
     explode the CFG due to duplicating the edges for that multi-way
     branch.  So like above, only allow a multi-way branch on the path
     if we actually thread a multi-way branch.  */
  if (!threaded_multiway_branch && multiway_branch_in_path)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FSM Thread through multiway branch without threading "
		 "a multiway branch.\n");
      path->pop ();
      return NULL;
    }
  return taken_edge;
}
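/* A rough worked example of the two scaled checks above, using parameter
   values that are only assumptions here (see params.def for the actual
   defaults): with PARAM_FSM_SCALE_PATH_STMTS == 2 and
   PARAM_FSM_SCALE_PATH_BLOCKS == 3, a 4-block path that would create an
   irreducible loop is kept only while n_insns * 2 <= 4 * 3, i.e. at most 6
   estimated insns.  Likewise, with PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS
   == 15, a path that does not thread a multiway branch through the latch is
   rejected once n_insns * 2 >= 15, i.e. at 8 insns or more.  */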
/* PATH is a vector of blocks forming a jump threading path in reverse
   order.  TAKEN_EDGE is the edge taken from path[0].

   Convert that path into the form used by register_jump_thread and
   register the path.  */

static void
convert_and_register_jump_thread_path (vec<basic_block, va_gc> *path,
				       edge taken_edge)
{
  vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();

  /* Record the edges between the blocks in PATH.  */
  for (unsigned int j = 0; j < path->length () - 1; j++)
    {
      basic_block bb1 = (*path)[path->length () - j - 1];
      basic_block bb2 = (*path)[path->length () - j - 2];

      edge e = find_edge (bb1, bb2);
      gcc_assert (e);
      jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
      jump_thread_path->safe_push (x);
    }

  /* Add the edge taken when the control variable has value ARG.  */
  jump_thread_edge *x
    = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
  jump_thread_path->safe_push (x);

  register_jump_thread (jump_thread_path);
  --max_threaded_paths;
}
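/* For illustration (the block names are invented): if PATH is
   { bb7, bb5, bb4 }, stored in reverse order so that bb7 contains the
   control statement whose operand became constant, the loop above records
   the edges bb4->bb5 and bb5->bb7 with type EDGE_FSM_THREAD, then appends
   TAKEN_EDGE, the now-known edge out of bb7, as EDGE_NO_COPY_SRC_BLOCK.  */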
/* While following a chain of SSA_NAME definitions, we jumped from a definition
   in LAST_BB to a definition in NEW_BB (walking backwards).

   Verify there is a single path between the blocks and none of the blocks
   in the path is already in VISITED_BBS.  If so, then update VISITED_BBS,
   add the new blocks to PATH and return TRUE.  Otherwise return FALSE.

   Store the length of the subpath in NEXT_PATH_LENGTH.  */

static bool
check_subpath_and_update_thread_path (basic_block last_bb, basic_block new_bb,
				      hash_set<basic_block> *visited_bbs,
				      vec<basic_block, va_gc> *&path,
				      int *next_path_length)
{
  edge e;
  int e_count = 0;
  edge_iterator ei;
  vec<basic_block, va_gc> *next_path;
  vec_alloc (next_path, 10);

  FOR_EACH_EDGE (e, ei, last_bb->preds)
    {
      hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

      if (fsm_find_thread_path (new_bb, e->src, next_path, visited_bbs,
				e->src->loop_father))
	++e_count;

      delete visited_bbs;

      /* If there is more than one path, stop.  */
      if (e_count > 1)
	{
	  vec_free (next_path);
	  return false;
	}
    }

  /* Stop if we have not found a path: this could occur when the recursion
     is stopped by one of the bounds.  */
  if (e_count == 0)
    {
      vec_free (next_path);
      return false;
    }

  /* Make sure we haven't already visited any of the nodes in
     NEXT_PATH.  Don't add them here to avoid pollution.  */
  for (unsigned int i = 0; i < next_path->length () - 1; i++)
    {
      if (visited_bbs->contains ((*next_path)[i]))
	{
	  vec_free (next_path);
	  return false;
	}
    }

  /* Now add the nodes to VISITED_BBS.  */
  for (unsigned int i = 0; i < next_path->length () - 1; i++)
    visited_bbs->add ((*next_path)[i]);

  /* Append all the nodes from NEXT_PATH to PATH.  */
  vec_safe_splice (path, next_path);
  *next_path_length = next_path->length ();
  vec_free (next_path);

  return true;
}
static void fsm_find_control_statement_thread_paths (tree,
						     hash_set<basic_block> *,
						     vec<basic_block, va_gc> *&,
						     bool, bool);
/* Given PHI which defines NAME in block VAR_BB, recurse through the
   PHI's arguments searching for paths where NAME will ultimately have
   a constant value.

   VISITED_BBS tracks the blocks that have been encountered.

   PATH contains the series of blocks to traverse that will result in
   NAME having a constant value.

   SEEN_LOOP_PHI tracks if we have recursed through a loop PHI node.

   SPEED_P indicates if we are optimizing for speed over space.  */

static void
handle_phi (gphi *phi, tree name, basic_block var_bb,
	    hash_set<basic_block> *visited_bbs,
	    vec<basic_block, va_gc> *&path,
	    bool seen_loop_phi, bool speed_p)
{
  /* Iterate over the arguments of PHI.  */
  for (unsigned int i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

      /* Skip edges pointing outside the current loop.  */
      if (!arg || var_bb->loop_father != bbi->loop_father)
	continue;

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  vec_safe_push (path, bbi);
	  /* Recursively follow SSA_NAMEs looking for a constant
	     definition.  */
	  fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
						   seen_loop_phi, speed_p);

	  path->pop ();
	  continue;
	}

      if (TREE_CODE_CLASS (TREE_CODE (arg)) != tcc_constant)
	continue;

      /* If this is a profitable jump thread path, then convert it
	 into the canonical form and register it.  */
      bool irreducible = false;
      edge taken_edge = profitable_jump_thread_path (path, bbi, name, arg,
						     speed_p, &irreducible);
      if (taken_edge)
	{
	  convert_and_register_jump_thread_path (path, taken_edge);
	  path->pop ();

	  if (irreducible)
	    vect_free_loop_info_assumptions ((*path)[0]->loop_father);
	}
    }
}
/* Return TRUE if STMT is a gimple assignment we want to either directly
   handle or recurse through.  Return FALSE otherwise.

   Note that adding more cases here requires adding cases to handle_assignment
   below.  */

static bool
handle_assignment_p (gimple *stmt)
{
  if (is_gimple_assign (stmt))
    {
      enum tree_code def_code = gimple_assign_rhs_code (stmt);

      /* If the RHS is an SSA_NAME, then we will recurse through it.
	 Go ahead and filter out cases where the SSA_NAME is a default
	 definition.  There's little to be gained by trying to handle that.  */
      if (def_code == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (gimple_assign_rhs1 (stmt)))
	return true;

      /* If the RHS is a constant, then it's a terminal that we'll want
	 to handle as well.  */
      if (TREE_CODE_CLASS (def_code) == tcc_constant)
	return true;
    }

  /* Anything not explicitly allowed is not handled.  */
  return false;
}
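/* As an informal illustration, the two accepted shapes above correspond to
   GIMPLE statements such as

       x_2 = y_3;      <- SSA_NAME copy, recursed through
       x_4 = 42;       <- constant initialization, a terminal

   while something like x_5 = y_3 + 1 is rejected and stops the walk.  */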
/* Given STMT which defines NAME in block VAR_BB, recurse through the
   statement's RHS operand searching for paths where NAME will ultimately
   have a constant value.

   VISITED_BBS tracks the blocks that have been encountered.

   PATH contains the series of blocks to traverse that will result in
   NAME having a constant value.

   SEEN_LOOP_PHI tracks if we have recursed through a loop PHI node.

   SPEED_P indicates if we are optimizing for speed over space.  */

static void
handle_assignment (gimple *stmt, tree name, basic_block var_bb,
		   hash_set<basic_block> *visited_bbs,
		   vec<basic_block, va_gc> *&path,
		   bool seen_loop_phi, bool speed_p)
{
  tree arg = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (arg) == SSA_NAME)
    fsm_find_control_statement_thread_paths (arg, visited_bbs,
					     path, seen_loop_phi, speed_p);

  else
    {
      /* profitable_jump_thread_path is going to push the current
	 block onto the path.  But the path will always have the current
	 block at this point.  So we can just pop it.  */
      path->pop ();

      bool irreducible = false;
      edge taken_edge = profitable_jump_thread_path (path, var_bb,
						     name, arg, speed_p,
						     &irreducible);
      if (taken_edge)
	{
	  convert_and_register_jump_thread_path (path, taken_edge);
	  path->pop ();

	  if (irreducible)
	    vect_free_loop_info_assumptions ((*path)[0]->loop_father);
	}

      /* And put the current block back onto the path so that the
	 state of the stack is unchanged when we leave.  */
      vec_safe_push (path, var_bb);
    }
}
/* We trace the value of the SSA_NAME NAME back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded MAX_PATHS jump threading paths.

   SPEED_P indicates that we could increase code size to improve the
   code path.  */

static void
fsm_find_control_statement_thread_paths (tree name,
					 hash_set<basic_block> *visited_bbs,
					 vec<basic_block, va_gc> *&path,
					 bool seen_loop_phi, bool speed_p)
{
  /* If NAME appears in an abnormal PHI, then don't try to trace its
     value back through PHI nodes.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var_bb == NULL)
    return;

  /* We allow the SSA chain to contain PHIs and simple copies and constant
     initializations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_code (def_stmt) != GIMPLE_ASSIGN)
    return;

  if (gimple_code (def_stmt) == GIMPLE_PHI
      && (gimple_phi_num_args (def_stmt)
	  >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
    return;

  if (is_gimple_assign (def_stmt)
      && ! handle_assignment_p (def_stmt))
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (def_stmt)->header == gimple_bb (def_stmt))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
	return;
      seen_loop_phi = true;
    }

  /* Following the chain of SSA_NAME definitions, we jumped from a definition in
     LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks are
     different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      /* When VAR_BB == LAST_BB_IN_PATH, then the first block in the path
	 will already be in VISITED_BBS.  When they are not equal, then we
	 must ensure that first block is accounted for to ensure we do not
	 create bogus jump threading paths.  */
      visited_bbs->add ((*path)[0]);
      if (!check_subpath_and_update_thread_path (last_bb_in_path, var_bb,
						 visited_bbs, path,
						 &next_path_length))
	return;
    }

  gcc_assert (path->last () == var_bb);

  if (gimple_code (def_stmt) == GIMPLE_PHI)
    handle_phi (as_a <gphi *> (def_stmt), name, var_bb,
		visited_bbs, path, seen_loop_phi, speed_p);
  else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
    handle_assignment (def_stmt, name, var_bb,
		       visited_bbs, path, seen_loop_phi, speed_p);

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
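/* An informal sketch of the walk above on invented GIMPLE: starting from
   the use of x_1 in "if (x_1 == 0)", we visit its definition
   "x_1 = PHI <x_2(bb3), 7(bb4)>"; the constant argument 7 terminates a
   path, while x_2 is followed further, through any copies accepted by
   handle_assignment_p, until a constant or an unhandled statement is
   reached.  */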
/* Search backwards from BB looking for paths where NAME (an SSA_NAME)
   is a constant.  Record such paths for jump threading.

   It is assumed that BB ends with a control statement and that by
   finding a path where NAME is a constant, we can thread the path.
   SPEED_P indicates that we could increase code size to improve the
   code path.  */

void
find_jump_threads_backwards (basic_block bb, bool speed_p)
{
  gimple *stmt = get_gimple_control_stmt (bb);
  if (!stmt)
    return;

  enum gimple_code code = gimple_code (stmt);
  tree name = NULL;
  if (code == GIMPLE_SWITCH)
    name = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    name = gimple_goto_dest (stmt);
  else if (code == GIMPLE_COND)
    {
      if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
	  && TREE_CODE_CLASS (TREE_CODE (gimple_cond_rhs (stmt))) == tcc_constant
	  && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
	      || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
	name = gimple_cond_lhs (stmt);
    }

  if (!name || TREE_CODE (name) != SSA_NAME)
    return;

  vec<basic_block, va_gc> *bb_path;
  vec_alloc (bb_path, 10);
  vec_safe_push (bb_path, bb);
  hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

  max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
  fsm_find_control_statement_thread_paths (name, visited_bbs, bb_path, false,
					   speed_p);

  delete visited_bbs;
  vec_free (bb_path);
}
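/* The classic motivating example for this backward threader is a switch
   driven by a state variable inside a loop.  A minimal sketch in C (this
   fragment is only illustrative, not taken from the testsuite):

       int state = 0;
       for (int i = 0; i < n; i++)
	 {
	   switch (state)
	     {
	     case 0: ... state = 2; break;
	     case 1: ... state = 0; break;
	     case 2: ... state = 1; break;
	     }
	 }

   After each assignment to STATE, the index of the switch is a known
   constant along the path back to the switch, so that path can be
   duplicated and its copy of the switch replaced by a direct jump to the
   corresponding case.  */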
namespace {

const pass_data pass_data_thread_jumps =
{
  GIMPLE_PASS,
  "thread",
  OPTGROUP_NONE,
  TV_TREE_SSA_THREAD_JUMPS,
  ( PROP_cfg | PROP_ssa ),
  0,
  0,
  0,
  TODO_update_ssa,
};

class pass_thread_jumps : public gimple_opt_pass
{
public:
  pass_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};

bool
pass_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return flag_expensive_optimizations;
}


unsigned int
pass_thread_jumps::execute (function *fun)
{
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
	find_jump_threads_backwards (bb, true);
    }
  bool changed = thread_through_all_blocks (true);

  loop_optimizer_finalize ();
  return changed ? TODO_cleanup_cfg : 0;
}

}

gimple_opt_pass *
make_pass_thread_jumps (gcc::context *ctxt)
{
  return new pass_thread_jumps (ctxt);
}
namespace {

const pass_data pass_data_early_thread_jumps =
{
  GIMPLE_PASS,
  "ethread",
  OPTGROUP_NONE,
  TV_TREE_SSA_THREAD_JUMPS,
  ( PROP_cfg | PROP_ssa ),
  0,
  0,
  0,
  ( TODO_cleanup_cfg | TODO_update_ssa ),
};

class pass_early_thread_jumps : public gimple_opt_pass
{
public:
  pass_early_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_early_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};

bool
pass_early_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return true;
}


unsigned int
pass_early_thread_jumps::execute (function *fun)
{
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
	find_jump_threads_backwards (bb, false);
    }
  thread_through_all_blocks (true);

  loop_optimizer_finalize ();
  return 0;
}

}

gimple_opt_pass *
make_pass_early_thread_jumps (gcc::context *ctxt)
{
  return new pass_early_thread_jumps (ctxt);
}