/* SSA Jump Threading
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "predict.h"
#include "tree.h"
#include "gimple.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-loop.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "tree-inline.h"
#include "tree-vectorizer.h"
static int max_threaded_paths;
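
/* For illustration (an added example, not from the original code): the
   "FSM" in the names below refers to the typical target of this backward
   threader, a state machine written as a multiway branch inside a loop,
   e.g.

       int state = 0;
       while (get_token ())	/* get_token is a hypothetical helper.  */
	 switch (state)
	   {
	   case 0: ... state = 1; break;
	   case 1: ... state = 2; break;
	   case 2: ... state = 0; break;
	   }

   When the value feeding the multiway branch is known to be a constant
   along some incoming path, that path can be duplicated and wired
   directly to the correct case label, bypassing the branch.  */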
/* Simple helper to get the last statement from BB, which is assumed
   to be a control statement.  Return NULL if the last statement is
   not a control statement.  */
static gimple *
get_gimple_control_stmt (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  gimple *stmt = gsi_stmt (gsi);
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_COND || code == GIMPLE_SWITCH || code == GIMPLE_GOTO)
    return stmt;
  return NULL;
}
/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.  */
static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
		      vec<basic_block, va_gc> *&path,
		      hash_set<basic_block> *visited_bbs, loop_p loop)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
	if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop))
	  {
	    vec_safe_push (path, start_bb);
	    return true;
	  }
    }

  return false;
}
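
/* For illustration (an added example, not from the original code): if the
   only path inside LOOP from START_BB to END_BB is
   START_BB -> B1 -> B2 -> END_BB, then the recursion above pushes blocks
   as it unwinds, so PATH ends up holding { END_BB, B2, B1, START_BB },
   i.e. the blocks in reverse order.  */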
/* Examine jump threading path PATH to which we want to add BBI.

   If the resulting path is profitable to thread, then return the
   final taken edge from the path, NULL otherwise.

   NAME is the SSA_NAME of the variable we found to have a constant
   value on PATH.  ARG is the value of that SSA_NAME.

   BBI will be appended to PATH when we have a profitable jump threading
   path.  Callers are responsible for removing BBI from PATH in that case.

   SPEED_P indicates that we could increase code size to improve the
   code path.  */
static edge
profitable_jump_thread_path (vec<basic_block, va_gc> *&path,
			     basic_block bbi, tree name, tree arg,
			     bool speed_p, bool *creates_irreducible_loop)
{
  /* Note BBI is not in the path yet, hence the +1 in the test below
     to make sure BBI is accounted for in the path length test.  */
  int path_length = path->length ();
  /* We can get a length of 0 here when the statement that
     makes a conditional generate a compile-time constant
     result is in the same block as the conditional.

     That's not really a jump threading opportunity, but instead is
     simple cprop & simplification.  We could handle it here if we
     wanted by wiring up all the incoming edges.  If we run this
     early in IPA, that might be worth doing.  For now we just
     reject that case.  */
  if (path_length == 0)
    return NULL;
  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "the number of basic blocks on the path "
		 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
      return NULL;
    }
  if (max_threaded_paths <= 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "the number of previously recorded FSM paths to "
		 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
      return NULL;
    }
  /* Add BBI to the path.
     From this point onward, if we decide the path is not profitable
     to thread, we must remove BBI from the path.  */
  vec_safe_push (path, bbi);
  int n_insns = 0;
  gimple_stmt_iterator gsi;
  int j;
  loop_p loop = (*path)[0]->loop_father;
  bool path_crosses_loops = false;
  bool threaded_through_latch = false;
  bool multiway_branch_in_path = false;
  bool threaded_multiway_branch = false;
  /* Count the number of instructions on the path: as these instructions
     will have to be duplicated, we will not record the path if there
     are too many instructions on the path.  Also check that all the
     blocks in the path belong to a single loop.  */
  for (j = 0; j < path_length; j++)
    {
      basic_block bb = (*path)[j];
      /* Remember, blocks in the path are stored in opposite order
	 in the PATH array.  The last entry in the array represents
	 the block with an outgoing edge that we will redirect to the
	 jump threading path.  Thus we don't care about that block's
	 loop father, nor how many statements are in that block because
	 it will not be copied or whether or not it ends in a multiway
	 branch.  */
      if (j < path_length - 1)
	{
	  if (bb->loop_father != loop)
	    {
	      path_crosses_loops = true;
	      break;
	    }
	  /* PHIs in the path will create degenerate PHIS in the
	     copied path which will then get propagated away, so
	     looking at just the duplicate path the PHIs would
	     seem unimportant.

	     But those PHIs, because they're assignments to objects
	     typically with lives that exist outside the thread path,
	     will tend to generate PHIs (or at least new PHI arguments)
	     at points where we leave the thread path and rejoin
	     the original blocks.  So we do want to account for them.

	     We ignore virtual PHIs.  We also ignore cases where BB
	     has a single incoming edge.  That's the most common
	     degenerate PHI we'll see here.  Finally we ignore PHIs
	     that are associated with the value we're tracking as
	     that object likely dies.  */
	  if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
	    {
	      for (gphi_iterator gsip = gsi_start_phis (bb);
		   !gsi_end_p (gsip);
		   gsi_next (&gsip))
		{
		  gphi *phi = gsip.phi ();
		  tree dst = gimple_phi_result (phi);

		  /* Note that if both NAME and DST are anonymous
		     SSA_NAMEs, then we do not have enough information
		     to consider them associated.  */
		  if (dst != name
		      && (SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
			  || !SSA_NAME_VAR (dst))
		      && !virtual_operand_p (dst))
		    ++n_insns;
		}
	    }
	  for (gsi = gsi_after_labels (bb);
	       !gsi_end_p (gsi);
	       gsi_next_nondebug (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      /* Do not count empty statements and labels.  */
	      if (gimple_code (stmt) != GIMPLE_NOP
		  && !(gimple_code (stmt) == GIMPLE_ASSIGN
		       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
		  && !is_gimple_debug (stmt))
		n_insns += estimate_num_insns (stmt, &eni_size_weights);
	    }
	  /* We do not look at the block with the threaded branch
	     in this loop.  So if any block with a last statement that
	     is a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
	     multiway branch on our path.

	     The block in PATH[0] is special, it's the block where we're
	     going to be able to eliminate its branch.  */
	  gimple *last = last_stmt (bb);
	  if (last && (gimple_code (last) == GIMPLE_SWITCH
		       || gimple_code (last) == GIMPLE_GOTO))
	    {
	      if (j == 0)
		threaded_multiway_branch = true;
	      else
		multiway_branch_in_path = true;
	    }
	}
      /* Note if we thread through the latch, we will want to include
	 the last entry in the array when determining if we thread
	 through the loop latch.  */
      if (loop->latch == bb)
	threaded_through_latch = true;
    }
  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
  gcc_assert (stmt);

  /* We are going to remove the control statement at the end of the
     last block in the threading path.  So don't count it against our
     statement count.  */
  n_insns -= estimate_num_insns (stmt, &eni_size_weights);
  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
     we need to substitute, fold and simplify so we can determine
     the edge taken out of the last block.  */
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code cond_code = gimple_cond_code (stmt);

      /* We know the underlying format of the condition.  */
      arg = fold_binary (cond_code, boolean_type_node,
			 arg, gimple_cond_rhs (stmt));
    }
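
  /* For illustration (an added example, not from the original code): if
     the control statement is "if (x_1 != 0)" and we found that x_1 has
     the constant value 5 on PATH, the call above folds NE_EXPR (5, 0)
     into boolean_true_node, and find_taken_edge below maps that constant
     to the outgoing true edge of (*path)[0].  */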
  /* If this path threaded through the loop latch back into the
     same loop and the destination does not dominate the loop
     latch, then this thread would create an irreducible loop.

     We have to know the outgoing edge to figure this out.  */
  edge taken_edge = find_taken_edge ((*path)[0], arg);

  /* There are cases where we may not be able to extract the
     taken edge.  For example, a computed goto to an absolute
     address.  Handle those cases gracefully.  */
  if (taken_edge == NULL)
    {
      path->pop ();
      return NULL;
    }
  *creates_irreducible_loop = false;
  if (threaded_through_latch
      && loop == taken_edge->dest->loop_father
      && (determine_bb_domination_status (loop, taken_edge->dest)
	  == DOMST_NONDOMINATING))
    *creates_irreducible_loop = true;
  if (path_crosses_loops)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "the path crosses loops.\n");
      path->pop ();
      return NULL;
    }
  if (speed_p && optimize_edge_for_speed_p (taken_edge))
    {
      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "FSM jump-thread path not considered: "
		     "the number of instructions on the path "
		     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
	  path->pop ();
	  return NULL;
	}
    }
  else if (n_insns > 1)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "FSM jump-thread path not considered: "
		 "duplication of %i insns is needed and optimizing for "
		 "size.\n", n_insns);
      path->pop ();
      return NULL;
    }
  /* We avoid creating irreducible inner loops unless we thread through
     a multiway branch, in which case we have deemed it worth losing
     other loop optimizations later.

     We also consider it worth creating an irreducible inner loop if
     the number of copied statements is low relative to the length of
     the path -- in that case there's little the traditional loop
     optimizer would have done anyway, so an irreducible loop is not
     so bad there.  */
  if (!threaded_multiway_branch && *creates_irreducible_loop
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
	  > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FSM would create irreducible loop without threading "
		 "multiway branch.\n");
      path->pop ();
      return NULL;
    }
  /* If this path does not thread through the loop latch, then we are
     using the FSM threader to find old style jump threads.  This
     is good, except the FSM threader does not re-use an existing
     threading path to reduce code duplication.

     So for that case, drastically reduce the number of statements
     we are allowed to copy.  */
  if (!(threaded_through_latch && threaded_multiway_branch)
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
	  >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FSM did not thread around loop and would copy too "
		 "many statements.\n");
      path->pop ();
      return NULL;
    }
  /* When there is a multi-way branch on the path, then threading can
     explode the CFG due to duplicating the edges for that multi-way
     branch.  So like above, only allow a multi-way branch on the path
     if we actually thread a multi-way branch.  */
  if (!threaded_multiway_branch && multiway_branch_in_path)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "FSM Thread through multiway branch without threading "
		 "a multiway branch.\n");
      path->pop ();
      return NULL;
    }

  return taken_edge;
}
/* PATH is a vector of blocks forming a jump threading path in reverse
   order.  TAKEN_EDGE is the edge taken from path[0].

   Convert that path into the form used by register_jump_thread and
   register the path.  */
static void
convert_and_register_jump_thread_path (vec<basic_block, va_gc> *path,
				       edge taken_edge)
{
  vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();
  /* Record the edges between the blocks in PATH.  */
  for (unsigned int j = 0; j < path->length () - 1; j++)
    {
      basic_block bb1 = (*path)[path->length () - j - 1];
      basic_block bb2 = (*path)[path->length () - j - 2];

      edge e = find_edge (bb1, bb2);
      gcc_assert (e);
      jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
      jump_thread_path->safe_push (x);
    }
  /* Add the edge taken when the control variable has value ARG.  */
  jump_thread_edge *x
    = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
  jump_thread_path->safe_push (x);

  register_jump_thread (jump_thread_path);
  --max_threaded_paths;
}
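
/* For illustration (an added example, not from the original code): if PATH
   holds { C, B, A } (reverse order, so the duplicated blocks start at A and
   the control statement being eliminated is in C), the loop above pushes
   the edges A->B and B->C as EDGE_FSM_THREAD, and then TAKEN_EDGE leaving
   C is appended as EDGE_NO_COPY_SRC_BLOCK.  */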
/* While following a chain of SSA_NAME definitions, we jumped from a definition
   in LAST_BB to a definition in NEW_BB (walking backwards).

   Verify there is a single path between the blocks and none of the blocks
   in the path is already in VISITED_BBS.  If so, then update VISITED_BBS,
   add the new blocks to PATH and return TRUE.  Otherwise return FALSE.

   Store the length of the subpath in NEXT_PATH_LENGTH.  */
static bool
check_subpath_and_update_thread_path (basic_block last_bb, basic_block new_bb,
				      hash_set<basic_block> *visited_bbs,
				      vec<basic_block, va_gc> *&path,
				      int *next_path_length)
{
  edge e;
  int e_count = 0;
  edge_iterator ei;
  vec<basic_block, va_gc> *next_path;
  vec_alloc (next_path, 10);
  FOR_EACH_EDGE (e, ei, last_bb->preds)
    {
      hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

      if (fsm_find_thread_path (new_bb, e->src, next_path, visited_bbs,
				e->src->loop_father))
	++e_count;

      delete visited_bbs;

      /* If there is more than one path, stop.  */
      if (e_count > 1)
	{
	  vec_free (next_path);
	  return false;
	}
    }

  /* Stop if we have not found a path: this could occur when the recursion
     is stopped by one of the bounds.  */
  if (e_count == 0)
    {
      vec_free (next_path);
      return false;
    }
  /* Make sure we haven't already visited any of the nodes in
     NEXT_PATH.  Don't add them here to avoid pollution.  */
  for (unsigned int i = 0; i < next_path->length () - 1; i++)
    {
      if (visited_bbs->contains ((*next_path)[i]))
	{
	  vec_free (next_path);
	  return false;
	}
    }
  /* Now add the nodes to VISITED_BBS.  */
  for (unsigned int i = 0; i < next_path->length () - 1; i++)
    visited_bbs->add ((*next_path)[i]);
  /* Append all the nodes from NEXT_PATH to PATH.  */
  vec_safe_splice (path, next_path);
  *next_path_length = next_path->length ();
  vec_free (next_path);

  return true;
}
static void fsm_find_control_statement_thread_paths (tree,
						      hash_set<basic_block> *,
						      vec<basic_block, va_gc> *&,
						      bool, bool);
/* Given PHI which defines NAME in block VAR_BB, recurse through the
   PHI's arguments searching for paths where NAME will ultimately have
   a constant value.

   VISITED_BBS tracks the blocks that have been encountered.

   PATH contains the series of blocks to traverse that will result in
   NAME having a constant value.

   SEEN_LOOP_PHI tracks if we have recursed through a loop PHI node.

   SPEED_P indicates if we are optimizing for speed over space.  */
static void
handle_phi (gphi *phi, tree name, basic_block var_bb,
	    hash_set<basic_block> *visited_bbs,
	    vec<basic_block, va_gc> *&path,
	    bool seen_loop_phi, bool speed_p)
{
  /* Iterate over the arguments of PHI.  */
  for (unsigned int i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);
      basic_block bbi = gimple_phi_arg_edge (phi, i)->src;
      /* Skip edges pointing outside the current loop.  */
      if (!arg || var_bb->loop_father != bbi->loop_father)
	continue;
      if (TREE_CODE (arg) == SSA_NAME)
	{
	  vec_safe_push (path, bbi);
	  /* Recursively follow SSA_NAMEs looking for a constant
	     definition.  */
	  fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
						   seen_loop_phi, speed_p);

	  path->pop ();
	  continue;
	}
      if (TREE_CODE_CLASS (TREE_CODE (arg)) != tcc_constant)
	continue;

      /* If this is a profitable jump thread path, then convert it
	 into the canonical form and register it.  */
      bool irreducible = false;
      edge taken_edge = profitable_jump_thread_path (path, bbi, name, arg,
						     speed_p, &irreducible);
      if (taken_edge)
	{
	  convert_and_register_jump_thread_path (path, taken_edge);
	  path->pop ();

	  if (irreducible)
	    vect_free_loop_info_assumptions ((*path)[0]->loop_father);
	}
    }
}
/* Return TRUE if STMT is a gimple assignment we want to either directly
   handle or recurse through.  Return FALSE otherwise.

   Note that adding more cases here requires adding cases to handle_assignment
   below.  */
static bool
handle_assignment_p (gimple *stmt)
{
  if (is_gimple_assign (stmt))
    {
      enum tree_code def_code = gimple_assign_rhs_code (stmt);

      /* If the RHS is an SSA_NAME, then we will recurse through it.
	 Go ahead and filter out cases where the SSA_NAME is a default
	 definition.  There's little to be gained by trying to handle that.  */
      if (def_code == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (gimple_assign_rhs1 (stmt)))
	return true;

      /* If the RHS is a constant, then it's a terminal that we'll want
	 to handle as well.  */
      if (TREE_CODE_CLASS (def_code) == tcc_constant)
	return true;
    }

  /* Anything not explicitly allowed is not handled.  */
  return false;
}
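
/* For illustration (an added example, not from the original code): with
   the rules above, a simple copy such as "x_3 = y_2;" or a constant
   initialization such as "x_3 = 42;" is accepted, while arithmetic like
   "x_3 = y_2 + 1;" or a copy from a default definition is rejected.  */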
/* Given STMT which defines NAME in block VAR_BB, recurse through the
   assignment's right-hand side searching for paths where NAME will
   ultimately have a constant value.

   VISITED_BBS tracks the blocks that have been encountered.

   PATH contains the series of blocks to traverse that will result in
   NAME having a constant value.

   SEEN_LOOP_PHI tracks if we have recursed through a loop PHI node.

   SPEED_P indicates if we are optimizing for speed over space.  */
static void
handle_assignment (gimple *stmt, tree name, basic_block var_bb,
		   hash_set<basic_block> *visited_bbs,
		   vec<basic_block, va_gc> *&path,
		   bool seen_loop_phi, bool speed_p)
{
  tree arg = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (arg) == SSA_NAME)
    fsm_find_control_statement_thread_paths (arg, visited_bbs,
					     path, seen_loop_phi, speed_p);
  else
    {
      /* profitable_jump_thread_path is going to push the current
	 block onto the path.  But the path will always have the current
	 block at this point.  So we can just pop it.  */
      path->pop ();

      bool irreducible = false;
      edge taken_edge = profitable_jump_thread_path (path, var_bb,
						     name, arg, speed_p,
						     &irreducible);
      if (taken_edge)
	{
	  convert_and_register_jump_thread_path (path, taken_edge);
	  path->pop ();

	  if (irreducible)
	    vect_free_loop_info_assumptions ((*path)[0]->loop_father);
	}

      /* And put the current block back onto the path so that the
	 state of the stack is unchanged when we leave.  */
      vec_safe_push (path, var_bb);
    }
}
/* We trace the value of the SSA_NAME NAME back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded MAX_PATHS jump threading paths.

   SPEED_P indicates that we could increase code size to improve the
   code path.  */
static void
fsm_find_control_statement_thread_paths (tree name,
					 hash_set<basic_block> *visited_bbs,
					 vec<basic_block, va_gc> *&path,
					 bool seen_loop_phi, bool speed_p)
{
  /* If NAME appears in an abnormal PHI, then don't try to trace its
     value back through PHI nodes.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;
  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  basic_block var_bb = gimple_bb (def_stmt);
  /* We allow the SSA chain to contain PHIs and simple copies and constant
     initializations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_code (def_stmt) != GIMPLE_ASSIGN)
    return;

  if (gimple_code (def_stmt) == GIMPLE_PHI
      && (gimple_phi_num_args (def_stmt)
	  >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
    return;

  if (is_gimple_assign (def_stmt)
      && ! handle_assignment_p (def_stmt))
    return;
  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;
  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();
  if (loop_containing_stmt (def_stmt)->header == gimple_bb (def_stmt))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
	return;
      seen_loop_phi = true;
    }
  /* Following the chain of SSA_NAME definitions, we jumped from a definition in
     LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks are
     different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      /* When VAR_BB == LAST_BB_IN_PATH, then the first block in the path
	 will already be in VISITED_BBS.  When they are not equal, then we
	 must ensure that first block is accounted for to ensure we do not
	 create bogus jump threading paths.  */
      visited_bbs->add ((*path)[0]);
      if (!check_subpath_and_update_thread_path (last_bb_in_path, var_bb,
						 visited_bbs, path,
						 &next_path_length))
	return;
    }

  gcc_assert (path->last () == var_bb);
  if (gimple_code (def_stmt) == GIMPLE_PHI)
    handle_phi (as_a <gphi *> (def_stmt), name, var_bb,
		visited_bbs, path, seen_loop_phi, speed_p);
  else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
    handle_assignment (def_stmt, name, var_bb,
		       visited_bbs, path, seen_loop_phi, speed_p);
  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
/* Search backwards from BB looking for paths where NAME (an SSA_NAME)
   is a constant.  Record such paths for jump threading.

   It is assumed that BB ends with a control statement and that by
   finding a path where NAME is a constant, we can thread the path.
   SPEED_P indicates that we could increase code size to improve the
   code path.  */
static void
find_jump_threads_backwards (basic_block bb, bool speed_p)
{
  gimple *stmt = get_gimple_control_stmt (bb);
  if (!stmt)
    return;
  enum gimple_code code = gimple_code (stmt);
  tree name = NULL;
  if (code == GIMPLE_SWITCH)
    name = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    name = gimple_goto_dest (stmt);
  else if (code == GIMPLE_COND)
    {
      if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
	  && TREE_CODE_CLASS (TREE_CODE (gimple_cond_rhs (stmt))) == tcc_constant
	  && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
	      || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
	name = gimple_cond_lhs (stmt);
    }

  if (!name || TREE_CODE (name) != SSA_NAME)
    return;
  vec<basic_block, va_gc> *bb_path;
  vec_alloc (bb_path, 10);
  vec_safe_push (bb_path, bb);
  hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

  max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
  fsm_find_control_statement_thread_paths (name, visited_bbs, bb_path, false,
					   speed_p);

  delete visited_bbs;
  vec_free (bb_path);
}
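
/* For illustration (an added example, not from the original code): a block
   ending in "if (x_1 == 42)" with integral x_1 qualifies (NAME is x_1), as
   does "switch (state_5)" or a computed "goto *p_7;", while "if (x_1 == y_2)"
   does not, because its right-hand side is not a constant.  */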
namespace {

const pass_data pass_data_thread_jumps =
{
  GIMPLE_PASS,
  "thread",
  OPTGROUP_NONE,
  TV_TREE_SSA_THREAD_JUMPS,
  ( PROP_cfg | PROP_ssa ),
  0,
  0,
  0,
  TODO_update_ssa,
};
class pass_thread_jumps : public gimple_opt_pass
{
public:
  pass_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};
bool
pass_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return flag_expensive_optimizations;
}
unsigned int
pass_thread_jumps::execute (function *fun)
{
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
	find_jump_threads_backwards (bb, true);
    }
  bool changed = thread_through_all_blocks (true);

  loop_optimizer_finalize ();
  return changed ? TODO_cleanup_cfg : 0;
}
}

gimple_opt_pass *
make_pass_thread_jumps (gcc::context *ctxt)
{
  return new pass_thread_jumps (ctxt);
}
namespace {

const pass_data pass_data_early_thread_jumps =
{
  GIMPLE_PASS,
  "ethread",
  OPTGROUP_NONE,
  TV_TREE_SSA_THREAD_JUMPS,
  ( PROP_cfg | PROP_ssa ),
  0,
  0,
  0,
  ( TODO_cleanup_cfg | TODO_update_ssa ),
};
class pass_early_thread_jumps : public gimple_opt_pass
{
public:
  pass_early_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_early_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};
bool
pass_early_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return true;
}
unsigned int
pass_early_thread_jumps::execute (function *fun)
{
  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
	find_jump_threads_backwards (bb, false);
    }
  thread_through_all_blocks (true);
  return 0;
}
}

gimple_opt_pass *
make_pass_early_thread_jumps (gcc::context *ctxt)
{
  return new pass_early_thread_jumps (ctxt);
}