gcc/tree-ssa-threadbackward.c
/* SSA Jump Threading
   Copyright (C) 2005-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "predict.h"
#include "tree.h"
#include "gimple.h"
#include "fold-const.h"
#include "cfgloop.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-threadupdate.h"
#include "params.h"
#include "tree-ssa-loop.h"
#include "cfganal.h"
#include "tree-pass.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "tree-inline.h"
#include "tree-vectorizer.h"

static int max_threaded_paths;
/* Simple helper to get the last statement from BB, which is assumed
   to be a control statement.  Return NULL if the last statement is
   not a control statement.  */

static gimple *
get_gimple_control_stmt (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);

  if (gsi_end_p (gsi))
    return NULL;

  gimple *stmt = gsi_stmt (gsi);
  enum gimple_code code = gimple_code (stmt);
  if (code == GIMPLE_COND || code == GIMPLE_SWITCH || code == GIMPLE_GOTO)
    return stmt;
  return NULL;
}
/* Return true if the CFG contains at least one path from START_BB to END_BB.
   When a path is found, record in PATH the blocks from END_BB to START_BB.
   VISITED_BBS is used to make sure we don't fall into an infinite loop.  Bound
   the recursion to basic blocks belonging to LOOP.
   SPEED_P indicates that we could increase code size to improve the code path.  */
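/* A small illustrative sketch (hypothetical block numbers): for a CFG
   fragment bb2 -> bb3 -> bb5 with START_BB = bb2 and END_BB = bb5, blocks
   are pushed on the way back out of the recursion, so PATH ends up as
   { bb5, bb3, bb2 }, i.e. END_BB first and START_BB last.  */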
static bool
fsm_find_thread_path (basic_block start_bb, basic_block end_bb,
                      vec<basic_block, va_gc> *&path,
                      hash_set<basic_block> *visited_bbs, loop_p loop,
                      bool speed_p)
{
  if (loop != start_bb->loop_father)
    return false;

  if (start_bb == end_bb)
    {
      vec_safe_push (path, start_bb);
      return true;
    }

  if (!visited_bbs->add (start_bb))
    {
      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, start_bb->succs)
        if (fsm_find_thread_path (e->dest, end_bb, path, visited_bbs, loop,
                                  speed_p))
          {
            vec_safe_push (path, start_bb);
            return true;
          }
    }

  return false;
}
/* Examine jump threading path PATH to which we want to add BBI.

   If the resulting path is profitable to thread, then return the
   final taken edge from the path, NULL otherwise.

   NAME is the SSA_NAME of the variable we found to have a constant
   value on PATH.  ARG is the value of that SSA_NAME.

   BBI will be appended to PATH when we have a profitable jump threading
   path.  Callers are responsible for removing BBI from PATH in that case.

   SPEED_P indicates that we could increase code size to improve the code path.  */
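/* For illustration (hypothetical names): if NAME is x_3 and we discovered
   that x_3 has the constant value 7 along PATH, then ARG is the INTEGER_CST
   7 and BBI is the block the constant flows in from; BBI becomes the start
   of the path, and its outgoing edge into the rest of PATH is what would be
   redirected into the copied blocks.  */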
static edge
profitable_jump_thread_path (vec<basic_block, va_gc> *&path,
                             basic_block bbi, tree name, tree arg, bool speed_p,
                             bool *creates_irreducible_loop)
{
  /* Note BBI is not in the path yet, hence the +1 in the test below
     to make sure BBI is accounted for in the path length test.  */
  int path_length = path->length ();

  /* We can get a length of 0 here when the statement that
     makes the conditional generate a compile-time constant
     result is in the same block as the conditional.

     That's not really a jump threading opportunity, but instead is
     simple cprop & simplification.  We could handle it here if we
     wanted by wiring up all the incoming edges.  If we run this
     early in IPA, that might be worth doing.  For now we just
     reject that case.  */
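  /* For example (hypothetical GIMPLE): given "x_1 = 0;  if (x_1 != 0)"
     within a single block, the constant is already known in the block with
     the conditional, so there is nothing to duplicate; plain constant
     propagation would simplify the branch without any threading.  */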
  if (path_length == 0)
    return NULL;

  if (path_length + 1 > PARAM_VALUE (PARAM_MAX_FSM_THREAD_LENGTH))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of basic blocks on the path "
                 "exceeds PARAM_MAX_FSM_THREAD_LENGTH.\n");
      return NULL;
    }

  if (max_threaded_paths <= 0)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the number of previously recorded FSM paths to "
                 "thread exceeds PARAM_MAX_FSM_THREAD_PATHS.\n");
      return NULL;
    }

  /* Add BBI to the path.
     From this point onward, if we decide the path is not profitable
     to thread, we must remove BBI from the path.  */
  vec_safe_push (path, bbi);
  ++path_length;

  int n_insns = 0;
  gimple_stmt_iterator gsi;
  int j;
  loop_p loop = (*path)[0]->loop_father;
  bool path_crosses_loops = false;
  bool threaded_through_latch = false;
  bool multiway_branch_in_path = false;
  bool threaded_multiway_branch = false;

  /* Count the number of instructions on the path: as these instructions
     will have to be duplicated, we will not record the path if there
     are too many instructions on the path.  Also check that all the
     blocks in the path belong to a single loop.  */
  for (j = 0; j < path_length; j++)
    {
      basic_block bb = (*path)[j];

      /* Remember, blocks in the path are stored in opposite order
         in the PATH array.  The last entry in the array represents
         the block with an outgoing edge that we will redirect to the
         jump threading path.  Thus we don't care about that block's
         loop father, nor how many statements are in that block because
         it will not be copied, nor whether or not it ends in a multiway
         branch.  */
      if (j < path_length - 1)
        {
          if (bb->loop_father != loop)
            {
              path_crosses_loops = true;
              break;
            }

          /* PHIs in the path will create degenerate PHIs in the
             copied path which will then get propagated away, so
             looking at just the duplicate path the PHIs would
             seem unimportant.

             But those PHIs, because they're assignments to objects
             typically with lives that exist outside the thread path,
             will tend to generate PHIs (or at least new PHI arguments)
             at points where we leave the thread path and rejoin
             the original blocks.  So we do want to account for them.

             We ignore virtual PHIs.  We also ignore cases where BB
             has a single incoming edge.  That's the most common
             degenerate PHI we'll see here.  Finally we ignore PHIs
             that are associated with the value we're tracking as
             that object likely dies.  */
          if (EDGE_COUNT (bb->succs) > 1 && EDGE_COUNT (bb->preds) > 1)
            {
              for (gphi_iterator gsip = gsi_start_phis (bb);
                   !gsi_end_p (gsip);
                   gsi_next (&gsip))
                {
                  gphi *phi = gsip.phi ();
                  tree dst = gimple_phi_result (phi);

                  /* Note that if both NAME and DST are anonymous
                     SSA_NAMEs, then we do not have enough information
                     to consider them associated.  */
                  if (dst != name
                      && (SSA_NAME_VAR (dst) != SSA_NAME_VAR (name)
                          || !SSA_NAME_VAR (dst))
                      && !virtual_operand_p (dst))
                    ++n_insns;
                }
            }

          for (gsi = gsi_after_labels (bb);
               !gsi_end_p (gsi);
               gsi_next_nondebug (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              /* Do not count empty statements and labels.  */
              if (gimple_code (stmt) != GIMPLE_NOP
                  && !(gimple_code (stmt) == GIMPLE_ASSIGN
                       && gimple_assign_rhs_code (stmt) == ASSERT_EXPR)
                  && !is_gimple_debug (stmt))
                n_insns += estimate_num_insns (stmt, &eni_size_weights);
            }

          /* We do not look at the block with the threaded branch
             in this loop.  So if any block with a last statement that
             is a GIMPLE_SWITCH or GIMPLE_GOTO is seen, then we have a
             multiway branch on our path.

             The block in PATH[0] is special, it's the block where we're
             going to be able to eliminate its branch.  */
          gimple *last = last_stmt (bb);
          if (last && (gimple_code (last) == GIMPLE_SWITCH
                       || gimple_code (last) == GIMPLE_GOTO))
            {
              if (j == 0)
                threaded_multiway_branch = true;
              else
                multiway_branch_in_path = true;
            }
        }

      /* Note if we thread through the latch, we will want to include
         the last entry in the array when determining if we thread
         through the loop latch.  */
      if (loop->latch == bb)
        threaded_through_latch = true;
    }

  gimple *stmt = get_gimple_control_stmt ((*path)[0]);
  gcc_assert (stmt);

  /* We are going to remove the control statement at the end of the
     last block in the threading path.  So don't count it against our
     statement count.  */

  n_insns -= estimate_num_insns (stmt, &eni_size_weights);

  /* We have found a constant value for ARG.  For GIMPLE_SWITCH
     and GIMPLE_GOTO, we use it as-is.  However, for a GIMPLE_COND
     we need to substitute, fold and simplify so we can determine
     the edge taken out of the last block.  */
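  /* For example (hypothetical SSA names): if the control statement is
     "if (x_2 != 0)" and ARG is the constant 1, then the fold below computes
     1 != 0, i.e. boolean true, and find_taken_edge uses that value to pick
     the outgoing true edge of PATH[0].  */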
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      enum tree_code cond_code = gimple_cond_code (stmt);

      /* We know the underlying format of the condition.  */
      arg = fold_binary (cond_code, boolean_type_node,
                         arg, gimple_cond_rhs (stmt));
    }

  /* If this path threaded through the loop latch back into the
     same loop and the destination does not dominate the loop
     latch, then this thread would create an irreducible loop.

     We have to know the outgoing edge to figure this out.  */
  edge taken_edge = find_taken_edge ((*path)[0], arg);

  /* There are cases where we may not be able to extract the
     taken edge.  For example, a computed goto to an absolute
     address.  Handle those cases gracefully.  */
  if (taken_edge == NULL)
    {
      path->pop ();
      return NULL;
    }

  *creates_irreducible_loop = false;
  if (threaded_through_latch
      && loop == taken_edge->dest->loop_father
      && (determine_bb_domination_status (loop, taken_edge->dest)
          == DOMST_NONDOMINATING))
    *creates_irreducible_loop = true;

  if (path_crosses_loops)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "the path crosses loops.\n");
      path->pop ();
      return NULL;
    }

  if (speed_p && optimize_edge_for_speed_p (taken_edge))
    {
      if (n_insns >= PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATH_INSNS))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "FSM jump-thread path not considered: "
                     "the number of instructions on the path "
                     "exceeds PARAM_MAX_FSM_THREAD_PATH_INSNS.\n");
          path->pop ();
          return NULL;
        }
    }
  else if (n_insns > 1)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "FSM jump-thread path not considered: "
                 "duplication of %i insns is needed and optimizing for size.\n",
                 n_insns);
      path->pop ();
      return NULL;
    }

  /* We avoid creating irreducible inner loops unless we thread through
     a multiway branch, in which case we have deemed it worth losing
     other loop optimizations later.

     We also consider it worth creating an irreducible inner loop if
     the number of copied statements is low relative to the length of
     the path -- in that case there's little the traditional loop
     optimizer would have done anyway, so an irreducible loop is not
     so bad.  */
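  /* To make the check below concrete with purely hypothetical parameter
     values: if PARAM_FSM_SCALE_PATH_STMTS were 2 and
     PARAM_FSM_SCALE_PATH_BLOCKS were 3, a 4-block path copying 7 statements
     would be rejected (7 * 2 = 14 > 4 * 3 = 12), while the same path copying
     5 statements would be allowed (5 * 2 = 10 <= 12).  */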
  if (!threaded_multiway_branch && *creates_irreducible_loop
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
          > path_length * PARAM_VALUE (PARAM_FSM_SCALE_PATH_BLOCKS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM would create irreducible loop without threading "
                 "multiway branch.\n");
      path->pop ();
      return NULL;
    }

  /* If this path does not thread through the loop latch, then we are
     using the FSM threader to find old style jump threads.  This
     is good, except the FSM threader does not re-use an existing
     threading path to reduce code duplication.

     So for that case, drastically reduce the number of statements
     we are allowed to copy.  */
  if (!(threaded_through_latch && threaded_multiway_branch)
      && (n_insns * PARAM_VALUE (PARAM_FSM_SCALE_PATH_STMTS)
          >= PARAM_VALUE (PARAM_MAX_JUMP_THREAD_DUPLICATION_STMTS)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM did not thread around loop and would copy too "
                 "many statements.\n");
      path->pop ();
      return NULL;
    }

  /* When there is a multi-way branch on the path, then threading can
     explode the CFG due to duplicating the edges for that multi-way
     branch.  So like above, only allow a multi-way branch on the path
     if we actually thread a multi-way branch.  */
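  /* Concretely, each copy of a block that ends in a multi-way branch
     carries a copy of every outgoing edge of that branch, so copying such
     a block several times multiplies edges in the CFG without making the
     branch itself any cheaper -- unless that multi-way branch is the one
     being threaded.  */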
  if (!threaded_multiway_branch && multiway_branch_in_path)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file,
                 "FSM Thread through multiway branch without threading "
                 "a multiway branch.\n");
      path->pop ();
      return NULL;
    }
  return taken_edge;
}

/* PATH is a vector of blocks forming a jump threading path in reverse
   order.  TAKEN_EDGE is the edge taken from path[0].

   Convert that path into the form used by register_jump_thread and
   register the path.  */
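/* As an illustration (hypothetical block numbers): a PATH of { bb5, bb3,
   bb2 } is walked from the end of the vector towards the front, producing
   jump_thread_edges bb2->bb3 and bb3->bb5 marked EDGE_FSM_THREAD, followed
   by TAKEN_EDGE (the edge out of bb5) marked EDGE_NO_COPY_SRC_BLOCK.  */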
static void
convert_and_register_jump_thread_path (vec<basic_block, va_gc> *path,
                                       edge taken_edge)
{
  vec<jump_thread_edge *> *jump_thread_path = new vec<jump_thread_edge *> ();

  /* Record the edges between the blocks in PATH.  */
  for (unsigned int j = 0; j < path->length () - 1; j++)
    {
      basic_block bb1 = (*path)[path->length () - j - 1];
      basic_block bb2 = (*path)[path->length () - j - 2];

      edge e = find_edge (bb1, bb2);
      gcc_assert (e);
      jump_thread_edge *x = new jump_thread_edge (e, EDGE_FSM_THREAD);
      jump_thread_path->safe_push (x);
    }

  /* Add the edge taken when the control variable has value ARG.  */
  jump_thread_edge *x
    = new jump_thread_edge (taken_edge, EDGE_NO_COPY_SRC_BLOCK);
  jump_thread_path->safe_push (x);

  register_jump_thread (jump_thread_path);
  --max_threaded_paths;
}

/* We trace the value of the SSA_NAME NAME back through any phi nodes looking
   for places where it gets a constant value and save the path.  Stop after
   having recorded MAX_PATHS jump threading paths.

   SPEED_P indicates that we could increase code size to improve the code path.  */
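/* A small sketch of the tracing (hypothetical SSA names): given
   "x_1 = PHI <0(bb3), y_2(bb4)>", the INTEGER_CST argument 0 arriving from
   bb3 yields a candidate path that is handed to profitable_jump_thread_path,
   while the SSA_NAME argument y_2 is followed recursively through its own
   defining statement.  */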
static void
fsm_find_control_statement_thread_paths (tree name,
                                         hash_set<basic_block> *visited_bbs,
                                         vec<basic_block, va_gc> *&path,
                                         bool seen_loop_phi, bool speed_p)
{
  /* If NAME appears in an abnormal PHI, then don't try to trace its
     value back through PHI nodes.  */
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
    return;

  gimple *def_stmt = SSA_NAME_DEF_STMT (name);
  basic_block var_bb = gimple_bb (def_stmt);

  if (var_bb == NULL)
    return;

  /* We allow the SSA chain to contain PHIs and simple copies and constant
     initializations.  */
  if (gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_code (def_stmt) != GIMPLE_ASSIGN)
    return;

  if (gimple_code (def_stmt) == GIMPLE_PHI
      && (gimple_phi_num_args (def_stmt)
          >= (unsigned) PARAM_VALUE (PARAM_FSM_MAXIMUM_PHI_ARGUMENTS)))
    return;

  if (gimple_code (def_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (def_stmt) != INTEGER_CST
      && gimple_assign_rhs_code (def_stmt) != SSA_NAME)
    return;

  /* Avoid infinite recursion.  */
  if (visited_bbs->add (var_bb))
    return;

  int next_path_length = 0;
  basic_block last_bb_in_path = path->last ();

  if (loop_containing_stmt (def_stmt)->header == gimple_bb (def_stmt))
    {
      /* Do not walk through more than one loop PHI node.  */
      if (seen_loop_phi)
        return;
      seen_loop_phi = true;
    }

  /* Following the chain of SSA_NAME definitions, we jumped from a definition in
     LAST_BB_IN_PATH to a definition in VAR_BB.  When these basic blocks are
     different, append to PATH the blocks from LAST_BB_IN_PATH to VAR_BB.  */
  if (var_bb != last_bb_in_path)
    {
      edge e;
      int e_count = 0;
      edge_iterator ei;
      vec<basic_block, va_gc> *next_path;
      vec_alloc (next_path, 10);

      /* When VAR_BB == LAST_BB_IN_PATH, then the first block in the path
         will already be in VISITED_BBS.  When they are not equal, then we
         must ensure that first block is accounted for to ensure we do not
         create bogus jump threading paths.  */
      visited_bbs->add ((*path)[0]);
      FOR_EACH_EDGE (e, ei, last_bb_in_path->preds)
        {
          hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

          if (fsm_find_thread_path (var_bb, e->src, next_path, visited_bbs,
                                    e->src->loop_father, speed_p))
            ++e_count;

          delete visited_bbs;

          /* If there is more than one path, stop.  */
          if (e_count > 1)
            {
              vec_free (next_path);
              return;
            }
        }

      /* Stop if we have not found a path: this could occur when the recursion
         is stopped by one of the bounds.  */
      if (e_count == 0)
        {
          vec_free (next_path);
          return;
        }

      /* Make sure we haven't already visited any of the nodes in
         NEXT_PATH.  Don't add them here to avoid pollution.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        {
          if (visited_bbs->contains ((*next_path)[i]))
            {
              vec_free (next_path);
              return;
            }
        }

      /* Now add the nodes to VISITED_BBS.  */
      for (unsigned int i = 0; i < next_path->length () - 1; i++)
        visited_bbs->add ((*next_path)[i]);

      /* Append all the nodes from NEXT_PATH to PATH.  */
      vec_safe_splice (path, next_path);
      next_path_length = next_path->length ();
      vec_free (next_path);
    }

  gcc_assert (path->last () == var_bb);
  /* Iterate over the arguments of PHI.  */
  unsigned int i;
  if (gimple_code (def_stmt) == GIMPLE_PHI)
    {
      gphi *phi = as_a <gphi *> (def_stmt);
      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree arg = gimple_phi_arg_def (phi, i);
          basic_block bbi = gimple_phi_arg_edge (phi, i)->src;

          /* Skip edges pointing outside the current loop.  */
          if (!arg || var_bb->loop_father != bbi->loop_father)
            continue;

          if (TREE_CODE (arg) == SSA_NAME)
            {
              vec_safe_push (path, bbi);
              /* Recursively follow SSA_NAMEs looking for a constant
                 definition.  */
              fsm_find_control_statement_thread_paths (arg, visited_bbs, path,
                                                       seen_loop_phi, speed_p);

              path->pop ();
              continue;
            }

          if (TREE_CODE (arg) != INTEGER_CST)
            continue;

          /* If this is a profitable jump thread path, then convert it
             into the canonical form and register it.  */
          bool irreducible = false;
          edge taken_edge = profitable_jump_thread_path (path, bbi, name, arg,
                                                         speed_p, &irreducible);
          if (taken_edge)
            {
              convert_and_register_jump_thread_path (path, taken_edge);
              path->pop ();

              if (irreducible)
                vect_free_loop_info_assumptions ((*path)[0]->loop_father);
            }
        }
    }
  else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
    {
      tree arg = gimple_assign_rhs1 (def_stmt);

      if (TREE_CODE (arg) == SSA_NAME)
        fsm_find_control_statement_thread_paths (arg, visited_bbs,
                                                 path, seen_loop_phi, speed_p);

      else
        {
          /* profitable_jump_thread_path is going to push the current
             block onto the path.  But the path will always have the current
             block at this point.  So we can just pop it.  */
          path->pop ();

          bool irreducible = false;
          edge taken_edge = profitable_jump_thread_path (path, var_bb,
                                                         name, arg, speed_p,
                                                         &irreducible);
          if (taken_edge)
            {
              convert_and_register_jump_thread_path (path, taken_edge);
              path->pop ();

              if (irreducible)
                vect_free_loop_info_assumptions ((*path)[0]->loop_father);
            }

          /* And put the current block back onto the path so that the
             state of the stack is unchanged when we leave.  */
          vec_safe_push (path, var_bb);
        }
    }

  /* Remove all the nodes that we added from NEXT_PATH.  */
  if (next_path_length)
    vec_safe_truncate (path, (path->length () - next_path_length));
}
/* Search backwards from BB looking for paths where NAME (an SSA_NAME)
   is a constant.  Record such paths for jump threading.

   It is assumed that BB ends with a control statement and that by
   finding a path where NAME is a constant, we can thread the path.
   SPEED_P indicates that we could increase code size to improve the code path.  */
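/* For example (hypothetical GIMPLE): a block ending in "switch (i_5)" starts
   a backward search on i_5, and a block ending in "if (x_3 != 0)" starts one
   on x_3.  A condition comparing two SSA_NAMEs, or one whose operand is not
   of integral or pointer type, is left alone.  */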
void
find_jump_threads_backwards (basic_block bb, bool speed_p)
{
  gimple *stmt = get_gimple_control_stmt (bb);
  if (!stmt)
    return;

  enum gimple_code code = gimple_code (stmt);
  tree name = NULL;
  if (code == GIMPLE_SWITCH)
    name = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    name = gimple_goto_dest (stmt);
  else if (code == GIMPLE_COND)
    {
      if (TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
          && TREE_CODE (gimple_cond_rhs (stmt)) == INTEGER_CST
          && (INTEGRAL_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))
              || POINTER_TYPE_P (TREE_TYPE (gimple_cond_lhs (stmt)))))
        name = gimple_cond_lhs (stmt);
    }

  if (!name || TREE_CODE (name) != SSA_NAME)
    return;

  vec<basic_block, va_gc> *bb_path;
  vec_alloc (bb_path, 10);
  vec_safe_push (bb_path, bb);
  hash_set<basic_block> *visited_bbs = new hash_set<basic_block>;

  max_threaded_paths = PARAM_VALUE (PARAM_MAX_FSM_THREAD_PATHS);
  fsm_find_control_statement_thread_paths (name, visited_bbs, bb_path, false,
                                           speed_p);

  delete visited_bbs;
  vec_free (bb_path);
}
namespace {

const pass_data pass_data_thread_jumps =
{
  GIMPLE_PASS,
  "thread",
  OPTGROUP_NONE,
  TV_TREE_SSA_THREAD_JUMPS,
  ( PROP_cfg | PROP_ssa ),
  0,
  0,
  0,
  TODO_update_ssa,
};

class pass_thread_jumps : public gimple_opt_pass
{
public:
  pass_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};

bool
pass_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return flag_expensive_optimizations;
}


unsigned int
pass_thread_jumps::execute (function *fun)
{
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
        find_jump_threads_backwards (bb, true);
    }
  bool changed = thread_through_all_blocks (true);

  loop_optimizer_finalize ();
  return changed ? TODO_cleanup_cfg : 0;
}

}

gimple_opt_pass *
make_pass_thread_jumps (gcc::context *ctxt)
{
  return new pass_thread_jumps (ctxt);
}
namespace {

const pass_data pass_data_early_thread_jumps =
{
  GIMPLE_PASS,
  "ethread",
  OPTGROUP_NONE,
  TV_TREE_SSA_THREAD_JUMPS,
  ( PROP_cfg | PROP_ssa ),
  0,
  0,
  0,
  ( TODO_cleanup_cfg | TODO_update_ssa ),
};

class pass_early_thread_jumps : public gimple_opt_pass
{
public:
  pass_early_thread_jumps (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_thread_jumps, ctxt)
  {}

  opt_pass * clone (void) { return new pass_early_thread_jumps (m_ctxt); }
  virtual bool gate (function *);
  virtual unsigned int execute (function *);
};

bool
pass_early_thread_jumps::gate (function *fun ATTRIBUTE_UNUSED)
{
  return true;
}


unsigned int
pass_early_thread_jumps::execute (function *fun)
{
  /* Try to thread each block with more than one successor.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    {
      if (EDGE_COUNT (bb->succs) > 1)
        find_jump_threads_backwards (bb, false);
    }
  thread_through_all_blocks (true);
  return 0;
}

}

gimple_opt_pass *
make_pass_early_thread_jumps (gcc::context *ctxt)
{
  return new pass_early_thread_jumps (ctxt);
}