/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass detects loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register
   pressure).  Note that if the new variable is not needed, ivopts will
   get rid of it, so it might only be a problem when there are no other
   linear induction variables.  In that case the created optimization
   possibilities are likely to pay off.

   Additionally, in case we detect that it is beneficial to unroll the
   loop completely, we do it right here to expose the optimization
   possibilities to the following passes.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "flags.h"
#include "tree-inline.h"
#include "target.h"
#include "tree-cfgcleanup.h"
/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   of code size.  */
  UL_ALL		/* All suitable loops.  */
};
/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */
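/* For intuition, a sketch (illustrative only, not the exact GIMPLE this
   function emits): given a loop known to run NITER times, such as

	for (i = 0; i < n; i++)
	  a[i] = 0;

   a fresh counter is created below with initial value NITER + 1 and
   step -1, and the exit test is rewritten to compare it against 0,
   i.e. roughly "if (--ivtmp == 0) exit".  A downward-counting IV tested
   against zero is what the rtl level analysis recognizes easily.  */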
static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gimple cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = last_stmt (exit->src);
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     the type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}
/* Describes the size of a loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of the loop (i.e. computations based on an
     induction variable that starts at a known constant).  */
  int eliminated_by_peeling;

  /* Same statistics for the last iteration of the loop: it is smaller
     because instructions after the exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};
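/* For intuition (an illustrative example, not code from this pass): in

	for (i = 0; i < 4; i++)
	  a[i] = b[i];

   the statements that depend only on i (the index arithmetic and the
   exit test) fold to constants in each peeled copy and so count towards
   eliminated_by_peeling, while the load and store themselves do not.  */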
/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when the index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look whether we see a constant array inside.  */
      while (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
	   && ctor_for_folding (base) != error_mark_node)
	  || CONSTANT_CLASS_P (base))
	{
	  /* If so, see if we understand all the indices.  */
	  base = op;
	  while (handled_component_p (base))
	    {
	      if (TREE_CODE (base) == ARRAY_REF
		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
		return false;
	      base = TREE_OPERAND (base, 0);
	    }
	  return true;
	}
      return false;
    }

  /* Induction variables are constants.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}
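/* Example of the constant-array case above (illustrative only): with

	static const int tab[3] = { 2, 3, 5 };
	...
	for (i = 0; i < 3; i++)
	  s += tab[i];

   tab[i] is not an SSA_NAME, but its base is a constant array and the
   index i is a simple IV with invariant base and step, so after peeling
   each access folds to 2, 3 and 5 respectively.  */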
/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but
   the last iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimating the benefits of complete unrolling
   exiting by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.  */
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
		 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away.  */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
						  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (stmt),
						  stmt, loop)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	       - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
	      free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
		&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
						loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (stmt),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}
/* Estimate the number of insns of the completely unrolled loop.
   It is (NUNROLL + 1) * size of the loop body, taking into account
   the fact that in the last copy everything after the exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   The loop body is likely going to simplify further; this is difficult
   to guess, so we just decrease the result by 1/3.  */
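/* A worked example of the formula below (illustrative numbers only):
   with SIZE->overall = 10, SIZE->eliminated_by_peeling = 4,
   SIZE->last_iteration = 8, SIZE->last_iteration_eliminated_by_peeling = 6
   and NUNROLL = 4, we get 4 * (10 - 4) = 24 insns for the full copies,
   plus 8 - 6 = 2 for the last copy, and 26 * 2 / 3 = 17 after applying
   the 1/3 simplification discount.  */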
static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}
/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit, without making
   any code potentially executed during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

     int a[5];
     for (i = 0; i < b; i++)
       a[i] = 0;

   Here we prove the loop to iterate 5 times but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   that loop-niter identified as having undefined effect in the last
   iteration.  The other cases are hopefully rare and will be cleaned up
   later.  */
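/* A sketch of the only shape handled below (illustrative, source level):
   the exit test is the last statement before the latch and the latch
   contains no statements with side effects:

	loop:
	  ...
	  if (cond) goto out;		<- the loop exit
	latch:
	  goto loop;			<- edge we may cancel

   The candidate edge to cancel is the non-exit edge leaving that
   conditional, i.e. the one falling through to the latch.  */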
static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop latch.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the edge other than the loop exit
	 leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We can only handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to the loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      exits.release ();

      /* Verify that the code in the loop latch does nothing that may end
	 program execution without really reaching the exit.  This may
	 include non-pure/const function calls, EH statements, volatile
	 asms etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}
/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */
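/* For intuition (illustrative only): if the loop was peeled 5 times and
   a recorded bound says some exit must be taken within the first 5
   iterations, the corresponding exit condition can be folded to a
   constant below; and a statement that the bound proves is never
   reached is preceded by a call to __builtin_unreachable ().  */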
static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If the statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when the debugging experience is
	 supposed to be good).  */
      if (!elt->is_exit
	  && elt->bound.ult (double_int::from_uhwi (npeeled)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gimple stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);

	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && elt->bound.ule (double_int::from_uhwi (npeeled)))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (elt->stmt);
	  else
	    gimple_cond_make_false (elt->stmt);
	  update_stmt (elt->stmt);
	  changed = true;
	}
    }
  return changed;
}
/* Remove all exits that are known to be never taken because of the
   discovered loop bound.  */
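/* Illustrative example (not code from this pass): given

	for (i = 0; i < 4; i++)
	  {
	    if (i > 100)
	      break;		<- can never trigger here
	    a[i] = i;
	  }

   the recorded upper bound of the loop proves that the i > 100 exit is
   redundant, so its condition can be folded to keep the loop running.  */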
static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* An exit is pointless if it won't be taken before the loop
	 reaches its upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && loop->nb_iterations_upper_bound.ult (elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, can we remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !loop->nb_iterations_upper_bound.ult
		   (tree_to_double_int (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (elt->stmt);
	  else
	    gimple_cond_make_true (elt->stmt);
	  update_stmt (elt->stmt);
	  changed = true;
	}
    }
  return changed;
}
/* Stores loops that will be unlooped after we process the whole loop
   tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;
/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries, trashing loop closed SSA form as well
   as SCEV info, which needs to remain intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep whether information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */
static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gimple stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch),
			      flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest,
			       latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();
}
/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies a bound on the number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */
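/* For intuition on the EXIT/MAXITER distinction (illustrative only): in
   the a[i] example earlier in this file the bound of 5 iterations comes
   from the array size rather than from the exit test, so MAXITER is
   known while EXIT is not; the loop can still be completely unrolled,
   but the IV test cannot be removed from the last copy.  */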
static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
  gimple cond;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved the number of iterations to be a low constant.

     EXIT is an edge that will be removed in all but the last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two
     edges leaving from the iv test.  */
  if (host_integerp (niter, 1))
    {
      n_unroll = tree_low_cst (niter, 1);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we cannot eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* The loop terminates before the IV variable test, so we cannot
	 remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
  if (n_unroll > max_unroll)
    return false;

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      edge e;
      unsigned i;
      bool large;
      vec<edge> to_remove = vNULL;
      if (ul == UL_SINGLE_ITER)
	return false;

      large = tree_estimate_loop_size
		 (loop, exit, edge_to_cancel, &size,
		  PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
		     loop->num);
	  return false;
	}

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
	  fprintf (dump_file, "  Estimated size after unrolling: %d\n",
		   (int) unr_insns);
	}

      /* If the code is going to shrink, we don't need to be extra
	 cautious on guessing if the unrolling is going to be
	 profitable.  */
      if (unr_insns
	  /* If there is an IV variable that will become constant, we
	     save one instruction in the loop prologue we do not
	     account otherwise.  */
	  <= ninsns + (size.constant_iv != false))
	;
      /* We unroll only inner loops, because we do not consider it
	 profitable otherwise.  We still can cancel the loopback edge
	 of a not rolling loop; this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
		     loop->num);
	  return false;
	}
      /* Outer loops tend to be less interesting candidates for
	 complete unrolling unless we can do a lot of propagation
	 into the inner loop body.  For now we disable outer loop
	 unrolling when the code would grow.  */
      else if (loop->inner)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "it is not innermost and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a call on the hot path through the loop, then
	 there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains call and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a pure/const call in the function, then we can
	 still optimize the unrolled loop body if it contains some
	 other interesting code than the calls and the code storing or
	 accumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
	       /* One IV increment, one test, one ivtmp store and
		  one useful stmt.  That is about the minimal loop
		  doing a pure call.  */
	       && (size.non_call_stmts_on_hot_path
		   <= 3 + size.num_pure_calls_on_hot_path))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains just pure calls and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* Complete unrolling is a major win when control flow is
	 removed and one big basic block is created.  If the loop
	 contains control flow the optimization may still be a win
	 because of eliminating the loop overhead, but it also may blow
	 the branch predictor tables.  Limit the number of branches on
	 the hot path through the peeled sequence.  */
      else if (size.num_branches_on_hot_path * (int)n_unroll
	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "number of branches on the hot path in the unrolled"
		     " sequence reaches the --param max-peel-branches"
		     " limit.\n",
		     loop->num);
	  return false;
	}
      else if (unr_insns
	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "(--param max-completely-peeled-insns limit reached).\n",
		     loop->num);
	  return false;
	}

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop,
						 loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
	  free_original_copy_tables ();
	  free (wont_exit);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      FOR_EACH_VEC_ELT (to_remove, i, e)
	{
	  bool ok = remove_path (e);
	  gcc_assert (ok);
	}

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      cond = last_stmt (edge_to_cancel->src);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path.  Doing so may remove the outer loop and
	 confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			 "loop turned into non-loop; it never loops\n");
      else
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			   "loop with %d iterations completely unrolled",
			   (int) (n_unroll + 1));
	  if (profile_info)
	    dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
			 " (header execution count %d)",
			 (int)loop->header->count);
	  dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
	fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
	fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
	fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}
/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.  Returns true if the cfg is changed.  */
static bool
canonicalize_loop_induction_variables (struct loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for future use.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, tree_to_double_int (niter),
			  exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant
     exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	       (int)maxiter);
    }

  /* Remove exits that are known to be never taken based on the loop
     bound.  This needs to happen after the call to
     max_loop_iterations_int above, which populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  return modified;
}
/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */
unsigned int
canonicalize_induction_variables (void)
{
  loop_iterator li;
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute
     force evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}
/* Propagate VAL into all uses of SSA_NAME.  */

static void
propagate_into_all_uses (tree ssa_name, tree val)
{
  imm_use_iterator iter;
  gimple use_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
    {
      gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
      use_operand_p use;

      FOR_EACH_IMM_USE_ON_STMT (use, iter)
	SET_USE (use, val);

      if (is_gimple_assign (use_stmt)
	  && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
	     == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (use_stmt);

	  if (TREE_CODE (rhs) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr (rhs);
	}

      fold_stmt_inplace (&use_stmt_gsi);
      update_stmt (use_stmt);
      maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
    }
}
/* Propagate constant SSA_NAMEs defined in basic block BB.  */

static void
propagate_constants_for_unrolling (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Look for degenerate PHI nodes with constant argument.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gimple phi = gsi_stmt (gsi);
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
	{
	  propagate_into_all_uses (result, arg);
	  gsi_remove (&gsi, true);
	  release_ssa_name (result);
	}
      else
	gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == INTEGER_CST
	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	{
	  propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
	  gsi_remove (&gsi, true);
	  release_ssa_name (lhs);
	}
      else
	gsi_next (&gsi);
    }
}
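/* For intuition (an illustrative GIMPLE-like sketch): after a loop body
   is completely unrolled, the copies often contain degenerate
   single-argument PHIs and constant assignments such as

	i_5 = PHI <0(2)>
	_6 = i_5 + 1;

   Replacing i_5 by 0 everywhere and folding lets the next unrolling
   iteration see the simplified, smaller body.  */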
/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				vec<loop_p, va_heap>& father_stack,
				struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
					       unroll_outer, father_stack,
					       inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vect)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
	(loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
	 within the new basic blocks to fold away induction variable
	 computations; otherwise, the size might blow up before the
	 iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father) && !loop_father->aux)
	{
	  father_stack.safe_push (loop_father);
	  loop_father->aux = loop_father;
	}

      return true;
    }

  return false;
}
/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */
unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  stack_vec<loop_p, 16> father_stack;
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates ();
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
						unroll_outer, father_stack,
						current_loops->tree_root);
      if (changed)
	{
	  struct loop **iter;
	  unsigned i;

	  /* Be sure to skip unlooped loops while processing the
	     father_stack array.  */
	  FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
	    (*iter)->aux = NULL;
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (!(*iter)->aux)
	      *iter = NULL;
	  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

	  /* We cannot use TODO_update_ssa_no_phi because VOPS gets
	     confused.  */
	  if (loop_closed_ssa_invalidated
	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
	    rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
					  TODO_update_ssa);
	  else
	    update_ssa (TODO_update_ssa);

	  /* Propagate the constants within the new basic blocks.  */
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (*iter)
	      {
		unsigned j;
		basic_block *body = get_loop_body_in_dom_order (*iter);
		for (j = 0; j < (*iter)->num_nodes; j++)
		  propagate_constants_for_unrolling (body[j]);
		free (body);
		(*iter)->aux = NULL;
	      }
	  father_stack.truncate (0);

	  /* This will take care of removing completely unrolled loops
	     from the loop structures so we can continue unrolling now
	     innermost loops.  */
	  if (cleanup_tree_cfg ())
	    update_ssa (TODO_update_ssa_only_virtuals);

	  /* Clean up the information about numbers of iterations, since
	     complete unrolling might have invalidated it.  */
	  scev_reset ();
#ifdef ENABLE_CHECKING
	  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	    verify_loop_closed_ssa (true);
#endif
	}
      if (loop_closed_ssa_invalidated)
	BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  father_stack.release ();

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}
/* Canonical induction variable creation pass.  */

static unsigned int
tree_ssa_loop_ivcanon (void)
{
  if (number_of_loops (cfun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

static bool
gate_tree_ssa_loop_ivcanon (void)
{
  return flag_tree_loop_ivcanon != 0;
}

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_tree_ssa_loop_ivcanon (); }
  unsigned int execute () { return tree_ssa_loop_ivcanon (); }

}; // class pass_iv_canon

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}
/* Complete unrolling of loops.  */

static unsigned int
tree_complete_unroll (void)
{
  if (number_of_loops (cfun) <= 1)
    return 0;

  return tree_unroll_loops_completely (flag_unroll_loops
				       || flag_peel_loops
				       || optimize >= 3, true);
}

static bool
gate_tree_complete_unroll (void)
{
  return true;
}

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_tree_complete_unroll (); }
  unsigned int execute () { return tree_complete_unroll (); }

}; // class pass_complete_unroll

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}
/* Complete unrolling of inner loops.  */

static unsigned int
tree_complete_unroll_inner (void)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL
		       | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (cfun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      free_numbers_of_iterations_estimates ();
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

static bool
gate_tree_complete_unroll_inner (void)
{
  return optimize >= 2;
}

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_verify_flow, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_tree_complete_unroll_inner (); }
  unsigned int execute () { return tree_complete_unroll_inner (); }

}; // class pass_complete_unrolli

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}