/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass detects the loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that in case the new variable is not needed, ivopts will get rid
   of it, so it might only be a problem when there are no other linear
   induction variables.  In that case the created optimization possibilities
   are likely to pay off.

   Additionally, in case we detect that it is beneficial to unroll the
   loop completely, we do it right here to expose the optimization
   possibilities to the following passes.  */
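
/* Schematically, for a loop whose latch is known to execute 3 times
   (so the body runs 4 times), the pass rewrites

       i = start;                        ivtmp = 4;
       do { ... i += step; }     ==>     do { ... ivtmp -= 1; }
       while (i < bound);                while (ivtmp != 0);

   i.e. a fresh IV counting down to zero replaces the original exit test.
   This is a hypothetical source-level sketch; the transformation is done
   on GIMPLE by create_canonical_iv below, and the original IV is left
   for ivopts to clean up.  */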
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "flags.h"
#include "tree-inline.h"
#include "target.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"
/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,   /* Only loops that exit immediately in the first
                       iteration.  */
  UL_NO_GROWTH,     /* Only loops whose unrolling will not cause increase
                       of code size.  */
  UL_ALL            /* All suitable loops.  */
};
/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */

static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gimple cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = last_stmt (exit->src);
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
                       niter,
                       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
             build_int_cst (type, -1),
             NULL_TREE, loop,
             &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}
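
/* For example, with NITER == 3 (the latch runs 3 times, so the body runs
   4 times) the new IV is created with base NITER + 1 == 4 and step -1,
   and the exit condition becomes a test of the decremented variable
   against 0; the loop leaves once the IV reaches 0, after exactly
   NITER + 1 executions of the body.  (A hypothetical walk-through of the
   arithmetic above, not an excerpt from a dump.)  */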
/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computation based on induction
     variable where induction variable starts at known constant.)  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};
/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look whether there is a constant array inside.  */
      while (handled_component_p (base))
        base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
           && ctor_for_folding (base) != error_mark_node)
          || CONSTANT_CLASS_P (base))
        {
          /* If so, see if we understand all the indices.  */
          base = op;
          while (handled_component_p (base))
            {
              if (TREE_CODE (base) == ARRAY_REF
                  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt,
                                              loop))
                return false;
              base = TREE_OPERAND (base, 0);
            }
          return true;
        }
      return false;
    }

  /* Induction variables are constants.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}
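
/* Illustration (hypothetical): with "static const int tbl[4]" and an IV i
   whose initial value and step are known constants, both "i * 2" (an
   affine function of the IV) and "tbl[i]" (a read from a constant array
   at an IV index) are considered constant after peeling.  */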
/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but last
   iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimate benefits for complete unrolling exiting
   by EXIT.  Stop estimating after UPPER_BOUND is met.  Return true in this
   case.  */

static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
                         struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
          && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
        after_exit = true;
      else
        after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
                 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          int num = estimate_num_insns (stmt, &eni_size_weights);
          bool likely_eliminated = false;
          bool likely_eliminated_last = false;
          bool likely_eliminated_peeled = false;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  size: %3i ", num);
              print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
            }

          /* Look for reasons why we might optimize this stmt away.  */

          if (gimple_has_side_effects (stmt))
            ;
          /* Exit conditional.  */
          else if (exit && body[i] == exit->src
                   && stmt == last_stmt (exit->src))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Exit condition will be eliminated "
                         "in peeled copies.\n");
              likely_eliminated_peeled = true;
            }
          else if (edge_to_cancel && body[i] == edge_to_cancel->src
                   && stmt == last_stmt (edge_to_cancel->src))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Exit condition will be eliminated "
                         "in last copy.\n");
              likely_eliminated_last = true;
            }
          /* Sets of IV variables.  */
          else if (gimple_code (stmt) == GIMPLE_ASSIGN
                   && constant_after_peeling (gimple_assign_lhs (stmt), stmt,
                                              loop))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Induction variable computation will"
                         " be folded away.\n");
              likely_eliminated = true;
            }
          /* Assignments of IV variables.  */
          else if (gimple_code (stmt) == GIMPLE_ASSIGN
                   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
                   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt,
                                              loop)
                   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
                       || constant_after_peeling (gimple_assign_rhs2 (stmt),
                                                  stmt, loop)))
            {
              size->constant_iv = true;
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file,
                         "   Constant expression will be folded away.\n");
              likely_eliminated = true;
            }
          /* Conditionals.  */
          else if ((gimple_code (stmt) == GIMPLE_COND
                    && constant_after_peeling (gimple_cond_lhs (stmt), stmt,
                                               loop)
                    && constant_after_peeling (gimple_cond_rhs (stmt), stmt,
                                               loop))
                   || (gimple_code (stmt) == GIMPLE_SWITCH
                       && constant_after_peeling (gimple_switch_index (
                                                    as_a <gimple_switch> (stmt)),
                                                  stmt, loop)))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "   Constant conditional.\n");
              likely_eliminated = true;
            }

          size->overall += num;
          if (likely_eliminated || likely_eliminated_peeled)
            size->eliminated_by_peeling += num;
          if (!after_exit)
            {
              size->last_iteration += num;
              if (likely_eliminated || likely_eliminated_last)
                size->last_iteration_eliminated_by_peeling += num;
            }
          if ((size->overall * 3 / 2 - size->eliminated_by_peeling
               - size->last_iteration_eliminated_by_peeling) > upper_bound)
            {
              free (body);
              path.release ();
              return true;
            }
        }
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (gimple_code (stmt) == GIMPLE_CALL)
            {
              int flags = gimple_call_flags (stmt);
              tree decl = gimple_call_fndecl (stmt);

              if (decl && DECL_IS_BUILTIN (decl)
                  && is_inexpensive_builtin (decl))
                ;
              else if (flags & (ECF_PURE | ECF_CONST))
                size->num_pure_calls_on_hot_path++;
              else
                size->num_non_pure_calls_on_hot_path++;
              size->num_branches_on_hot_path++;
            }
          else if (gimple_code (stmt) != GIMPLE_CALL
                   && gimple_code (stmt) != GIMPLE_DEBUG)
            size->non_call_stmts_on_hot_path++;
          if (((gimple_code (stmt) == GIMPLE_COND
                && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt,
                                             loop)
                    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
                                                loop)))
               || (gimple_code (stmt) == GIMPLE_SWITCH
                   && !constant_after_peeling (gimple_switch_index (
                                                 as_a <gimple_switch> (stmt)),
                                               stmt, loop)))
              && (!exit || bb != exit->src))
            size->num_branches_on_hot_path++;
        }
    }
  path.release ();

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
             size->eliminated_by_peeling, size->last_iteration,
             size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}
/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body, taking into account
   the fact that in last copy everything after exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   Loop body is likely going to simplify further; this is difficult
   to guess, so we just decrease the result by 1/3.  */

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
                         unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
                             * (HOST_WIDE_INT) (size->overall
                                                - size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration
               - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}
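
/* Worked example with made-up numbers: for overall == 10,
   eliminated_by_peeling == 4, last_iteration == 8,
   last_iteration_eliminated_by_peeling == 6 and nunroll == 3, this
   computes 3 * (10 - 4) + (8 - 6) == 20 insns, scaled by 2/3 to 13.  */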
/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit, without at
   the same time making any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

     int a[5];
     for (i=0;i<b;i++)
       a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from induction variable.

   For now we handle only the simple case where there is exit condition
   just before the latch block and the latch block contains no statements
   with side effect that may otherwise terminate the execution of loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */

static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
         leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
        continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
        edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
        edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We only can handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
        continue;

      exits.release ();

      /* Verify that the code in loop latch does nothing that may end program
         execution without really reaching the exit.  This may include
         non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
        if (gimple_has_side_effects (gsi_stmt (gsi)))
          return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}
/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
         into unreachable (or trap when debugging experience is supposed
         to be good).  */
      if (!elt->is_exit
          && wi::ltu_p (elt->bound, npeeled))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
          gimple_call stmt = gimple_build_call
              (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);

          gimple_set_location (stmt, gimple_location (elt->stmt));
          gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
          changed = true;
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced statement unreachable: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
        }
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
               && wi::leu_p (elt->bound, npeeled))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Forced exit to be taken: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);
          gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
          gimple_cond cond_stmt = as_a <gimple_cond> (elt->stmt);
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_true (cond_stmt);
          else
            gimple_cond_make_false (cond_stmt);
          update_stmt (cond_stmt);
          changed = true;
        }
    }
  return changed;
}
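
/* For illustration (hypothetical numbers): after peeling 4 iterations, an
   exit whose recorded bound says it must be taken within the first 4
   iterations is folded so the remaining copy always exits, while a
   statement whose bound proves it executes fewer than 4 times can no
   longer be reached and gets __builtin_unreachable () placed before it.  */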
/* Remove all exits that are known to be never taken because of the loop
   bound discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
         upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
          && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
        {
          basic_block bb = gimple_bb (elt->stmt);
          edge exit_edge = EDGE_SUCC (bb, 0);
          struct tree_niter_desc niter;

          if (!loop_exit_edge_p (loop, exit_edge))
            exit_edge = EDGE_SUCC (bb, 1);

          /* Only when we know the actual number of iterations, not
             just a bound, can we remove the exit.  */
          if (!number_of_iterations_exit (loop, exit_edge,
                                          &niter, false, false)
              || !integer_onep (niter.assumptions)
              || !integer_zerop (niter.may_be_zero)
              || !niter.niter
              || TREE_CODE (niter.niter) != INTEGER_CST
              || !wi::ltu_p (loop->nb_iterations_upper_bound,
                             wi::to_widest (niter.niter)))
            continue;

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "Removed pointless exit: ");
              print_gimple_stmt (dump_file, elt->stmt, 0, 0);
            }
          gimple_cond cond_stmt = as_a <gimple_cond> (elt->stmt);
          if (exit_edge->flags & EDGE_TRUE_VALUE)
            gimple_cond_make_false (cond_stmt);
          else
            gimple_cond_make_true (cond_stmt);
          update_stmt (cond_stmt);
          changed = true;
        }
    }
  return changed;
}
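
/* E.g. in "for (i = 0; i < n; i++) { if (i == 1000) break; ... }" where
   bound discovery already proved at most 5 iterations, the "i == 1000"
   exit (which would need 1000 iterations to trigger) can never be taken
   and its conditional is folded to stay in the loop.  (Hypothetical
   example.)  */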
/* Stores loops that will be unlooped after we process whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries trashing loop closed SSA form as well
   as SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
              bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gimple_call stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
         it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE),
                                0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch),
                              flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest,
                               latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();
}
/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies bound on number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */

static bool
try_unroll_loop_completely (struct loop *loop,
                            edge exit, tree niter,
                            enum unroll_level ul,
                            HOST_WIDE_INT maxiter,
                            location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved number of iterations to be low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
     leaving from the iv test.  */
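  /* Schematically, for the standard IV exit test

         if (ivtmp_1 == 0)
           goto exit_loop;       <- EXIT: known untaken in the peeled copies
         else
           goto next_iteration;  <- EDGE_TO_CANCEL: cancelled in last copy

     so every peeled copy falls through into the next one and the last copy
     always exits.  (Hypothetical sketch, not dump output.)  */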
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
        edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we can not eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT) maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we can not
         remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
  if (n_unroll > max_unroll)
    return false;

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      edge e;
      unsigned i;
      bool large;
      vec<edge> to_remove = vNULL;
      if (ul == UL_SINGLE_ITER)
        return false;

      large = tree_estimate_loop_size
                 (loop, exit, edge_to_cancel, &size,
                  PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
                     loop->num);
          return false;
        }

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
          fprintf (dump_file, "  Estimated size after unrolling: %d\n",
                   (int) unr_insns);
        }

      /* If the code is going to shrink, we don't need to be extra cautious
         on guessing if the unrolling is going to be profitable.  */
      if (unr_insns
          /* If there is IV variable that will become constant, we save
             one instruction in the loop prologue we do not account
             otherwise.  */
          <= ninsns + (size.constant_iv != false))
        ;
      /* We unroll only inner loops, because we do not consider it profitable
         otherwise.  We still can cancel loopback edge of not rolling loop;
         this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
                     loop->num);
          return false;
        }
      /* Outer loops tend to be less interesting candidates for complete
         unrolling unless we can do a lot of propagation into the inner loop
         body.  For now we disable outer loop unrolling when the code would
         grow.  */
      else if (loop->inner)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "it is not innermost and code would grow.\n",
                     loop->num);
          return false;
        }
      /* If there is a call on a hot path through the loop, then
         there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "contains call and code would grow.\n",
                     loop->num);
          return false;
        }
      /* If there is a pure/const call in the function, then we
         can still optimize the unrolled loop body if it contains
         some other interesting code than the calls and code
         storing or accumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
               /* One IV increment, one test, one ivtmp store
                  and one useful stmt.  That is about minimal loop
                  doing pure call.  */
               && (size.non_call_stmts_on_hot_path
                   <= 3 + size.num_pure_calls_on_hot_path))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "contains just pure calls and code would grow.\n",
                     loop->num);
          return false;
        }
      /* Complete unrolling is a major win when control flow is removed and
         one big basic block is created.  If the loop contains control flow
         the optimization may still be a win because of eliminating the loop
         overhead but it also may blow the branch predictor tables.
         Limit number of branches on the hot path through the peeled
         sequence.  */
      else if (size.num_branches_on_hot_path * (int) n_unroll
               > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "number of branches on hot path in the unrolled"
                     " sequence reaches --param max-peel-branches limit.\n",
                     loop->num);
          return false;
        }
      else if (unr_insns
               > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Not unrolling loop %d: "
                     "(--param max-completely-peeled-insns limit reached).\n",
                     loop->num);
          return false;
        }

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop,
                                                 loop_preheader_edge (loop),
                                                 n_unroll, wont_exit,
                                                 exit, &to_remove,
                                                 DLTHE_FLAG_UPDATE_FREQ
                                                 | DLTHE_FLAG_COMPLETTE_PEEL))
        {
          free_original_copy_tables ();
          free (wont_exit);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "Failed to duplicate the loop\n");
          return false;
        }

      FOR_EACH_VEC_ELT (to_remove, i, e)
        {
          bool ok = remove_path (e);
          gcc_assert (ok);
        }

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gimple_cond cond = as_a <gimple_cond> (last_stmt (edge_to_cancel->src));
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
        gimple_cond_make_false (cond);
      else
        gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path.  Doing so may remove outer loop
         and confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                         "loop turned into non-loop; it never loops\n");
      else
        {
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
                           "loop with %d iterations completely unrolled",
                           (int) (n_unroll + 1));
          if (profile_info)
            dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
                         " (header execution count %d)",
                         (int) loop->header->count);
          dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
        }
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
        fprintf (dump_file, "Exit condition of peeled iterations was "
                 "eliminated.\n");
      if (edge_to_cancel)
        fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
        fprintf (dump_file, "Latch of last iteration was marked by "
                 "__builtin_unreachable ().\n");
    }

  return true;
}
/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.  Returns true if cfg is changed.  */

static bool
canonicalize_loop_induction_variables (struct loop *loop,
                                       bool create_iv, enum unroll_level ul,
                                       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
         for # of iterations determinable through scev.  */
      if (!exit)
        niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
          && (chrec_contains_undetermined (niter)
              || TREE_CODE (niter) != INTEGER_CST))
        niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
        locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
        exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for the future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, wi::to_widest (niter),
                          exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
               (int) maxiter);
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after compilation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  return modified;
}
/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
                                                        true, UL_SINGLE_ITER,
                                                        true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}
/* Propagate VAL into all uses of SSA_NAME.  */

static void
propagate_into_all_uses (tree ssa_name, tree val)
{
  imm_use_iterator iter;
  gimple use_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
    {
      gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
      use_operand_p use;

      FOR_EACH_IMM_USE_ON_STMT (use, iter)
        SET_USE (use, val);

      if (is_gimple_assign (use_stmt)
          && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
             == GIMPLE_SINGLE_RHS)
        {
          tree rhs = gimple_assign_rhs1 (use_stmt);

          if (TREE_CODE (rhs) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr (rhs);
        }

      fold_stmt_inplace (&use_stmt_gsi);
      update_stmt (use_stmt);
      maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
    }
}
/* Propagate constant SSA_NAMEs defined in basic block BB.  */

static void
propagate_constants_for_unrolling (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Look for degenerate PHI nodes with constant argument.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gimple phi = gsi_stmt (gsi);
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
        {
          propagate_into_all_uses (result, arg);
          gsi_remove (&gsi, true);
          release_ssa_name (result);
        }
      else
        gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == INTEGER_CST
          && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
          && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        {
          propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
          gsi_remove (&gsi, true);
          release_ssa_name (lhs);
        }
      else
        gsi_next (&gsi);
    }
}
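
/* After unrolling, the copied bodies typically contain code like

     i_5 = PHI <2(3)>    -> single-argument PHI with constant: uses of
                            i_5 are replaced by 2 and the PHI removed
     j_8 = 7;            -> constant assignment: 7 propagated into all
                            uses of j_8, statement removed

   so the next unroller iteration sees already-folded bodies.
   (Hypothetical GIMPLE, for illustration.)  */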
/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
                                vec<loop_p, va_heap>& father_stack,
                                struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
                                               unroll_outer, father_stack,
                                               inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
         not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
        (loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
         within the new basic blocks to fold away induction variable
         computations; otherwise, the size might blow up before the
         iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father) && !loop_father->aux)
        {
          father_stack.safe_push (loop_father);
          loop_father->aux = loop_father;
        }

      return true;
    }

  return false;
}
/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  auto_vec<loop_p, 16> father_stack;
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
        loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates ();
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
                                                unroll_outer, father_stack,
                                                current_loops->tree_root);
      if (changed)
        {
          struct loop **iter;
          unsigned i;

          /* Be sure to skip unlooped loops while processing father_stack
             array.  */
          FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
            (*iter)->aux = NULL;
          FOR_EACH_VEC_ELT (father_stack, i, iter)
            if (!(*iter)->aux)
              *iter = NULL;
          unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

          /* We can not use TODO_update_ssa_no_phi because VOPS gets
             confused.  */
          if (loop_closed_ssa_invalidated
              && !bitmap_empty_p (loop_closed_ssa_invalidated))
            rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
                                          TODO_update_ssa);
          else
            update_ssa (TODO_update_ssa);

          /* Propagate the constants within the new basic blocks.  */
          FOR_EACH_VEC_ELT (father_stack, i, iter)
            if (*iter)
              {
                unsigned j;
                basic_block *body = get_loop_body_in_dom_order (*iter);
                for (j = 0; j < (*iter)->num_nodes; j++)
                  propagate_constants_for_unrolling (body[j]);
                free (body);
                (*iter)->aux = NULL;
              }
          father_stack.truncate (0);

          /* This will take care of removing completely unrolled loops
             from the loop structures so we can continue unrolling now
             innermost loops.  */
          if (cleanup_tree_cfg ())
            update_ssa (TODO_update_ssa_only_virtuals);

          /* Clean up the information about numbers of iterations, since
             complete unrolling might have invalidated it.  */
          scev_reset ();
#ifdef ENABLE_CHECKING
          if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
            verify_loop_closed_ssa (true);
#endif
        }
      if (loop_closed_ssa_invalidated)
        BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
         && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  father_stack.release ();

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}
/* Canonical induction variable creation pass.  */

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}
/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_unroll_loops_completely (flag_unroll_loops
                                       || flag_peel_loops
                                       || optimize >= 3, true);
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}
/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL
                       | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      free_numbers_of_iterations_estimates ();
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}