/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass detects the loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that in the case the new variable is not needed, ivopts will get rid
   of it, so it might only be a problem when there are no other linear
   induction variables.  In that case the created optimization possibilities
   are likely to pay off.

   We also perform
     - complete unrolling (or peeling) when the loop rolls few enough
       times
     - simple peeling (i.e. copying few initial iterations prior to the loop)
       when the number-of-iterations estimate is known (typically from the
       profile info).  */
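
/* A minimal sketch of the transformation (hypothetical C source and
   schematic GIMPLE, not produced verbatim by this pass): assuming a loop

     for (i = 0; i != n; i++)
       a[i] = 0;

   whose body is proved to execute 101 times (NITER = 100 latch
   executions), a fresh counter with step -1 is introduced and the exit
   test is retargeted to it:

     ivtmp = 101;
     do
       {
	 a[i] = 0;
	 i = i + 1;
	 ivtmp = ivtmp - 1;
       }
     while (ivtmp != 0);

   The old test of i becomes redundant and is cleaned up by later passes;
   the rtl loop optimizers can then read the trip count directly off the
   canonical counter.  */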
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "tm_p.h"
#include "profile.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "flags.h"
#include "tree-inline.h"
#include "target.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"
/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   of code size.  */
  UL_ALL		/* All suitable loops.  */
};
/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */

static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gcond *cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = as_a <gcond *> (last_stmt (exit->src));
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}
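
/* Sanity check of the construction above on a small example (illustrative
   numbers only, not a testcase): for NITER == 2 the counter starts at 3
   and takes the values 2, 1, 0 after the three decrements, so the test
   against zero lets the body execute exactly NITER + 1 times, matching
   number_of_latch_executions plus the final trip through the body.  */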
/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computation based on induction
     variable where induction variable starts at known constant).  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};
/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look to see whether there is a constant array
	 inside.  */
      while (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
	   && ctor_for_folding (base) != error_mark_node)
	  || CONSTANT_CLASS_P (base))
	{
	  /* If so, see if we understand all the indices.  */
	  base = op;
	  while (handled_component_p (base))
	    {
	      if (TREE_CODE (base) == ARRAY_REF
		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt,
					      loop))
		return false;
	      base = TREE_OPERAND (base, 0);
	    }
	  return true;
	}
      return false;
    }

  /* Induction variables are constants.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}
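
/* An example of what the predicate above accepts (hypothetical source
   fragment, for illustration only): in

     static const int table[4] = { 1, 2, 3, 4 };
     for (i = 0; i < 4; i++)
       sum += table[i];

   both the IV "i" (a simple_iv with invariant base and step) and the load
   "table[i]" (a constant array with an understood ARRAY_REF index) count
   as constant after peeling, so the whole body is expected to fold away
   in each peeled copy.  */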
/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but last
   iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimating benefits of complete unrolling that
   exits by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.  */
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
		 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away.  */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && constant_after_peeling (gimple_assign_lhs (stmt), stmt,
					      loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt,
					      loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
						  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 " Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt,
					       loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt,
					       loop))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (
						    as_a <gswitch *> (stmt)),
						  stmt, loop)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	       - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
	      free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path ++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  /* A branch stays on the hot path only if it will not be folded
	     away, i.e. if either operand of the conditional is not constant
	     after peeling.  */
	  if (((gimple_code (stmt) == GIMPLE_COND
		&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt,
					     loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
						loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}
/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body, taking into account
   the fact that in last copy everything after exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   Loop body is likely going to simplify further; as this is difficult
   to guess, we just decrease the result by 1/3.  */
static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration
	       - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}
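
/* A worked example of the estimate above (illustrative numbers, not from
   any particular testcase): for a loop body of OVERALL = 10 insns, of
   which ELIMINATED_BY_PEELING = 4, unrolled NUNROLL = 3 times, with the
   last iteration counting LAST_ITERATION = 8 insns of which 5 are
   eliminated, we get (3 * (10 - 4) + (8 - 5)) * 2 / 3 = 21 * 2 / 3 = 14
   estimated insns.  */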
/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit, without at
   the same time making any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is a quite common case for loops of the form

     int a[5];
     for (i=0;i<b;i++)
       a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */
static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
	 leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We only can handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      exits.release ();

      /* Verify that the code in loop latch does nothing that may end program
	 execution without really reaching the exit.  This may include
	 non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}
/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put __builtin_unreachable () before every statement
   known to not be executed.  */

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).  */
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, npeeled))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gcall *stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);

	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && wi::leu_p (elt->bound, npeeled))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gimple_cond_make_false (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}
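
/* For illustration (hypothetical bounds): if loop analysis recorded that
   an exit test must be taken within the first 3 iterations and the loop
   was peeled NPEELED = 4 times, that exit condition is folded to the
   constant that leaves the loop; a non-exit statement recorded to execute
   at most 2 times is instead preceded by __builtin_unreachable ().  */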
/* Remove all exits that are known to be never taken because of the loop bound
   discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
	 upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, we can remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
			     wi::to_widest (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else
	    gimple_cond_make_true (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}
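
/* A minimal sketch of the situation handled above (hypothetical source,
   for illustration only):

     for (i = 0; i < 10; i++)
       {
	 if (i > 100)	/* Can never trigger: the upper bound is 10.  */
	   break;
	 a[i] = i;
       }

   The inner test is a redundant IV exit (its exact trip count, 101,
   exceeds the discovered upper bound of 10), so it is folded to the
   constant that keeps control in the loop.  */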
/* Stores loops that will be unlooped after we process whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries trashing loop closed SSA form as well
   as SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */
static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gcall *stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE),
				0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch),
			      flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest,
			       latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();
}
/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies the bound on the number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */
static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll = 0, ninsns, unr_insns;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;
  int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;

  /* See if we proved number of iterations to be a low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
     leaving from the iv test.  */
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we can not eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we can not
	 remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  if (n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Not unrolling loop %d "
		 "(--param max-completely-peeled-times limit reached).\n",
		 loop->num);
      return false;
    }

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      edge e;
      unsigned i;
      bool large;
      vec<edge> to_remove = vNULL;
      if (ul == UL_SINGLE_ITER)
	return false;

      large = tree_estimate_loop_size
		 (loop, exit, edge_to_cancel, &size,
		  PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
		     loop->num);
	  return false;
	}

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
	  fprintf (dump_file, "  Estimated size after unrolling: %d\n",
		   (int) unr_insns);
	}

      /* If the code is going to shrink, we don't need to be extra cautious
	 on guessing if the unrolling is going to be profitable.  */
      if (unr_insns
	  /* If there is IV variable that will become constant, we save
	     one instruction in the loop prologue we do not account
	     otherwise.  */
	  <= ninsns + (size.constant_iv != false))
	;
      /* We unroll only inner loops, because we do not consider it profitable
	 otherwise.  We still can cancel loopback edge of not rolling loop;
	 this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
		     loop->num);
	  return false;
	}
      /* Outer loops tend to be less interesting candidates for complete
	 unrolling unless we can do a lot of propagation into the inner loop
	 body.  For now we disable outer loop unrolling when the code would
	 grow.  */
      else if (loop->inner)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "it is not innermost and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a call on a hot path through the loop, then
	 there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains call and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a pure/const call in the loop, then we
	 can still optimize the unrolled loop body if it contains
	 some other interesting code than the calls and code
	 storing or accumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
	       /* One IV increment, one test, one ivtmp store
		  and one useful stmt.  That is about minimal loop
		  doing pure call.  */
	       && (size.non_call_stmts_on_hot_path
		   <= 3 + size.num_pure_calls_on_hot_path))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains just pure calls and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* Complete unrolling is a major win when control flow is removed and
	 one big basic block is created.  If the loop contains control flow
	 the optimization may still be a win because of eliminating the loop
	 overhead but it also may blow the branch predictor tables.
	 Limit number of branches on the hot path through the peeled
	 sequence.  */
      else if (size.num_branches_on_hot_path * (int)n_unroll
	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "number of branches on hot path in the unrolled sequence"
		     " reaches --param max-peel-branches limit.\n",
		     loop->num);
	  return false;
	}
      else if (unr_insns
	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "(--param max-completely-peeled-insns limit reached).\n",
		     loop->num);
	  return false;
	}
      dump_printf_loc (report_flags, locus,
		       "loop turned into non-loop; it never loops.\n");

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop,
						 loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
	  free_original_copy_tables ();
	  free (wont_exit);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      FOR_EACH_VEC_ELT (to_remove, i, e)
	{
	  bool ok = remove_path (e);
	  gcc_assert (ok);
	}

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path.  Doing so may remove outer loop
	 and confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			 "loop turned into non-loop; it never loops\n");
      else
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			   "loop with %d iterations completely unrolled",
			   (int) (n_unroll + 1));
	  if (profile_info)
	    dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
			 " (header execution count %d)",
			 (int)loop->header->count);
	  dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
	fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
	fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
	fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}
/* Return number of instructions after peeling.  */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
				unsigned HOST_WIDE_INT npeel)
{
  return MAX (npeel * (HOST_WIDE_INT) (size->overall
				       - size->eliminated_by_peeling), 1);
}
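
/* For instance (illustrative numbers only): with OVERALL = 12 insns, of
   which ELIMINATED_BY_PEELING = 5, peeling NPEEL = 3 copies is estimated
   at 3 * (12 - 5) = 21 insns, which the caller then compares against
   --param max-peel-insns.  */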
/* If the loop is expected to iterate N times and is
   small enough, duplicate the loop body N+1 times before
   the loop itself.  This way the hot path will never
   enter the loop.
   Parameters are the same as for try_unroll_loop_completely.  */
static bool
try_peel_loop (struct loop *loop,
	       edge exit, tree niter,
	       HOST_WIDE_INT maxiter)
{
  int npeel;
  struct loop_size size;
  int peeled_size;
  sbitmap wont_exit;
  unsigned i;
  vec<edge> to_remove = vNULL;
  edge e;

  /* If the iteration bound is known and large, then we can safely eliminate
     the check in peeled copies.  */
  if (TREE_CODE (niter) != INTEGER_CST)
    exit = NULL;

  if (!flag_peel_loops || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0)
    return false;

  /* Peel only innermost loops.  */
  if (loop->inner)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: outer loop\n");
      return false;
    }

  if (!optimize_loop_for_speed_p (loop))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: cold loop\n");
      return false;
    }

  /* Check if there is an estimate on the number of iterations.  */
  npeel = estimated_loop_iterations_int (loop);
  if (npeel < 0)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: number of iterations is not "
		 "estimated\n");
      return false;
    }
  if (maxiter >= 0 && maxiter <= npeel)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: upper bound is known so can "
		 "unroll completely\n");
      return false;
    }

  /* We want to peel estimated number of iterations + 1 (so we never
     enter the loop on quick path).  Check against PARAM_MAX_PEEL_TIMES
     and be sure to avoid overflows.  */
  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: rolls too much "
		 "(%i + 1 > --param max-peel-times)\n", npeel);
      return false;
    }
  npeel++;

  /* Check peeled loops size.  */
  tree_estimate_loop_size (loop, exit, NULL, &size,
			   PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
  if ((peeled_size = estimated_peeled_sequence_size (&size, npeel))
      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: peeled sequence size is too large "
		 "(%i insns > --param max-peel-insns)", peeled_size);
      return false;
    }

  /* Duplicate possibly eliminating the exits.  */
  initialize_original_copy_tables ();
  wont_exit = sbitmap_alloc (npeel + 1);
  bitmap_ones (wont_exit);
  bitmap_clear_bit (wont_exit, 0);
  if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					     npeel, wont_exit,
					     exit, &to_remove,
					     DLTHE_FLAG_UPDATE_FREQ
					     | DLTHE_FLAG_COMPLETTE_PEEL))
    {
      free_original_copy_tables ();
      free (wont_exit);
      return false;
    }
  FOR_EACH_VEC_ELT (to_remove, i, e)
    {
      bool ok = remove_path (e);
      gcc_assert (ok);
    }
  free (wont_exit);
  free_original_copy_tables ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Peeled loop %d, %i times.\n",
	       loop->num, npeel);
    }
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound -= npeel;
  loop->nb_iterations_estimate = 0;
  /* Make sure to mark loop cold so we do not try to peel it more.  */
  scale_loop_profile (loop, 1, 0);
  loop->header->count = 0;
  return true;
}
/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.  Returns true if cfg is changed.  */
static bool
canonicalize_loop_induction_variables (struct loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for the future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, wi::to_widest (niter),
			  exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	       (int)maxiter);
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after computation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  if (ul == UL_ALL)
    modified |= try_peel_loop (loop, exit, niter, maxiter);

  return modified;
}
/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}
/* Propagate VAL into all uses of SSA_NAME.  */

static void
propagate_into_all_uses (tree ssa_name, tree val)
{
  imm_use_iterator iter;
  gimple use_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
    {
      gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
      use_operand_p use;

      FOR_EACH_IMM_USE_ON_STMT (use, iter)
	SET_USE (use, val);

      if (is_gimple_assign (use_stmt)
	  && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
	     == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (use_stmt);

	  if (TREE_CODE (rhs) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr (rhs);
	}

      fold_stmt_inplace (&use_stmt_gsi);
      update_stmt (use_stmt);
      maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
    }
}
/* Propagate constant SSA_NAMEs defined in basic block BB.  */

static void
propagate_constants_for_unrolling (basic_block bb)
{
  /* Look for degenerate PHI nodes with constant argument.  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
	{
	  propagate_into_all_uses (result, arg);
	  gsi_remove (&gsi, true);
	  release_ssa_name (result);
	}
      else
	gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == INTEGER_CST
	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	{
	  propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
	  gsi_remove (&gsi, true);
	  release_ssa_name (lhs);
	}
      else
	gsi_next (&gsi);
    }
}
/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				vec<loop_p, va_heap>& father_stack,
				struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
					       unroll_outer, father_stack,
					       inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
	(loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
	 within the new basic blocks to fold away induction variable
	 computations; otherwise, the size might blow up before the
	 iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father) && !loop_father->aux)
	{
	  father_stack.safe_push (loop_father);
	  loop_father->aux = loop_father;
	}

      return true;
    }

  return false;
}
/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */
unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  auto_vec<loop_p, 16> father_stack;
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates ();
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
						unroll_outer, father_stack,
						current_loops->tree_root);
      if (changed)
	{
	  struct loop **iter;
	  unsigned i;

	  /* Be sure to skip unlooped loops while processing father_stack
	     array.  */
	  FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
	    (*iter)->aux = NULL;
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (!(*iter)->aux)
	      *iter = NULL;
	  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

	  /* We can not use TODO_update_ssa_no_phi because VOPS gets
	     confused.  */
	  if (loop_closed_ssa_invalidated
	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
	    rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
					  TODO_update_ssa);
	  else
	    update_ssa (TODO_update_ssa);

	  /* Propagate the constants within the new basic blocks.  */
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (*iter)
	      {
		unsigned j;
		basic_block *body = get_loop_body_in_dom_order (*iter);
		for (j = 0; j < (*iter)->num_nodes; j++)
		  propagate_constants_for_unrolling (body[j]);
		free (body);
		(*iter)->aux = NULL;
	      }
	  father_stack.truncate (0);

	  /* This will take care of removing completely unrolled loops
	     from the loop structures so we can continue unrolling now
	     innermost loops.  */
	  if (cleanup_tree_cfg ())
	    update_ssa (TODO_update_ssa_only_virtuals);

	  /* Clean up the information about numbers of iterations, since
	     complete unrolling might have invalidated it.  */
	  scev_reset ();
#ifdef ENABLE_CHECKING
	  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	    verify_loop_closed_ssa (true);
#endif
	}
      if (loop_closed_ssa_invalidated)
	BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  father_stack.release ();

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}
/* Canonical induction variable creation pass.  */

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}
/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return tree_unroll_loops_completely (flag_unroll_loops
				       || flag_peel_loops
				       || optimize >= 3, true);
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}
/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL
		       | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      free_numbers_of_iterations_estimates ();
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}