1 /* Induction variable canonicalization and loop peeling.
2 Copyright (C) 2004-2015 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 /* This pass detects the loops that iterate a constant number of times,
21 adds a canonical induction variable (step -1, tested against 0)
22 and replaces the exit test. This enables the less powerful rtl
23 level analysis to use this information.
25 This might spoil the code in some cases (by increasing register pressure).
26 Note that if the new variable is not needed, ivopts will get rid
27 of it, so it might only be a problem when there are no other linear induction
28 variables. In that case the created optimization possibilities are likely
29 to pay off.
31 We also perform
32 - complete unrolling (or peeling) when the loop rolls few enough
33 times
34 - simple peeling (i.e. copying a few initial iterations prior to the loop)
35 when an estimate of the number of iterations is known (typically from the
36 profile info). */
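/* As a rough illustration only (variable names here are arbitrary), a
   counted loop such as

     for (i = 0; i < n; i++)
       a[i] = 0;

   conceptually becomes, after the canonical IV is added and the exit
   test replaced,

     ivtmp = n;
     do
       {
         a[i] = 0;
         i++;
         ivtmp--;
       }
     while (ivtmp != 0);

   so that later (e.g. RTL-level) passes can read the iteration count
   directly off the exit test.  */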
38 #include "config.h"
39 #include "system.h"
40 #include "coretypes.h"
41 #include "tm.h"
42 #include "alias.h"
43 #include "symtab.h"
44 #include "tree.h"
45 #include "fold-const.h"
46 #include "tm_p.h"
47 #include "profile.h"
48 #include "predict.h"
49 #include "hard-reg-set.h"
50 #include "function.h"
51 #include "dominance.h"
52 #include "cfg.h"
53 #include "basic-block.h"
54 #include "gimple-pretty-print.h"
55 #include "tree-ssa-alias.h"
56 #include "internal-fn.h"
57 #include "gimple-fold.h"
58 #include "tree-eh.h"
59 #include "gimple-expr.h"
60 #include "gimple.h"
61 #include "gimple-iterator.h"
62 #include "gimple-ssa.h"
63 #include "plugin-api.h"
64 #include "ipa-ref.h"
65 #include "cgraph.h"
66 #include "tree-cfg.h"
67 #include "tree-phinodes.h"
68 #include "ssa-iterators.h"
69 #include "stringpool.h"
70 #include "tree-ssanames.h"
71 #include "tree-ssa-loop-manip.h"
72 #include "tree-ssa-loop-niter.h"
73 #include "tree-ssa-loop.h"
74 #include "tree-into-ssa.h"
75 #include "cfgloop.h"
76 #include "tree-pass.h"
77 #include "tree-chrec.h"
78 #include "tree-scalar-evolution.h"
79 #include "params.h"
80 #include "flags.h"
81 #include "tree-inline.h"
82 #include "target.h"
83 #include "tree-cfgcleanup.h"
84 #include "builtins.h"
86 /* Specifies types of loops that may be unrolled. */
88 enum unroll_level
90 UL_SINGLE_ITER, /* Only loops that exit immediately in the first
91 iteration. */
92 UL_NO_GROWTH, /* Only loops whose unrolling will not cause increase
93 of code size. */
94 UL_ALL /* All suitable loops. */
97 /* Adds a canonical induction variable to LOOP iterating NITER times. EXIT
98 is the exit edge whose condition is replaced. */
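/* For example (a sketch): if the latch of LOOP executes NITER times, the
   new counter starts at NITER + 1 on entry to the loop, is decremented by
   one in each iteration, and the exit condition is rewritten to compare
   the counter against zero.  */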
100 static void
101 create_canonical_iv (struct loop *loop, edge exit, tree niter)
103 edge in;
104 tree type, var;
105 gcond *cond;
106 gimple_stmt_iterator incr_at;
107 enum tree_code cmp;
109 if (dump_file && (dump_flags & TDF_DETAILS))
111 fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
112 print_generic_expr (dump_file, niter, TDF_SLIM);
113 fprintf (dump_file, " iterations.\n");
116 cond = as_a <gcond *> (last_stmt (exit->src));
117 in = EDGE_SUCC (exit->src, 0);
118 if (in == exit)
119 in = EDGE_SUCC (exit->src, 1);
121 /* Note that we do not need to worry about overflows, since
122 the type of niter is always unsigned and all comparisons are
123 just for equality/nonequality -- i.e. everything works
124 with modulo arithmetic. */
126 type = TREE_TYPE (niter);
127 niter = fold_build2 (PLUS_EXPR, type,
128 niter,
129 build_int_cst (type, 1));
130 incr_at = gsi_last_bb (in->src);
131 create_iv (niter,
132 build_int_cst (type, -1),
133 NULL_TREE, loop,
134 &incr_at, false, NULL, &var);
136 cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
137 gimple_cond_set_code (cond, cmp);
138 gimple_cond_set_lhs (cond, var);
139 gimple_cond_set_rhs (cond, build_int_cst (type, 0));
140 update_stmt (cond);
143 /* Describe size of loop as detected by tree_estimate_loop_size. */
144 struct loop_size
146 /* Number of instructions in the loop. */
147 int overall;
149 /* Number of instructions that will likely be optimized out in
150 peeled iterations of the loop (i.e. computations based on an induction
151 variable that starts at a known constant.) */
152 int eliminated_by_peeling;
154 /* Same statistics for last iteration of loop: it is smaller because
155 instructions after exit are not executed. */
156 int last_iteration;
157 int last_iteration_eliminated_by_peeling;
159 /* If some IV computation will become constant. */
160 bool constant_iv;
162 /* Number of call stmts that are not a builtin and are pure or const
163 present on the hot path. */
164 int num_pure_calls_on_hot_path;
165 /* Number of call stmts that are not a builtin and are not pure nor const
166 present on the hot path. */
167 int num_non_pure_calls_on_hot_path;
168 /* Number of statements other than calls seen on the hot path. */
169 int non_call_stmts_on_hot_path;
170 /* Number of branches seen on the hot path. */
171 int num_branches_on_hot_path;
174 /* Return true if OP in STMT will be constant after peeling LOOP. */
176 static bool
177 constant_after_peeling (tree op, gimple stmt, struct loop *loop)
179 affine_iv iv;
181 if (is_gimple_min_invariant (op))
182 return true;
184 /* We can still fold accesses to constant arrays when index is known. */
185 if (TREE_CODE (op) != SSA_NAME)
187 tree base = op;
189 /* First take a quick look to see whether there is a constant array inside. */
190 while (handled_component_p (base))
191 base = TREE_OPERAND (base, 0);
192 if ((DECL_P (base)
193 && ctor_for_folding (base) != error_mark_node)
194 || CONSTANT_CLASS_P (base))
196 /* If so, see if we understand all the indices. */
197 base = op;
198 while (handled_component_p (base))
200 if (TREE_CODE (base) == ARRAY_REF
201 && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
202 return false;
203 base = TREE_OPERAND (base, 0);
205 return true;
207 return false;
210 /* Induction variables are constants. */
211 if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
212 return false;
213 if (!is_gimple_min_invariant (iv.base))
214 return false;
215 if (!is_gimple_min_invariant (iv.step))
216 return false;
217 return true;
220 /* Computes an estimated number of insns in LOOP.
221 EXIT (if non-NULL) is an exit edge that will be eliminated in all but the last
222 iteration of the loop.
223 EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last iteration
224 of the loop.
225 Return results in SIZE, estimating the benefit of complete unrolling exiting by EXIT.
226 Stop estimating after UPPER_BOUND is met. Return true in this case. */
228 static bool
229 tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel, struct loop_size *size,
230 int upper_bound)
232 basic_block *body = get_loop_body (loop);
233 gimple_stmt_iterator gsi;
234 unsigned int i;
235 bool after_exit;
236 vec<basic_block> path = get_loop_hot_path (loop);
238 size->overall = 0;
239 size->eliminated_by_peeling = 0;
240 size->last_iteration = 0;
241 size->last_iteration_eliminated_by_peeling = 0;
242 size->num_pure_calls_on_hot_path = 0;
243 size->num_non_pure_calls_on_hot_path = 0;
244 size->non_call_stmts_on_hot_path = 0;
245 size->num_branches_on_hot_path = 0;
246 size->constant_iv = 0;
248 if (dump_file && (dump_flags & TDF_DETAILS))
249 fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
250 for (i = 0; i < loop->num_nodes; i++)
252 if (edge_to_cancel && body[i] != edge_to_cancel->src
253 && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
254 after_exit = true;
255 else
256 after_exit = false;
257 if (dump_file && (dump_flags & TDF_DETAILS))
258 fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index, after_exit);
260 for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
262 gimple stmt = gsi_stmt (gsi);
263 int num = estimate_num_insns (stmt, &eni_size_weights);
264 bool likely_eliminated = false;
265 bool likely_eliminated_last = false;
266 bool likely_eliminated_peeled = false;
268 if (dump_file && (dump_flags & TDF_DETAILS))
270 fprintf (dump_file, " size: %3i ", num);
271 print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
274 /* Look for reasons why we might optimize this stmt away. */
276 if (gimple_has_side_effects (stmt))
278 /* Exit conditional. */
279 else if (exit && body[i] == exit->src
280 && stmt == last_stmt (exit->src))
282 if (dump_file && (dump_flags & TDF_DETAILS))
283 fprintf (dump_file, " Exit condition will be eliminated "
284 "in peeled copies.\n");
285 likely_eliminated_peeled = true;
287 else if (edge_to_cancel && body[i] == edge_to_cancel->src
288 && stmt == last_stmt (edge_to_cancel->src))
290 if (dump_file && (dump_flags & TDF_DETAILS))
291 fprintf (dump_file, " Exit condition will be eliminated "
292 "in last copy.\n");
293 likely_eliminated_last = true;
295 /* Sets of IV variables */
296 else if (gimple_code (stmt) == GIMPLE_ASSIGN
297 && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
299 if (dump_file && (dump_flags & TDF_DETAILS))
300 fprintf (dump_file, " Induction variable computation will"
301 " be folded away.\n");
302 likely_eliminated = true;
304 /* Assignments of IV variables. */
305 else if (gimple_code (stmt) == GIMPLE_ASSIGN
306 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
307 && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
308 && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
309 || constant_after_peeling (gimple_assign_rhs2 (stmt),
310 stmt, loop)))
312 size->constant_iv = true;
313 if (dump_file && (dump_flags & TDF_DETAILS))
314 fprintf (dump_file, " Constant expression will be folded away.\n");
315 likely_eliminated = true;
317 /* Conditionals. */
318 else if ((gimple_code (stmt) == GIMPLE_COND
319 && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
320 && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
321 || (gimple_code (stmt) == GIMPLE_SWITCH
322 && constant_after_peeling (gimple_switch_index (
323 as_a <gswitch *> (stmt)),
324 stmt, loop)))
326 if (dump_file && (dump_flags & TDF_DETAILS))
327 fprintf (dump_file, " Constant conditional.\n");
328 likely_eliminated = true;
331 size->overall += num;
332 if (likely_eliminated || likely_eliminated_peeled)
333 size->eliminated_by_peeling += num;
334 if (!after_exit)
336 size->last_iteration += num;
337 if (likely_eliminated || likely_eliminated_last)
338 size->last_iteration_eliminated_by_peeling += num;
340 if ((size->overall * 3 / 2 - size->eliminated_by_peeling
341 - size->last_iteration_eliminated_by_peeling) > upper_bound)
343 free (body);
344 path.release ();
345 return true;
349 while (path.length ())
351 basic_block bb = path.pop ();
352 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
354 gimple stmt = gsi_stmt (gsi);
355 if (gimple_code (stmt) == GIMPLE_CALL)
357 int flags = gimple_call_flags (stmt);
358 tree decl = gimple_call_fndecl (stmt);
360 if (decl && DECL_IS_BUILTIN (decl)
361 && is_inexpensive_builtin (decl))
363 else if (flags & (ECF_PURE | ECF_CONST))
364 size->num_pure_calls_on_hot_path++;
365 else
366 size->num_non_pure_calls_on_hot_path++;
367 size->num_branches_on_hot_path ++;
369 else if (gimple_code (stmt) != GIMPLE_CALL
370 && gimple_code (stmt) != GIMPLE_DEBUG)
371 size->non_call_stmts_on_hot_path++;
372 if (((gimple_code (stmt) == GIMPLE_COND
373 && (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
374 || !constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
375 || (gimple_code (stmt) == GIMPLE_SWITCH
376 && !constant_after_peeling (gimple_switch_index (
377 as_a <gswitch *> (stmt)),
378 stmt, loop)))
379 && (!exit || bb != exit->src))
380 size->num_branches_on_hot_path++;
383 path.release ();
384 if (dump_file && (dump_flags & TDF_DETAILS))
385 fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
386 size->eliminated_by_peeling, size->last_iteration,
387 size->last_iteration_eliminated_by_peeling);
389 free (body);
390 return false;
393 /* Estimate the number of insns in the completely unrolled loop.
394 It is (NUNROLL + 1) * size of loop body, taking into account
395 the fact that in the last copy everything after the exit conditional
396 is dead and that some instructions will be eliminated after
397 peeling.
399 The loop body is likely going to simplify further; this is difficult
400 to guess, so we just decrease the result by 1/3. */
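/* A worked example with hypothetical numbers: for SIZE->overall = 10,
   SIZE->eliminated_by_peeling = 4, SIZE->last_iteration = 8,
   SIZE->last_iteration_eliminated_by_peeling = 6 and NUNROLL = 3, the raw
   estimate is 3 * (10 - 4) + (8 - 6) = 20 insns, which the 1/3 reduction
   below brings down to 13.  */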
402 static unsigned HOST_WIDE_INT
403 estimated_unrolled_size (struct loop_size *size,
404 unsigned HOST_WIDE_INT nunroll)
406 HOST_WIDE_INT unr_insns = ((nunroll)
407 * (HOST_WIDE_INT) (size->overall
408 - size->eliminated_by_peeling));
409 if (!nunroll)
410 unr_insns = 0;
411 unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;
413 unr_insns = unr_insns * 2 / 3;
414 if (unr_insns <= 0)
415 unr_insns = 1;
417 return unr_insns;
420 /* Loop LOOP is known to not loop. See if there is an edge in the loop
421 body that can be removed to make the loop always exit and at
422 the same time not make any code potentially executed
423 during the last iteration dead.
425 After complete unrolling we may still get rid of the conditional
426 on the exit in the last copy even if we have no idea what it does.
427 This is a quite common case for loops of the form
429 int a[5];
430 for (i=0;i<b;i++)
431 a[i]=0;
433 Here we prove that the loop iterates at most 5 times, but we do not know
434 it from the induction variable.
436 For now we handle only the simple case where there is an exit condition
437 just before the latch block and the latch block contains no statements
438 with side effects that may otherwise terminate the execution of the loop
439 (such as EH, terminating the program, or longjmp).
441 In the general case we may want to cancel the paths leading to statements
442 that loop-niter identified as having undefined effect in the last iteration.
443 The other cases are hopefully rare and will be cleaned up later. */
445 static edge
446 loop_edge_to_cancel (struct loop *loop)
448 vec<edge> exits;
449 unsigned i;
450 edge edge_to_cancel;
451 gimple_stmt_iterator gsi;
453 /* We want only one predecessor of the loop latch. */
454 if (EDGE_COUNT (loop->latch->preds) > 1)
455 return NULL;
457 exits = get_loop_exit_edges (loop);
459 FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
461 /* Find the edge other than the loop exit that
462 leaves the conditional. */
463 if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
464 continue;
465 if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
466 edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
467 else
468 edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);
470 /* We can only handle conditionals. */
471 if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
472 continue;
474 /* We should never have conditionals in the loop latch. */
475 gcc_assert (edge_to_cancel->dest != loop->header);
477 /* Check that it leads to loop latch. */
478 if (edge_to_cancel->dest != loop->latch)
479 continue;
481 exits.release ();
483 /* Verify that the code in loop latch does nothing that may end program
484 execution without really reaching the exit. This may include
485 non-pure/const function calls, EH statements, volatile ASMs etc. */
486 for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
487 if (gimple_has_side_effects (gsi_stmt (gsi)))
488 return NULL;
489 return edge_to_cancel;
491 exits.release ();
492 return NULL;
495 /* Remove all tests for exits that are known to be taken after LOOP was
496 peeled NPEELED times. Put gcc_unreachable before every statement
497 known to not be executed. */
499 static bool
500 remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
502 struct nb_iter_bound *elt;
503 bool changed = false;
505 for (elt = loop->bounds; elt; elt = elt->next)
507 /* If statement is known to be undefined after peeling, turn it
508 into unreachable (or trap when debugging experience is supposed
509 to be good). */
510 if (!elt->is_exit
511 && wi::ltu_p (elt->bound, npeeled))
513 gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
514 gcall *stmt = gimple_build_call
515 (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
516 gimple_set_location (stmt, gimple_location (elt->stmt));
517 gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
518 split_block (gimple_bb (stmt), stmt);
519 changed = true;
520 if (dump_file && (dump_flags & TDF_DETAILS))
522 fprintf (dump_file, "Forced statement unreachable: ");
523 print_gimple_stmt (dump_file, elt->stmt, 0, 0);
526 /* If we know the exit will be taken after peeling, update. */
527 else if (elt->is_exit
528 && wi::leu_p (elt->bound, npeeled))
530 basic_block bb = gimple_bb (elt->stmt);
531 edge exit_edge = EDGE_SUCC (bb, 0);
533 if (dump_file && (dump_flags & TDF_DETAILS))
535 fprintf (dump_file, "Forced exit to be taken: ");
536 print_gimple_stmt (dump_file, elt->stmt, 0, 0);
538 if (!loop_exit_edge_p (loop, exit_edge))
539 exit_edge = EDGE_SUCC (bb, 1);
540 gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
541 gcond *cond_stmt = as_a <gcond *> (elt->stmt);
542 if (exit_edge->flags & EDGE_TRUE_VALUE)
543 gimple_cond_make_true (cond_stmt);
544 else
545 gimple_cond_make_false (cond_stmt);
546 update_stmt (cond_stmt);
547 changed = true;
550 return changed;
553 /* Remove all exits that are known to be never taken because of the loop bound
554 discovered. */
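/* For example (a sketch): if the recorded bounds prove that the loop runs
   at most 3 times, an exit whose condition can only become true on the
   100th iteration is never taken, so its test is folded and the exit edge
   becomes dead.  */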
556 static bool
557 remove_redundant_iv_tests (struct loop *loop)
559 struct nb_iter_bound *elt;
560 bool changed = false;
562 if (!loop->any_upper_bound)
563 return false;
564 for (elt = loop->bounds; elt; elt = elt->next)
566 /* Exit is pointless if it won't be taken before loop reaches
567 upper bound. */
568 if (elt->is_exit && loop->any_upper_bound
569 && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
571 basic_block bb = gimple_bb (elt->stmt);
572 edge exit_edge = EDGE_SUCC (bb, 0);
573 struct tree_niter_desc niter;
575 if (!loop_exit_edge_p (loop, exit_edge))
576 exit_edge = EDGE_SUCC (bb, 1);
578 /* Only when we know the actual number of iterations, not
579 just a bound, can we remove the exit. */
580 if (!number_of_iterations_exit (loop, exit_edge,
581 &niter, false, false)
582 || !integer_onep (niter.assumptions)
583 || !integer_zerop (niter.may_be_zero)
584 || !niter.niter
585 || TREE_CODE (niter.niter) != INTEGER_CST
586 || !wi::ltu_p (loop->nb_iterations_upper_bound,
587 wi::to_widest (niter.niter)))
588 continue;
590 if (dump_file && (dump_flags & TDF_DETAILS))
592 fprintf (dump_file, "Removed pointless exit: ");
593 print_gimple_stmt (dump_file, elt->stmt, 0, 0);
595 gcond *cond_stmt = as_a <gcond *> (elt->stmt);
596 if (exit_edge->flags & EDGE_TRUE_VALUE)
597 gimple_cond_make_false (cond_stmt);
598 else
599 gimple_cond_make_true (cond_stmt);
600 update_stmt (cond_stmt);
601 changed = true;
604 return changed;
607 /* Stores loops that will be unlooped after we process the whole loop tree. */
608 static vec<loop_p> loops_to_unloop;
609 static vec<int> loops_to_unloop_nunroll;
611 /* Cancel all fully unrolled loops by putting __builtin_unreachable
612 on the latch edge.
613 We do it after all unrolling since unlooping moves basic blocks
614 across loop boundaries, trashing loop-closed SSA form as well
615 as SCEV info that needs to stay intact during unrolling.
617 IRRED_INVALIDATED is used to bookkeep whether information about
618 irreducible regions may become invalid as a result
619 of the transformation.
620 LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
621 when we need to go into loop-closed SSA form. */
623 static void
624 unloop_loops (bitmap loop_closed_ssa_invalidated,
625 bool *irred_invalidated)
627 while (loops_to_unloop.length ())
629 struct loop *loop = loops_to_unloop.pop ();
630 int n_unroll = loops_to_unloop_nunroll.pop ();
631 basic_block latch = loop->latch;
632 edge latch_edge = loop_latch_edge (loop);
633 int flags = latch_edge->flags;
634 location_t locus = latch_edge->goto_locus;
635 gcall *stmt;
636 gimple_stmt_iterator gsi;
638 remove_exits_and_undefined_stmts (loop, n_unroll);
640 /* Unloop destroys the latch edge. */
641 unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);
643 /* Create new basic block for the latch edge destination and wire
644 it in. */
645 stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
646 latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
647 latch_edge->probability = 0;
648 latch_edge->count = 0;
649 latch_edge->flags |= flags;
650 latch_edge->goto_locus = locus;
652 latch_edge->dest->loop_father = current_loops->tree_root;
653 latch_edge->dest->count = 0;
654 latch_edge->dest->frequency = 0;
655 set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);
657 gsi = gsi_start_bb (latch_edge->dest);
658 gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
660 loops_to_unloop.release ();
661 loops_to_unloop_nunroll.release ();
664 /* Tries to unroll LOOP completely, i.e. NITER times.
665 UL determines which loops we are allowed to unroll.
666 EXIT is the exit of the loop that should be eliminated.
667 MAXITER specifies the bound on the number of iterations, -1 if it is
668 not known or too large for HOST_WIDE_INT. The location
669 LOCUS corresponding to the loop is used when emitting
670 a summary of the unroll to the dump file. */
672 static bool
673 try_unroll_loop_completely (struct loop *loop,
674 edge exit, tree niter,
675 enum unroll_level ul,
676 HOST_WIDE_INT maxiter,
677 location_t locus)
679 unsigned HOST_WIDE_INT n_unroll = 0, ninsns, unr_insns;
680 struct loop_size size;
681 bool n_unroll_found = false;
682 edge edge_to_cancel = NULL;
683 int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;
685 /* See if we proved the number of iterations to be a low constant.
687 EXIT is an edge that will be removed in all but the last iteration of
688 the loop.
690 EDGE_TO_CANCEL is an edge that will be removed from the last iteration
691 of the unrolled sequence and is expected to make the final loop not
692 roll.
694 If the number of executions of the loop is determined by a standard
695 induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
696 leaving the iv test. */
697 if (tree_fits_uhwi_p (niter))
699 n_unroll = tree_to_uhwi (niter);
700 n_unroll_found = true;
701 edge_to_cancel = EDGE_SUCC (exit->src, 0);
702 if (edge_to_cancel == exit)
703 edge_to_cancel = EDGE_SUCC (exit->src, 1);
705 /* We do not know the number of iterations and thus we can not eliminate
706 the EXIT edge. */
707 else
708 exit = NULL;
710 /* See if we can improve our estimate by using recorded loop bounds. */
711 if (maxiter >= 0
712 && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
714 n_unroll = maxiter;
715 n_unroll_found = true;
716 /* Loop terminates before the IV variable test, so we can not
717 remove it in the last iteration. */
718 edge_to_cancel = NULL;
721 if (!n_unroll_found)
722 return false;
724 if (n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
726 if (dump_file && (dump_flags & TDF_DETAILS))
727 fprintf (dump_file, "Not unrolling loop %d "
728 "(--param max-completely-peeled-times limit reached).\n",
729 loop->num);
730 return false;
733 if (!edge_to_cancel)
734 edge_to_cancel = loop_edge_to_cancel (loop);
736 if (n_unroll)
738 sbitmap wont_exit;
739 edge e;
740 unsigned i;
741 bool large;
742 vec<edge> to_remove = vNULL;
743 if (ul == UL_SINGLE_ITER)
744 return false;
746 large = tree_estimate_loop_size
747 (loop, exit, edge_to_cancel, &size,
748 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
749 ninsns = size.overall;
750 if (large)
752 if (dump_file && (dump_flags & TDF_DETAILS))
753 fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
754 loop->num);
755 return false;
758 unr_insns = estimated_unrolled_size (&size, n_unroll);
759 if (dump_file && (dump_flags & TDF_DETAILS))
761 fprintf (dump_file, " Loop size: %d\n", (int) ninsns);
762 fprintf (dump_file, " Estimated size after unrolling: %d\n",
763 (int) unr_insns);
766 /* If the code is going to shrink, we don't need to be extra cautious
767 about guessing whether the unrolling is going to be profitable. */
768 if (unr_insns
769 /* If there is an IV variable that will become constant, we save
770 one instruction in the loop prologue that we do not account
771 for otherwise. */
772 <= ninsns + (size.constant_iv != false))
774 /* We unroll only inner loops, because we do not consider it profitable
775 otherwise. We can still cancel the loopback edge of a loop that does
776 not roll; this is always a good idea. */
777 else if (ul == UL_NO_GROWTH)
779 if (dump_file && (dump_flags & TDF_DETAILS))
780 fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
781 loop->num);
782 return false;
784 /* Outer loops tend to be less interesting candidates for complete
785 unrolling unless we can do a lot of propagation into the inner loop
786 body. For now we disable outer loop unrolling when the code would
787 grow. */
788 else if (loop->inner)
790 if (dump_file && (dump_flags & TDF_DETAILS))
791 fprintf (dump_file, "Not unrolling loop %d: "
792 "it is not innermost and code would grow.\n",
793 loop->num);
794 return false;
796 /* If there is a call on a hot path through the loop, then
797 there is most probably not much to optimize. */
798 else if (size.num_non_pure_calls_on_hot_path)
800 if (dump_file && (dump_flags & TDF_DETAILS))
801 fprintf (dump_file, "Not unrolling loop %d: "
802 "contains call and code would grow.\n",
803 loop->num);
804 return false;
806 /* If there is a pure/const call on the hot path through the loop, then we
807 can still optimize the unrolled loop body if it contains
808 some interesting code other than the calls and the code
809 storing or accumulating the return value. */
810 else if (size.num_pure_calls_on_hot_path
811 /* One IV increment, one test, one ivtmp store
812 and one useful stmt. That is about the minimal loop
813 doing a pure call. */
814 && (size.non_call_stmts_on_hot_path
815 <= 3 + size.num_pure_calls_on_hot_path))
817 if (dump_file && (dump_flags & TDF_DETAILS))
818 fprintf (dump_file, "Not unrolling loop %d: "
819 "contains just pure calls and code would grow.\n",
820 loop->num);
821 return false;
823 /* Complete unrolling is a major win when control flow is removed and
824 one big basic block is created. If the loop contains control flow
825 the optimization may still be a win because of eliminating the loop
826 overhead, but it also may blow the branch predictor tables.
827 Limit the number of branches on the hot path through the peeled
828 sequence. */
829 else if (size.num_branches_on_hot_path * (int)n_unroll
830 > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
832 if (dump_file && (dump_flags & TDF_DETAILS))
833 fprintf (dump_file, "Not unrolling loop %d: "
834 " number of branches on hot path in the unrolled sequence"
835 " reach --param max-peel-branches limit.\n",
836 loop->num);
837 return false;
839 else if (unr_insns
840 > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
842 if (dump_file && (dump_flags & TDF_DETAILS))
843 fprintf (dump_file, "Not unrolling loop %d: "
844 "(--param max-completely-peeled-insns limit reached).\n",
845 loop->num);
846 return false;
848 dump_printf_loc (report_flags, locus,
849 "loop turned into non-loop; it never loops.\n");
851 initialize_original_copy_tables ();
852 wont_exit = sbitmap_alloc (n_unroll + 1);
853 bitmap_ones (wont_exit);
854 bitmap_clear_bit (wont_exit, 0);
856 if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
857 n_unroll, wont_exit,
858 exit, &to_remove,
859 DLTHE_FLAG_UPDATE_FREQ
860 | DLTHE_FLAG_COMPLETTE_PEEL))
862 free_original_copy_tables ();
863 free (wont_exit);
864 if (dump_file && (dump_flags & TDF_DETAILS))
865 fprintf (dump_file, "Failed to duplicate the loop\n");
866 return false;
869 FOR_EACH_VEC_ELT (to_remove, i, e)
871 bool ok = remove_path (e);
872 gcc_assert (ok);
875 to_remove.release ();
876 free (wont_exit);
877 free_original_copy_tables ();
881 /* Remove the conditional from the last copy of the loop. */
882 if (edge_to_cancel)
884 gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
885 if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
886 gimple_cond_make_false (cond);
887 else
888 gimple_cond_make_true (cond);
889 update_stmt (cond);
890 /* Do not remove the path, as doing so may remove the outer loop
891 and confuse bookkeeping code in tree_unroll_loops_completely. */
894 /* Store the loop for later unlooping and exit removal. */
895 loops_to_unloop.safe_push (loop);
896 loops_to_unloop_nunroll.safe_push (n_unroll);
898 if (dump_enabled_p ())
900 if (!n_unroll)
901 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
902 "loop turned into non-loop; it never loops\n");
903 else
905 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
906 "loop with %d iterations completely unrolled",
907 (int) (n_unroll + 1));
908 if (profile_info)
909 dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
910 " (header execution count %d)",
911 (int)loop->header->count);
912 dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
916 if (dump_file && (dump_flags & TDF_DETAILS))
918 if (exit)
919 fprintf (dump_file, "Exit condition of peeled iterations was "
920 "eliminated.\n");
921 if (edge_to_cancel)
922 fprintf (dump_file, "Last iteration exit edge was proved true.\n");
923 else
924 fprintf (dump_file, "Latch of last iteration was marked by "
925 "__builtin_unreachable ().\n");
928 return true;
931 /* Return number of instructions after peeling. */
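/* For example (hypothetical numbers): with SIZE->overall = 10,
   SIZE->eliminated_by_peeling = 4 and NPEEL = 3, the estimate is
   3 * (10 - 4) = 18 insns.  */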
932 static unsigned HOST_WIDE_INT
933 estimated_peeled_sequence_size (struct loop_size *size,
934 unsigned HOST_WIDE_INT npeel)
936 return MAX (npeel * (HOST_WIDE_INT) (size->overall
937 - size->eliminated_by_peeling), 1);
940 /* If the loop is expected to iterate N times and is
941 small enough, duplicate the loop body N+1 times before
942 the loop itself. This way the hot path will never
943 enter the loop.
944 Parameters are the same as for try_unroll_loop_completely. */
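/* For example (a sketch): if the profile estimates that the loop body runs
   about twice per entry, NPEEL becomes 2 + 1 = 3 and three copies of the
   body are emitted in front of the loop, so the common case is handled by
   straight-line code and the loop itself is entered only when the estimate
   is exceeded.  */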
946 static bool
947 try_peel_loop (struct loop *loop,
948 edge exit, tree niter,
949 HOST_WIDE_INT maxiter)
951 int npeel;
952 struct loop_size size;
953 int peeled_size;
954 sbitmap wont_exit;
955 unsigned i;
956 vec<edge> to_remove = vNULL;
957 edge e;
959 /* If the iteration bound is known and large, then we can safely eliminate
960 the check in peeled copies. */
961 if (TREE_CODE (niter) != INTEGER_CST)
962 exit = NULL;
964 if (!flag_peel_loops || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0)
965 return false;
967 /* Peel only innermost loops. */
968 if (loop->inner)
970 if (dump_file)
971 fprintf (dump_file, "Not peeling: outer loop\n");
972 return false;
975 if (!optimize_loop_for_speed_p (loop))
977 if (dump_file)
978 fprintf (dump_file, "Not peeling: cold loop\n");
979 return false;
982 /* Check if there is an estimate on the number of iterations. */
983 npeel = estimated_loop_iterations_int (loop);
984 if (npeel < 0)
986 if (dump_file)
987 fprintf (dump_file, "Not peeling: number of iterations is not "
988 "estimated\n");
989 return false;
991 if (maxiter >= 0 && maxiter <= npeel)
993 if (dump_file)
994 fprintf (dump_file, "Not peeling: upper bound is known so can "
995 "unroll completely\n");
996 return false;
999 /* We want to peel the estimated number of iterations + 1 (so we never
1000 enter the loop on the quick path). Check against PARAM_MAX_PEEL_TIMES
1001 and be sure to avoid overflows. */
1002 if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
1004 if (dump_file)
1005 fprintf (dump_file, "Not peeling: rolls too much "
1006 "(%i + 1 > --param max-peel-times)\n", npeel);
1007 return false;
1009 npeel++;
1011 /* Check peeled loops size. */
1012 tree_estimate_loop_size (loop, exit, NULL, &size,
1013 PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
1014 if ((peeled_size = estimated_peeled_sequence_size (&size, npeel))
1015 > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
1017 if (dump_file)
1018 fprintf (dump_file, "Not peeling: peeled sequence size is too large "
1019 "(%i insns > --param max-peel-insns)", peeled_size);
1020 return false;
1023 /* Duplicate possibly eliminating the exits. */
1024 initialize_original_copy_tables ();
1025 wont_exit = sbitmap_alloc (npeel + 1);
1026 bitmap_ones (wont_exit);
1027 bitmap_clear_bit (wont_exit, 0);
1028 if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
1029 npeel, wont_exit,
1030 exit, &to_remove,
1031 DLTHE_FLAG_UPDATE_FREQ
1032 | DLTHE_FLAG_COMPLETTE_PEEL))
1034 free_original_copy_tables ();
1035 free (wont_exit);
1036 return false;
1038 FOR_EACH_VEC_ELT (to_remove, i, e)
1040 bool ok = remove_path (e);
1041 gcc_assert (ok);
1043 free (wont_exit);
1044 free_original_copy_tables ();
1045 if (dump_file && (dump_flags & TDF_DETAILS))
1047 fprintf (dump_file, "Peeled loop %d, %i times.\n",
1048 loop->num, npeel);
1050 if (loop->any_upper_bound)
1051 loop->nb_iterations_upper_bound -= npeel;
1052 loop->nb_iterations_estimate = 0;
1053 /* Make sure to mark loop cold so we do not try to peel it more. */
1054 scale_loop_profile (loop, 1, 0);
1055 loop->header->count = 0;
1056 return true;
1058 /* Adds a canonical induction variable to LOOP if suitable.
1059 CREATE_IV is true if we may create a new iv. UL determines
1060 which loops we are allowed to completely unroll. If TRY_EVAL is true, we try
1061 to determine the number of iterations of a loop by direct evaluation.
1062 Returns true if cfg is changed. */
1064 static bool
1065 canonicalize_loop_induction_variables (struct loop *loop,
1066 bool create_iv, enum unroll_level ul,
1067 bool try_eval)
1069 edge exit = NULL;
1070 tree niter;
1071 HOST_WIDE_INT maxiter;
1072 bool modified = false;
1073 location_t locus = UNKNOWN_LOCATION;
1075 niter = number_of_latch_executions (loop);
1076 exit = single_exit (loop);
1077 if (TREE_CODE (niter) == INTEGER_CST)
1078 locus = gimple_location (last_stmt (exit->src));
1079 else
1081 /* If the loop has more than one exit, try checking all of them
1082 for # of iterations determinable through scev. */
1083 if (!exit)
1084 niter = find_loop_niter (loop, &exit);
1086 /* Finally if everything else fails, try brute force evaluation. */
1087 if (try_eval
1088 && (chrec_contains_undetermined (niter)
1089 || TREE_CODE (niter) != INTEGER_CST))
1090 niter = find_loop_niter_by_eval (loop, &exit);
1092 if (exit)
1093 locus = gimple_location (last_stmt (exit->src));
1095 if (TREE_CODE (niter) != INTEGER_CST)
1096 exit = NULL;
1099 /* We work exceptionally hard here to estimate the bound
1100 by find_loop_niter_by_eval. Be sure to keep it for the future. */
1101 if (niter && TREE_CODE (niter) == INTEGER_CST)
1103 record_niter_bound (loop, wi::to_widest (niter),
1104 exit == single_likely_exit (loop), true);
1107 /* Force re-computation of loop bounds so we can remove redundant exits. */
1108 maxiter = max_loop_iterations_int (loop);
1110 if (dump_file && (dump_flags & TDF_DETAILS)
1111 && TREE_CODE (niter) == INTEGER_CST)
1113 fprintf (dump_file, "Loop %d iterates ", loop->num);
1114 print_generic_expr (dump_file, niter, TDF_SLIM);
1115 fprintf (dump_file, " times.\n");
1117 if (dump_file && (dump_flags & TDF_DETAILS)
1118 && maxiter >= 0)
1120 fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
1121 (int)maxiter);
1124 /* Remove exits that are known to be never taken based on the loop bound.
1125 Needs to be called after computation of max_loop_iterations_int, which
1126 populates the loop bounds. */
1127 modified |= remove_redundant_iv_tests (loop);
1129 if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
1130 return true;
1132 if (create_iv
1133 && niter && !chrec_contains_undetermined (niter)
1134 && exit && just_once_each_iteration_p (loop, exit->src))
1135 create_canonical_iv (loop, exit, niter);
1137 if (ul == UL_ALL)
1138 modified |= try_peel_loop (loop, exit, niter, maxiter);
1140 return modified;
1143 /* The main entry point of the pass. Adds canonical induction variables
1144 to the suitable loops. */
1146 unsigned int
1147 canonicalize_induction_variables (void)
1149 struct loop *loop;
1150 bool changed = false;
1151 bool irred_invalidated = false;
1152 bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
1154 free_numbers_of_iterations_estimates ();
1155 estimate_numbers_of_iterations ();
1157 FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1159 changed |= canonicalize_loop_induction_variables (loop,
1160 true, UL_SINGLE_ITER,
1161 true);
1163 gcc_assert (!need_ssa_update_p (cfun));
1165 unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
1166 if (irred_invalidated
1167 && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1168 mark_irreducible_loops ();
1170 /* Clean up the information about numbers of iterations, since brute force
1171 evaluation could reveal new information. */
1172 scev_reset ();
1174 if (!bitmap_empty_p (loop_closed_ssa_invalidated))
1176 gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
1177 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1179 BITMAP_FREE (loop_closed_ssa_invalidated);
1181 if (changed)
1182 return TODO_cleanup_cfg;
1183 return 0;
1186 /* Propagate VAL into all uses of SSA_NAME. */
1188 static void
1189 propagate_into_all_uses (tree ssa_name, tree val)
1191 imm_use_iterator iter;
1192 gimple use_stmt;
1194 FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
1196 gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
1197 use_operand_p use;
1199 FOR_EACH_IMM_USE_ON_STMT (use, iter)
1200 SET_USE (use, val);
1202 if (is_gimple_assign (use_stmt)
1203 && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
1204 == GIMPLE_SINGLE_RHS)
1206 tree rhs = gimple_assign_rhs1 (use_stmt);
1208 if (TREE_CODE (rhs) == ADDR_EXPR)
1209 recompute_tree_invariant_for_addr_expr (rhs);
1212 fold_stmt_inplace (&use_stmt_gsi);
1213 update_stmt (use_stmt);
1214 maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
1218 /* Propagate constant SSA_NAMEs defined in basic block BB. */
1220 static void
1221 propagate_constants_for_unrolling (basic_block bb)
1223 /* Look for degenerate PHI nodes with constant argument. */
1224 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
1226 gphi *phi = gsi.phi ();
1227 tree result = gimple_phi_result (phi);
1228 tree arg = gimple_phi_arg_def (phi, 0);
1230 if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
1232 propagate_into_all_uses (result, arg);
1233 gsi_remove (&gsi, true);
1234 release_ssa_name (result);
1236 else
1237 gsi_next (&gsi);
1240 /* Look for assignments to SSA names with constant RHS. */
1241 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
1243 gimple stmt = gsi_stmt (gsi);
1244 tree lhs;
1246 if (is_gimple_assign (stmt)
1247 && gimple_assign_rhs_code (stmt) == INTEGER_CST
1248 && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
1249 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
1251 propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
1252 gsi_remove (&gsi, true);
1253 release_ssa_name (lhs);
1255 else
1256 gsi_next (&gsi);
1260 /* Process loops from innermost to outer, stopping at the innermost
1261 loop we unrolled. */
1263 static bool
1264 tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
1265 vec<loop_p, va_heap>& father_stack,
1266 struct loop *loop)
1268 struct loop *loop_father;
1269 bool changed = false;
1270 struct loop *inner;
1271 enum unroll_level ul;
1273 /* Process inner loops first. */
1274 for (inner = loop->inner; inner != NULL; inner = inner->next)
1275 changed |= tree_unroll_loops_completely_1 (may_increase_size,
1276 unroll_outer, father_stack,
1277 inner);
1279 /* If we changed an inner loop we cannot process outer loops in this
1280 iteration because SSA form is not up-to-date. Continue with
1281 siblings of outer loops instead. */
1282 if (changed)
1283 return true;
1285 /* Don't unroll #pragma omp simd loops until the vectorizer
1286 attempts to vectorize those. */
1287 if (loop->force_vectorize)
1288 return false;
1290 /* Try to unroll this loop. */
1291 loop_father = loop_outer (loop);
1292 if (!loop_father)
1293 return false;
1295 if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
1296 /* Unroll outermost loops only if asked to do so or they do
1297 not cause code growth. */
1298 && (unroll_outer || loop_outer (loop_father)))
1299 ul = UL_ALL;
1300 else
1301 ul = UL_NO_GROWTH;
1303 if (canonicalize_loop_induction_variables
1304 (loop, false, ul, !flag_tree_loop_ivcanon))
1306 /* If we'll continue unrolling, we need to propagate constants
1307 within the new basic blocks to fold away induction variable
1308 computations; otherwise, the size might blow up before the
1309 iteration is complete and the IR eventually cleaned up. */
1310 if (loop_outer (loop_father) && !loop_father->aux)
1312 father_stack.safe_push (loop_father);
1313 loop_father->aux = loop_father;
1316 return true;
1319 return false;
1322 /* Unroll LOOPS completely if they iterate just a few times. Unless
1323 MAY_INCREASE_SIZE is true, perform the unrolling only if the
1324 size of the code does not increase. */
1326 unsigned int
1327 tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
1329 auto_vec<loop_p, 16> father_stack;
1330 bool changed;
1331 int iteration = 0;
1332 bool irred_invalidated = false;
1336 changed = false;
1337 bitmap loop_closed_ssa_invalidated = NULL;
1339 if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1340 loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
1342 free_numbers_of_iterations_estimates ();
1343 estimate_numbers_of_iterations ();
1345 changed = tree_unroll_loops_completely_1 (may_increase_size,
1346 unroll_outer, father_stack,
1347 current_loops->tree_root);
1348 if (changed)
1350 struct loop **iter;
1351 unsigned i;
1353 /* Be sure to skip unlooped loops while processing the father_stack
1354 array. */
1355 FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
1356 (*iter)->aux = NULL;
1357 FOR_EACH_VEC_ELT (father_stack, i, iter)
1358 if (!(*iter)->aux)
1359 *iter = NULL;
1360 unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
1362 /* We can not use TODO_update_ssa_no_phi because VOPS gets confused. */
1363 if (loop_closed_ssa_invalidated
1364 && !bitmap_empty_p (loop_closed_ssa_invalidated))
1365 rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
1366 TODO_update_ssa);
1367 else
1368 update_ssa (TODO_update_ssa);
1370 /* Propagate the constants within the new basic blocks. */
1371 FOR_EACH_VEC_ELT (father_stack, i, iter)
1372 if (*iter)
1374 unsigned j;
1375 basic_block *body = get_loop_body_in_dom_order (*iter);
1376 for (j = 0; j < (*iter)->num_nodes; j++)
1377 propagate_constants_for_unrolling (body[j]);
1378 free (body);
1379 (*iter)->aux = NULL;
1381 father_stack.truncate (0);
1383 /* This will take care of removing completely unrolled loops
1384 from the loop structures so we can continue unrolling the
1385 now-innermost loops. */
1386 if (cleanup_tree_cfg ())
1387 update_ssa (TODO_update_ssa_only_virtuals);
1389 /* Clean up the information about numbers of iterations, since
1390 complete unrolling might have invalidated it. */
1391 scev_reset ();
1392 #ifdef ENABLE_CHECKING
1393 if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
1394 verify_loop_closed_ssa (true);
1395 #endif
1397 if (loop_closed_ssa_invalidated)
1398 BITMAP_FREE (loop_closed_ssa_invalidated);
1400 while (changed
1401 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));
1403 father_stack.release ();
1405 if (irred_invalidated
1406 && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1407 mark_irreducible_loops ();
1409 return 0;
1412 /* Canonical induction variable creation pass. */
1414 namespace {
1416 const pass_data pass_data_iv_canon =
1418 GIMPLE_PASS, /* type */
1419 "ivcanon", /* name */
1420 OPTGROUP_LOOP, /* optinfo_flags */
1421 TV_TREE_LOOP_IVCANON, /* tv_id */
1422 ( PROP_cfg | PROP_ssa ), /* properties_required */
1423 0, /* properties_provided */
1424 0, /* properties_destroyed */
1425 0, /* todo_flags_start */
1426 0, /* todo_flags_finish */
1429 class pass_iv_canon : public gimple_opt_pass
1431 public:
1432 pass_iv_canon (gcc::context *ctxt)
1433 : gimple_opt_pass (pass_data_iv_canon, ctxt)
1436 /* opt_pass methods: */
1437 virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
1438 virtual unsigned int execute (function *fun);
1440 }; // class pass_iv_canon
1442 unsigned int
1443 pass_iv_canon::execute (function *fun)
1445 if (number_of_loops (fun) <= 1)
1446 return 0;
1448 return canonicalize_induction_variables ();
1451 } // anon namespace
1453 gimple_opt_pass *
1454 make_pass_iv_canon (gcc::context *ctxt)
1456 return new pass_iv_canon (ctxt);
1459 /* Complete unrolling of loops. */
1461 namespace {
1463 const pass_data pass_data_complete_unroll =
1465 GIMPLE_PASS, /* type */
1466 "cunroll", /* name */
1467 OPTGROUP_LOOP, /* optinfo_flags */
1468 TV_COMPLETE_UNROLL, /* tv_id */
1469 ( PROP_cfg | PROP_ssa ), /* properties_required */
1470 0, /* properties_provided */
1471 0, /* properties_destroyed */
1472 0, /* todo_flags_start */
1473 0, /* todo_flags_finish */
1476 class pass_complete_unroll : public gimple_opt_pass
1478 public:
1479 pass_complete_unroll (gcc::context *ctxt)
1480 : gimple_opt_pass (pass_data_complete_unroll, ctxt)
1483 /* opt_pass methods: */
1484 virtual unsigned int execute (function *);
1486 }; // class pass_complete_unroll
1488 unsigned int
1489 pass_complete_unroll::execute (function *fun)
1491 if (number_of_loops (fun) <= 1)
1492 return 0;
1494 return tree_unroll_loops_completely (flag_unroll_loops
1495 || flag_peel_loops
1496 || optimize >= 3, true);
1499 } // anon namespace
1501 gimple_opt_pass *
1502 make_pass_complete_unroll (gcc::context *ctxt)
1504 return new pass_complete_unroll (ctxt);
1507 /* Complete unrolling of inner loops. */
1509 namespace {
1511 const pass_data pass_data_complete_unrolli =
1513 GIMPLE_PASS, /* type */
1514 "cunrolli", /* name */
1515 OPTGROUP_LOOP, /* optinfo_flags */
1516 TV_COMPLETE_UNROLL, /* tv_id */
1517 ( PROP_cfg | PROP_ssa ), /* properties_required */
1518 0, /* properties_provided */
1519 0, /* properties_destroyed */
1520 0, /* todo_flags_start */
1521 0, /* todo_flags_finish */
1524 class pass_complete_unrolli : public gimple_opt_pass
1526 public:
1527 pass_complete_unrolli (gcc::context *ctxt)
1528 : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
1531 /* opt_pass methods: */
1532 virtual bool gate (function *) { return optimize >= 2; }
1533 virtual unsigned int execute (function *);
1535 }; // class pass_complete_unrolli
1537 unsigned int
1538 pass_complete_unrolli::execute (function *fun)
1540 unsigned ret = 0;
1542 loop_optimizer_init (LOOPS_NORMAL
1543 | LOOPS_HAVE_RECORDED_EXITS);
1544 if (number_of_loops (fun) > 1)
1546 scev_initialize ();
1547 ret = tree_unroll_loops_completely (optimize >= 3, false);
1548 free_numbers_of_iterations_estimates ();
1549 scev_finalize ();
1551 loop_optimizer_finalize ();
1553 return ret;
1556 } // anon namespace
1558 gimple_opt_pass *
1559 make_pass_complete_unrolli (gcc::context *ctxt)
1561 return new pass_complete_unrolli (ctxt);