/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass detects the loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that in case the new variable is not needed, ivopts will get rid
   of it, so it might be a problem only when there are no other linear
   induction variables.  In that case the created optimization possibilities
   are likely to pay off.

   We also perform
   - complete unrolling (or peeling) when the loop rolls few enough
     times
   - simple peeling (i.e. copying a few initial iterations ahead of the
     loop) when an estimate of the number of iterations is known
     (typically from profile info).  */
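
/* As an illustrative sketch (not taken from GCC's sources or test suite),
   the canonicalization turns a counted loop such as

     for (i = 0; i != 100; i++)
       a[i] = i;

   into the equivalent of

     ivtmp = 100;
     for (i = 0; ; i++)
       {
	 a[i] = i;
	 if (--ivtmp == 0)
	   break;
       }

   so that the iteration count is explicit in the exit test.  */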
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "profile.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-into-ssa.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "tree-inline.h"
#include "tree-cfgcleanup.h"
#include "builtins.h"

/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   of code size.  */
  UL_ALL		/* All suitable loops.  */
};

/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */

static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gcond *cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = as_a <gcond *> (last_stmt (exit->src));
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */
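
  /* Worked example (illustrative): if NITER is 99 (the latch runs 99
     times and the body 100 times), the new IV starts at NITER + 1 == 100
     and is decremented once per iteration, so the test VAR == 0 fires
     exactly on the 100th pass through the exit condition.  */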

  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}

/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computation based on induction
     variable where induction variable starts at known constant.)  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};

/* Return true if OP in STMT will be constant after peeling LOOP.  */

static bool
constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
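  /* E.g. (an illustrative sketch): given "static const int tbl[4]
     = { 1, 2, 3, 4 };", a read of "tbl[i]" folds to a constant in each
     peeled copy once I itself is known to be constant there.  */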
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First take a quick look whether there is a constant array inside.  */
      while (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
	   && ctor_for_folding (base) != error_mark_node)
	  || CONSTANT_CLASS_P (base))
	{
	  /* If so, see if we understand all the indices.  */
	  base = op;
	  while (handled_component_p (base))
	    {
	      if (TREE_CODE (base) == ARRAY_REF
		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
		return false;
	      base = TREE_OPERAND (base, 0);
	    }
	  return true;
	}
      return false;
    }

  /* Induction variables are constants.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}

/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but
   the last iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimating benefits for complete unrolling
   exiting by EXIT.  Stop estimating after UPPER_BOUND is met.  Return true
   in this case.  */

static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
		 after_exit);

      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away.  */

	  if (gimple_has_side_effects (stmt))
	    ;
	  /* Exit conditional.  */
	  else if (exit && body[i] == exit->src
		   && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt,
					      loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
						  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt,
					       loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt,
					       loop)
		    /* We don't simplify all constant compares so make sure
		       they are not both constant already.  See PR70288.  */
		    && (! is_gimple_min_invariant (gimple_cond_lhs (stmt))
			|| ! is_gimple_min_invariant (gimple_cond_rhs (stmt))))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (
						    as_a <gswitch *> (stmt)),
						  stmt, loop)
		       && ! is_gimple_min_invariant
			      (gimple_switch_index (as_a <gswitch *> (stmt)))))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	       - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
	      free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL
	      && !gimple_inexpensive_call_p (as_a <gcall *> (stmt)))
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path++;
	    }
	  /* Count inexpensive calls as non-calls, because they will likely
	     expand inline.  */
	  else if (gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  /* A conditional survives peeling unless both of its operands
	     become constant.  */
	  if (((gimple_code (stmt) == GIMPLE_COND
		&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || !constant_after_peeling (gimple_cond_rhs (stmt), stmt,
						loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (
						 as_a <gswitch *> (stmt)),
					       stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);

  free (body);
  return false;
}

/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body, taking into account
   the fact that in the last copy everything after the exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   The loop body is likely to simplify further; this is difficult
   to guess, so we just decrease the result by 1/3.  */
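
/* Worked example (illustrative numbers): with SIZE->overall == 12,
   SIZE->eliminated_by_peeling == 4, SIZE->last_iteration == 8,
   SIZE->last_iteration_eliminated_by_peeling == 2 and NUNROLL == 3,
   the estimate below is (3 * (12 - 4) + (8 - 2)) * 2 / 3 == 20 insns.  */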

static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}

/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit, while at
   the same time not making any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

     int a[5];
     for (i=0;i<b;i++)
       a[i]=0;

   Here we prove the loop to iterate 5 times but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */

static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
	 leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We can only handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      exits.release ();

      /* Verify that the code in loop latch does nothing that may end program
	 execution without really reaching the exit.  This may include
	 non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}

/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put a call to __builtin_unreachable () before
   every statement known to not be executed.  */
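
/* For instance (illustrative): if the loop was peeled 3 times and a
   recorded bound shows some non-exit statement can only run during the
   first couple of iterations, that statement cannot be reached in the
   remaining loop; a __builtin_unreachable () call is inserted before it.  */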

static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).  */
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, npeeled))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gcall *stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  split_block (gimple_bb (stmt), stmt);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && wi::leu_p (elt->bound, npeeled))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gimple_cond_make_false (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}

/* Remove all exits that are known to be never taken because of the
   discovered loop bound.  */
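
/* For instance (illustrative): in a loop with a proven upper bound of 10
   iterations, an exit test that scev shows can first trigger only on
   iteration 1000 is pointless; it gets folded to a constant condition
   below.  */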

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
	 upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, we can remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !wi::ltu_p (loop->nb_iterations_upper_bound,
			     wi::to_widest (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  gcond *cond_stmt = as_a <gcond *> (elt->stmt);
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else
	    gimple_cond_make_true (cond_stmt);
	  update_stmt (cond_stmt);
	  changed = true;
	}
    }
  return changed;
}

/* Stores loops that will be unlooped and edges that will be removed
   after we process the whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;
static vec<edge> edges_to_remove;
/* Stores loops that have been peeled.  */
static bitmap peeled_loops;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries, trashing loop closed SSA form as well
   as SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep whether information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */

static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gcall *stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();

  /* Remove edges in peeled copies.  */
  unsigned i;
  edge e;
  FOR_EACH_VEC_ELT (edges_to_remove, i, e)
    {
      bool ok = remove_path (e);
      gcc_assert (ok);
    }
  edges_to_remove.release ();
}

/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies a bound on the number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */
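
/* Illustrative sketch of the transformation (not taken from the sources):

     for (i = 0; i < 3; i++)
       f (i);

   becomes the straight-line sequence

     f (0); f (1); f (2);

   with the back edge and the exit test gone.  */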

static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll = 0, ninsns, unr_insns;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;
  int report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_RTL | TDF_DETAILS;

  /* See if we proved the number of iterations to be a low constant.

     EXIT is an edge that will be removed in all but the last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two
     edges leaving from the iv test.  */
  if (tree_fits_uhwi_p (niter))
    {
      n_unroll = tree_to_uhwi (niter);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we cannot eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT)maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we cannot
	 remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  if (n_unroll > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Not unrolling loop %d "
		 "(--param max-completely-peel-times limit reached).\n",
		 loop->num);
      return false;
    }

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);

  if (n_unroll)
    {
      sbitmap wont_exit;
      bool large;
      if (ul == UL_SINGLE_ITER)
	return false;

      /* EXIT can be removed only if we are sure it passes first N_UNROLL
	 iterations.  */
      bool remove_exit = (exit && niter
			  && TREE_CODE (niter) == INTEGER_CST
			  && wi::leu_p (n_unroll, wi::to_widest (niter)));

      large = tree_estimate_loop_size
		(loop, remove_exit ? exit : NULL, edge_to_cancel, &size,
		 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS));
      ninsns = size.overall;
      if (large)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: it is too large.\n",
		     loop->num);
	  return false;
	}

      unr_insns = estimated_unrolled_size (&size, n_unroll);
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Loop size: %d\n", (int) ninsns);
	  fprintf (dump_file, "  Estimated size after unrolling: %d\n",
		   (int) unr_insns);
	}

      /* If the code is going to shrink, we don't need to be extra cautious
	 on guessing if the unrolling is going to be profitable.  */
      if (unr_insns
	  /* If there is an IV variable that will become constant, we save
	     one instruction in the loop prologue we do not account for
	     otherwise.  */
	  <= ninsns + (size.constant_iv != false))
	;
      /* We unroll only inner loops, because we do not consider it
	 profitable otherwise.  We still can cancel the loopback edge
	 of a not rolling loop; this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
		     loop->num);
	  return false;
	}
      /* Outer loops tend to be less interesting candidates for complete
	 unrolling unless we can do a lot of propagation into the inner loop
	 body.  For now we disable outer loop unrolling when the code would
	 grow.  */
      else if (loop->inner)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "it is not innermost and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a call on a hot path through the loop, then
	 there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains call and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is a pure/const call in the function, then we can
	 still optimize the unrolled loop body if it contains some other
	 interesting code than the calls and code storing or accumulating
	 the return value.  */
      else if (size.num_pure_calls_on_hot_path
	       /* One IV increment, one test, one ivtmp store and
		  one useful stmt.  That is about the minimal loop
		  doing a pure call.  */
	       && (size.non_call_stmts_on_hot_path
		   <= 3 + size.num_pure_calls_on_hot_path))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains just pure calls and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* Complete unrolling is a major win when control flow is removed and
	 one big basic block is created.  If the loop contains control flow
	 the optimization may still be a win because of eliminating the loop
	 overhead but it also may blow the branch predictor tables.
	 Limit number of branches on the hot path through the peeled
	 sequence.  */
      else if (size.num_branches_on_hot_path * (int)n_unroll
	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "number of branches on the hot path in the unrolled"
		     " sequence reaches --param max-peel-branches limit.\n",
		     loop->num);
	  return false;
	}
      else if (unr_insns
	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d "
		     "(--param max-completely-peeled-insns limit reached).\n",
		     loop->num);
	  return false;
	}
      dump_printf_loc (report_flags, locus,
		       "loop turned into non-loop; it never loops.\n");

      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      if (exit && niter
	  && TREE_CODE (niter) == INTEGER_CST
	  && wi::leu_p (n_unroll, wi::to_widest (niter)))
	{
	  bitmap_ones (wont_exit);
	  if (wi::eq_p (wi::to_widest (niter), n_unroll)
	      || edge_to_cancel)
	    bitmap_clear_bit (wont_exit, 0);
	}
      else
	{
	  exit = NULL;
	  bitmap_clear (wont_exit);
	}

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &edges_to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
	  free_original_copy_tables ();
	  free (wont_exit);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      gcond *cond = as_a <gcond *> (last_stmt (edge_to_cancel->src));
      force_edge_cold (edge_to_cancel, true);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path.  Doing so may remove the outer loop and
	 confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			 "loop turned into non-loop; it never loops\n");
      else
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			   "loop with %d iterations completely unrolled",
			   (int) (n_unroll + 1));
	  if (profile_info)
	    dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
			 " (header execution count %d)",
			 (int)loop->header->count);
	  dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
	fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
	fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
	fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}

/* Return number of instructions after peeling.  */
static unsigned HOST_WIDE_INT
estimated_peeled_sequence_size (struct loop_size *size,
				unsigned HOST_WIDE_INT npeel)
{
  return MAX (npeel * (HOST_WIDE_INT) (size->overall
				       - size->eliminated_by_peeling), 1);
}
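
/* Worked example (illustrative numbers): with SIZE->overall == 10,
   SIZE->eliminated_by_peeling == 6 and NPEEL == 3 this estimates
   MAX (3 * (10 - 6), 1) == 12 insns.  */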

/* If the loop is expected to iterate N times and is
   small enough, duplicate the loop body N+1 times before
   the loop itself.  This way the hot path will never
   enter the loop.
   Parameters are the same as for try_unroll_loops_completely.  */
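
/* Illustrative sketch (not taken from the sources): with an estimate of
   two iterations, the body is copied estimate + 1 == 3 times in front of
   the loop, roughly

     if (!c ()) goto done;
     g ();
     if (!c ()) goto done;
     g ();
     if (!c ()) goto done;
     g ();
     while (c ())
       g ();
   done:;

   so on the expected path the loop proper is never entered.  */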

static bool
try_peel_loop (struct loop *loop,
	       edge exit, tree niter,
	       HOST_WIDE_INT maxiter)
{
  HOST_WIDE_INT npeel;
  struct loop_size size;
  int peeled_size;
  sbitmap wont_exit;

  if (!flag_peel_loops || PARAM_VALUE (PARAM_MAX_PEEL_TIMES) <= 0
      || !peeled_loops)
    return false;

  if (bitmap_bit_p (peeled_loops, loop->num))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: loop is already peeled\n");
      return false;
    }

  /* Peel only innermost loops.
     While the code is perfectly capable of peeling non-innermost loops,
     the heuristics would probably need some improvements.  */
  if (loop->inner)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: outer loop\n");
      return false;
    }

  if (!optimize_loop_for_speed_p (loop))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: cold loop\n");
      return false;
    }

  /* Check if there is an estimate on the number of iterations.  */
  npeel = estimated_loop_iterations_int (loop);
  if (npeel < 0)
    npeel = likely_max_loop_iterations_int (loop);
  if (npeel < 0)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: number of iterations is not "
		 "estimated\n");
      return false;
    }
  if (maxiter >= 0 && maxiter <= npeel)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: upper bound is known so can "
		 "unroll completely\n");
      return false;
    }

  /* We want to peel estimated number of iterations + 1 (so we never
     enter the loop on the quick path).  Check against PARAM_MAX_PEEL_TIMES
     and be sure to avoid overflows.  */
  if (npeel > PARAM_VALUE (PARAM_MAX_PEEL_TIMES) - 1)
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: rolls too much "
		 "(%i + 1 > --param max-peel-times)\n", (int) npeel);
      return false;
    }
  npeel++;

  /* Check the size of the peeled loops.  */
  tree_estimate_loop_size (loop, exit, NULL, &size,
			   PARAM_VALUE (PARAM_MAX_PEELED_INSNS));
  if ((peeled_size = estimated_peeled_sequence_size (&size, (int) npeel))
      > PARAM_VALUE (PARAM_MAX_PEELED_INSNS))
    {
      if (dump_file)
	fprintf (dump_file, "Not peeling: peeled sequence size is too large "
		 "(%i insns > --param max-peel-insns)", peeled_size);
      return false;
    }

  /* Duplicate, possibly eliminating the exits.  */
  initialize_original_copy_tables ();
  wont_exit = sbitmap_alloc (npeel + 1);
  if (exit && niter
      && TREE_CODE (niter) == INTEGER_CST
      && wi::leu_p (npeel, wi::to_widest (niter)))
    {
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);
    }
  else
    {
      exit = NULL;
      bitmap_clear (wont_exit);
    }
  if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
					     npeel, wont_exit,
					     exit, &edges_to_remove,
					     DLTHE_FLAG_UPDATE_FREQ))
    {
      free_original_copy_tables ();
      free (wont_exit);
      return false;
    }
  free (wont_exit);
  free_original_copy_tables ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Peeled loop %d, %i times.\n",
	       loop->num, (int) npeel);
    }
  if (loop->any_estimate)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_estimate))
	loop->nb_iterations_estimate -= npeel;
      else
	loop->nb_iterations_estimate = 0;
    }
  if (loop->any_upper_bound)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_upper_bound))
	loop->nb_iterations_upper_bound -= npeel;
      else
	loop->nb_iterations_upper_bound = 0;
    }
  if (loop->any_likely_upper_bound)
    {
      if (wi::ltu_p (npeel, loop->nb_iterations_likely_upper_bound))
	loop->nb_iterations_likely_upper_bound -= npeel;
      else
	{
	  loop->any_estimate = true;
	  loop->nb_iterations_estimate = 0;
	  loop->nb_iterations_likely_upper_bound = 0;
	}
    }
  gcov_type entry_count = 0;
  int entry_freq = 0;

  edge e;
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->src != loop->latch)
      {
	entry_count += e->src->count;
	entry_freq += e->src->frequency;
	gcc_assert (!flow_bb_inside_loop_p (loop, e->src));
      }
  int scale = 1;
  if (loop->header->count)
    scale = RDIV (entry_count * REG_BR_PROB_BASE, loop->header->count);
  else if (loop->header->frequency)
    scale = RDIV (entry_freq * REG_BR_PROB_BASE, loop->header->frequency);
  scale_loop_profile (loop, scale, 0);
  bitmap_set_bit (peeled_loops, loop->num);
  return true;
}

/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.  Returns true if cfg is changed.  */

static bool
canonicalize_loop_induction_variables (struct loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for the future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, wi::to_widest (niter),
			  exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    {
      fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	       (int)maxiter);
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && likely_max_loop_iterations_int (loop) >= 0)
    {
      fprintf (dump_file, "Loop %d likely iterates at most %i times.\n",
	       loop->num, (int)likely_max_loop_iterations_int (loop));
    }

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after computation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  if (ul == UL_ALL)
    modified |= try_peel_loop (loop, exit, niter, maxiter);

  return modified;
}

/* The main entry point of the pass.  Adds canonical induction variables
   to suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates (cfun);
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}

/* Propagate constant SSA_NAMEs defined in basic block BB.  */
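
/* E.g. (illustrative GIMPLE): a degenerate PHI left over after unrolling,
   such as "i_4 = PHI <7(5)>", lets every use of i_4 be replaced by the
   constant 7; likewise an assignment "j_2 = 42" feeds 42 into the uses
   of j_2 so the copies fold in their unrolled context.  */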

static void
propagate_constants_for_unrolling (basic_block bb)
{
  /* Look for degenerate PHI nodes with constant argument.  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gphi *phi = gsi.phi ();
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (result)
	  && gimple_phi_num_args (phi) == 1
	  && TREE_CODE (arg) == INTEGER_CST)
	{
	  replace_uses_by (result, arg);
	  gsi_remove (&gsi, true);
	  release_ssa_name (result);
	}
      else
	gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple *stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == INTEGER_CST
	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	{
	  replace_uses_by (lhs, gimple_assign_rhs1 (stmt));
	  gsi_remove (&gsi, true);
	  release_ssa_name (lhs);
	}
      else
	gsi_next (&gsi);
    }
}

/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				bitmap father_bbs, struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
					       unroll_outer, father_bbs,
					       inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Don't unroll #pragma omp simd loops until the vectorizer
     attempts to vectorize those.  */
  if (loop->force_vectorize)
    return false;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
	(loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
	 within the new basic blocks to fold away induction variable
	 computations; otherwise, the size might blow up before the
	 iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father))
	bitmap_set_bit (father_bbs, loop_father->header->index);

      return true;
    }

  return false;
}

/* Unroll LOOPS completely if they iterate just a few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  bitmap father_bbs = BITMAP_ALLOC (NULL);
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates (cfun);
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
						unroll_outer, father_bbs,
						current_loops->tree_root);
      if (changed)
	{
	  unsigned i;

	  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

	  /* We cannot use TODO_update_ssa_no_phi because VOPS gets
	     confused.  */
	  if (loop_closed_ssa_invalidated
	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
	    rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
					  TODO_update_ssa);
	  else
	    update_ssa (TODO_update_ssa);

	  /* father_bbs is a bitmap of loop father header BB indices.
	     Translate that to what non-root loops these BBs belong to now.  */
	  bitmap_iterator bi;
	  bitmap fathers = BITMAP_ALLOC (NULL);
	  EXECUTE_IF_SET_IN_BITMAP (father_bbs, 0, i, bi)
	    {
	      basic_block unrolled_loop_bb = BASIC_BLOCK_FOR_FN (cfun, i);
	      if (! unrolled_loop_bb)
		continue;
	      if (loop_outer (unrolled_loop_bb->loop_father))
		bitmap_set_bit (fathers,
				unrolled_loop_bb->loop_father->num);
	    }
	  bitmap_clear (father_bbs);
	  /* Propagate the constants within the new basic blocks.  */
	  EXECUTE_IF_SET_IN_BITMAP (fathers, 0, i, bi)
	    {
	      loop_p father = get_loop (cfun, i);
	      basic_block *body = get_loop_body_in_dom_order (father);
	      for (unsigned j = 0; j < father->num_nodes; j++)
		propagate_constants_for_unrolling (body[j]);
	      free (body);
	    }
	  BITMAP_FREE (fathers);

	  /* This will take care of removing completely unrolled loops
	     from the loop structures so we can continue unrolling now
	     innermost loops.  */
	  if (cleanup_tree_cfg ())
	    update_ssa (TODO_update_ssa_only_virtuals);

	  /* Clean up the information about numbers of iterations, since
	     complete unrolling might have invalidated it.  */
	  scev_reset ();
	  if (flag_checking && loops_state_satisfies_p (LOOP_CLOSED_SSA))
	    verify_loop_closed_ssa (true);
	}
      if (loop_closed_ssa_invalidated)
	BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  BITMAP_FREE (father_bbs);

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}

/* Canonical induction variable creation pass.  */

namespace {

const pass_data pass_data_iv_canon =
{
  GIMPLE_PASS, /* type */
  "ivcanon", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_TREE_LOOP_IVCANON, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_iv_canon : public gimple_opt_pass
{
public:
  pass_iv_canon (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_iv_canon, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_tree_loop_ivcanon != 0; }
  virtual unsigned int execute (function *fun);

}; // class pass_iv_canon

unsigned int
pass_iv_canon::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  return canonicalize_induction_variables ();
}

} // anon namespace

gimple_opt_pass *
make_pass_iv_canon (gcc::context *ctxt)
{
  return new pass_iv_canon (ctxt);
}

/* Complete unrolling of loops.  */

namespace {

const pass_data pass_data_complete_unroll =
{
  GIMPLE_PASS, /* type */
  "cunroll", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unroll : public gimple_opt_pass
{
public:
  pass_complete_unroll (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unroll, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_complete_unroll

unsigned int
pass_complete_unroll::execute (function *fun)
{
  if (number_of_loops (fun) <= 1)
    return 0;

  /* If we ever decide to run loop peeling more than once, we will need to
     track loops already peeled in loop structures themselves to avoid
     re-peeling the same loop multiple times.  */
  if (flag_peel_loops)
    peeled_loops = BITMAP_ALLOC (NULL);
  int val = tree_unroll_loops_completely (flag_unroll_loops
					  || flag_peel_loops
					  || optimize >= 3, true);
  if (peeled_loops)
    {
      BITMAP_FREE (peeled_loops);
      peeled_loops = NULL;
    }
  return val;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unroll (gcc::context *ctxt)
{
  return new pass_complete_unroll (ctxt);
}

/* Complete unrolling of inner loops.  */

namespace {

const pass_data pass_data_complete_unrolli =
{
  GIMPLE_PASS, /* type */
  "cunrolli", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_COMPLETE_UNROLL, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_complete_unrolli : public gimple_opt_pass
{
public:
  pass_complete_unrolli (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_complete_unrolli, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize >= 2; }
  virtual unsigned int execute (function *);

}; // class pass_complete_unrolli

unsigned int
pass_complete_unrolli::execute (function *fun)
{
  unsigned ret = 0;

  loop_optimizer_init (LOOPS_NORMAL
		       | LOOPS_HAVE_RECORDED_EXITS);
  if (number_of_loops (fun) > 1)
    {
      scev_initialize ();
      ret = tree_unroll_loops_completely (optimize >= 3, false);
      free_numbers_of_iterations_estimates (fun);
      scev_finalize ();
    }
  loop_optimizer_finalize ();

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_complete_unrolli (gcc::context *ctxt)
{
  return new pass_complete_unrolli (ctxt);
}