/* Induction variable canonicalization and loop peeling.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* This pass detects the loops that iterate a constant number of times,
   adds a canonical induction variable (step -1, tested against 0)
   and replaces the exit test.  This enables the less powerful rtl
   level analysis to use this information.

   This might spoil the code in some cases (by increasing register pressure).
   Note that if the new variable is not needed, ivopts will get rid
   of it, so it might only be a problem when there are no other linear
   induction variables.  In that case the created optimization possibilities
   are likely to pay off.

   Additionally, in case we detect that it is beneficial to unroll the
   loop completely, we do it right here to expose the optimization
   possibilities to the following passes.  */
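/* As an illustrative sketch (the loop below is made up for this comment and
   is not taken from the sources), for a loop whose body is known to execute
   N > 0 times the transformation roughly corresponds, at the source level,
   to rewriting

       for (i = 0; i < n; i++)
         a[i] = 0;

   into a form driven by a fresh counter that steps by -1 and is tested
   against zero:

       i = 0; ivtmp = n;
       do
         {
           a[i] = 0;
           i++;
           ivtmp--;
         }
       while (ivtmp != 0);

   The original induction variable is left in place; as noted above, ivopts
   removes the new one again if it turns out to be unnecessary.  */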
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "params.h"
#include "flags.h"
#include "tree-inline.h"
#include "target.h"
/* Specifies types of loops that may be unrolled.  */

enum unroll_level
{
  UL_SINGLE_ITER,	/* Only loops that exit immediately in the first
			   iteration.  */
  UL_NO_GROWTH,		/* Only loops whose unrolling will not cause increase
			   in code size.  */
  UL_ALL		/* All suitable loops.  */
};
/* Adds a canonical induction variable to LOOP iterating NITER times.  EXIT
   is the exit edge whose condition is replaced.  */

static void
create_canonical_iv (struct loop *loop, edge exit, tree niter)
{
  edge in;
  tree type, var;
  gimple cond;
  gimple_stmt_iterator incr_at;
  enum tree_code cmp;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Added canonical iv to loop %d, ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " iterations.\n");
    }

  cond = last_stmt (exit->src);
  in = EDGE_SUCC (exit->src, 0);
  if (in == exit)
    in = EDGE_SUCC (exit->src, 1);

  /* Note that we do not need to worry about overflows, since
     type of niter is always unsigned and all comparisons are
     just for equality/nonequality -- i.e. everything works
     with modulo arithmetic.  */
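  /* Worked example of the modulo reasoning above (illustrative only,
     assuming a 32-bit unsigned type): if NITER is 0xffffffff, the PLUS_EXPR
     below wraps NITER + 1 around to 0.  The new counter then starts at 0 and
     is decremented before every test, so it compares equal to 0 again only
     on the (NITER + 1)-st test, exactly as if no wrapping had occurred.  */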
  type = TREE_TYPE (niter);
  niter = fold_build2 (PLUS_EXPR, type,
		       niter,
		       build_int_cst (type, 1));
  incr_at = gsi_last_bb (in->src);
  create_iv (niter,
	     build_int_cst (type, -1),
	     NULL_TREE, loop,
	     &incr_at, false, NULL, &var);

  cmp = (exit->flags & EDGE_TRUE_VALUE) ? EQ_EXPR : NE_EXPR;
  gimple_cond_set_code (cond, cmp);
  gimple_cond_set_lhs (cond, var);
  gimple_cond_set_rhs (cond, build_int_cst (type, 0));
  update_stmt (cond);
}
/* Computes an estimated number of insns in LOOP, weighted by WEIGHTS.  */

unsigned
tree_num_loop_insns (struct loop *loop, eni_weights *weights)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned size = 0, i;

  for (i = 0; i < loop->num_nodes; i++)
    for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
      size += estimate_num_insns (gsi_stmt (gsi), weights);
  free (body);

  return size;
}
/* Describe size of loop as detected by tree_estimate_loop_size.  */
struct loop_size
{
  /* Number of instructions in the loop.  */
  int overall;

  /* Number of instructions that will be likely optimized out in
     peeled iterations of loop (i.e. computation based on induction
     variable where induction variable starts at known constant.)  */
  int eliminated_by_peeling;

  /* Same statistics for last iteration of loop: it is smaller because
     instructions after exit are not executed.  */
  int last_iteration;
  int last_iteration_eliminated_by_peeling;

  /* If some IV computation will become constant.  */
  bool constant_iv;

  /* Number of call stmts that are not a builtin and are pure or const
     present on the hot path.  */
  int num_pure_calls_on_hot_path;
  /* Number of call stmts that are not a builtin and are not pure nor const
     present on the hot path.  */
  int num_non_pure_calls_on_hot_path;
  /* Number of statements other than calls in the loop.  */
  int non_call_stmts_on_hot_path;
  /* Number of branches seen on the hot path.  */
  int num_branches_on_hot_path;
};
/* Return true if OP in STMT will be constant after peeling LOOP.  */
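/* For instance (an illustrative sketch, not taken from the sources): in

       static const int tbl[4] = { 1, 2, 4, 8 };
       for (i = 0; i < 4; i++)
         sum += tbl[i];

   both the index I (a simple induction variable with invariant initial value
   and step) and the access TBL[I] (a read from a constant array whose index
   is itself constant after peeling) qualify, so each peeled copy is expected
   to fold down to an addition of a constant.  */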
static bool
constant_after_peeling (tree op, gimple stmt, struct loop *loop)
{
  affine_iv iv;

  if (is_gimple_min_invariant (op))
    return true;

  /* We can still fold accesses to constant arrays when index is known.  */
  if (TREE_CODE (op) != SSA_NAME)
    {
      tree base = op;

      /* First make fast look if we see constant array inside.  */
      while (handled_component_p (base))
	base = TREE_OPERAND (base, 0);
      if ((DECL_P (base)
	   && const_value_known_p (base))
	  || CONSTANT_CLASS_P (base))
	{
	  /* If so, see if we understand all the indices.  */
	  base = op;
	  while (handled_component_p (base))
	    {
	      if (TREE_CODE (base) == ARRAY_REF
		  && !constant_after_peeling (TREE_OPERAND (base, 1), stmt, loop))
		return false;
	      base = TREE_OPERAND (base, 0);
	    }
	  return true;
	}
      return false;
    }

  /* Induction variables are constants.  */
  if (!simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false))
    return false;
  if (!is_gimple_min_invariant (iv.base))
    return false;
  if (!is_gimple_min_invariant (iv.step))
    return false;
  return true;
}
/* Computes an estimated number of insns in LOOP.
   EXIT (if non-NULL) is an exit edge that will be eliminated in all but last
   iteration of the loop.
   EDGE_TO_CANCEL (if non-NULL) is a non-exit edge eliminated in the last
   iteration of the loop.
   Return results in SIZE, estimate benefits for complete unrolling exiting by EXIT.
   Stop estimating after UPPER_BOUND is met.  Return true in this case.  */
static bool
tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
			 struct loop_size *size, int upper_bound)
{
  basic_block *body = get_loop_body (loop);
  gimple_stmt_iterator gsi;
  unsigned int i;
  bool after_exit;
  vec<basic_block> path = get_loop_hot_path (loop);

  size->overall = 0;
  size->eliminated_by_peeling = 0;
  size->last_iteration = 0;
  size->last_iteration_eliminated_by_peeling = 0;
  size->num_pure_calls_on_hot_path = 0;
  size->num_non_pure_calls_on_hot_path = 0;
  size->non_call_stmts_on_hot_path = 0;
  size->num_branches_on_hot_path = 0;
  size->constant_iv = 0;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Estimating sizes for loop %i\n", loop->num);
  for (i = 0; i < loop->num_nodes; i++)
    {
      if (edge_to_cancel && body[i] != edge_to_cancel->src
	  && dominated_by_p (CDI_DOMINATORS, body[i], edge_to_cancel->src))
	after_exit = true;
      else
	after_exit = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " BB: %i, after_exit: %i\n", body[i]->index,
		 after_exit);
      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  int num = estimate_num_insns (stmt, &eni_size_weights);
	  bool likely_eliminated = false;
	  bool likely_eliminated_last = false;
	  bool likely_eliminated_peeled = false;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  size: %3i ", num);
	      print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, 0);
	    }

	  /* Look for reasons why we might optimize this stmt away.  */

	  /* Exit conditional.  */
	  if (exit && body[i] == exit->src
	      && stmt == last_stmt (exit->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in peeled copies.\n");
	      likely_eliminated_peeled = true;
	    }
	  else if (edge_to_cancel && body[i] == edge_to_cancel->src
		   && stmt == last_stmt (edge_to_cancel->src))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Exit condition will be eliminated "
			 "in last copy.\n");
	      likely_eliminated_last = true;
	    }
	  /* Sets of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && constant_after_peeling (gimple_assign_lhs (stmt), stmt, loop))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Induction variable computation will"
			 " be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Assignments of IV variables.  */
	  else if (gimple_code (stmt) == GIMPLE_ASSIGN
		   && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
		   && constant_after_peeling (gimple_assign_rhs1 (stmt), stmt, loop)
		   && (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
		       || constant_after_peeling (gimple_assign_rhs2 (stmt),
						  stmt, loop)))
	    {
	      size->constant_iv = true;
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Constant expression will be folded away.\n");
	      likely_eliminated = true;
	    }
	  /* Conditionals.  */
	  else if ((gimple_code (stmt) == GIMPLE_COND
		    && constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    && constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop))
		   || (gimple_code (stmt) == GIMPLE_SWITCH
		       && constant_after_peeling (gimple_switch_index (stmt), stmt, loop)))
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "   Constant conditional.\n");
	      likely_eliminated = true;
	    }

	  size->overall += num;
	  if (likely_eliminated || likely_eliminated_peeled)
	    size->eliminated_by_peeling += num;
	  if (!after_exit)
	    {
	      size->last_iteration += num;
	      if (likely_eliminated || likely_eliminated_last)
		size->last_iteration_eliminated_by_peeling += num;
	    }
	  if ((size->overall * 3 / 2 - size->eliminated_by_peeling
	       - size->last_iteration_eliminated_by_peeling) > upper_bound)
	    {
	      free (body);
	      path.release ();
	      return true;
	    }
	}
    }
  while (path.length ())
    {
      basic_block bb = path.pop ();
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) == GIMPLE_CALL)
	    {
	      int flags = gimple_call_flags (stmt);
	      tree decl = gimple_call_fndecl (stmt);

	      if (decl && DECL_IS_BUILTIN (decl)
		  && is_inexpensive_builtin (decl))
		;
	      else if (flags & (ECF_PURE | ECF_CONST))
		size->num_pure_calls_on_hot_path++;
	      else
		size->num_non_pure_calls_on_hot_path++;
	      size->num_branches_on_hot_path++;
	    }
	  else if (gimple_code (stmt) != GIMPLE_CALL
		   && gimple_code (stmt) != GIMPLE_DEBUG)
	    size->non_call_stmts_on_hot_path++;
	  if (((gimple_code (stmt) == GIMPLE_COND
		&& (!constant_after_peeling (gimple_cond_lhs (stmt), stmt, loop)
		    || constant_after_peeling (gimple_cond_rhs (stmt), stmt, loop)))
	       || (gimple_code (stmt) == GIMPLE_SWITCH
		   && !constant_after_peeling (gimple_switch_index (stmt), stmt, loop)))
	      && (!exit || bb != exit->src))
	    size->num_branches_on_hot_path++;
	}
    }
  path.release ();

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "size: %i-%i, last_iteration: %i-%i\n", size->overall,
	     size->eliminated_by_peeling, size->last_iteration,
	     size->last_iteration_eliminated_by_peeling);
  free (body);
  return false;
}
/* Estimate number of insns of completely unrolled loop.
   It is (NUNROLL + 1) * size of loop body with taking into account
   the fact that in last copy everything after exit conditional
   is dead and that some instructions will be eliminated after
   peeling.

   Loop body is likely going to simplify further, this is difficult
   to guess, we just decrease the result by 1/3.  */
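/* Worked example (all numbers invented for illustration): with NUNROLL == 3,
   SIZE->overall == 12, SIZE->eliminated_by_peeling == 4,
   SIZE->last_iteration == 8 and SIZE->last_iteration_eliminated_by_peeling == 6,
   the raw estimate is 3 * (12 - 4) + (8 - 6) = 26 insns, which the 1/3
   simplification factor below scales down to 26 * 2 / 3 = 17.  */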
static unsigned HOST_WIDE_INT
estimated_unrolled_size (struct loop_size *size,
			 unsigned HOST_WIDE_INT nunroll)
{
  HOST_WIDE_INT unr_insns = ((nunroll)
			     * (HOST_WIDE_INT) (size->overall
						- size->eliminated_by_peeling));
  if (!nunroll)
    unr_insns = 0;
  unr_insns += size->last_iteration - size->last_iteration_eliminated_by_peeling;

  unr_insns = unr_insns * 2 / 3;
  if (unr_insns <= 0)
    unr_insns = 1;

  return unr_insns;
}
/* Loop LOOP is known to not loop.  See if there is an edge in the loop
   body that can be removed to make the loop always exit and at
   the same time does not make any code potentially executed
   during the last iteration dead.

   After complete unrolling we still may get rid of the conditional
   on the exit in the last copy even if we have no idea what it does.
   This is quite a common case for loops of the form

     int a[5];
     for (i = 0; i < b; i++)
       a[i] = 0;

   Here we prove the loop to iterate 5 times but we do not know
   it from the induction variable.

   For now we handle only the simple case where there is an exit condition
   just before the latch block and the latch block contains no statements
   with side effects that may otherwise terminate the execution of the loop
   (such as by EH or by terminating the program or longjmp).

   In the general case we may want to cancel the paths leading to statements
   loop-niter identified as having undefined effect in the last iteration.
   The other cases are hopefully rare and will be cleaned up later.  */
static edge
loop_edge_to_cancel (struct loop *loop)
{
  vec<edge> exits;
  unsigned i;
  edge edge_to_cancel;
  gimple_stmt_iterator gsi;

  /* We want only one predecessor of the loop.  */
  if (EDGE_COUNT (loop->latch->preds) > 1)
    return NULL;

  exits = get_loop_exit_edges (loop);

  FOR_EACH_VEC_ELT (exits, i, edge_to_cancel)
    {
      /* Find the other edge than the loop exit
	 leaving the conditional.  */
      if (EDGE_COUNT (edge_to_cancel->src->succs) != 2)
	continue;
      if (EDGE_SUCC (edge_to_cancel->src, 0) == edge_to_cancel)
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 1);
      else
	edge_to_cancel = EDGE_SUCC (edge_to_cancel->src, 0);

      /* We can only handle conditionals.  */
      if (!(edge_to_cancel->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      /* We should never have conditionals in the loop latch.  */
      gcc_assert (edge_to_cancel->dest != loop->header);

      /* Check that it leads to loop latch.  */
      if (edge_to_cancel->dest != loop->latch)
	continue;

      exits.release ();

      /* Verify that the code in loop latch does nothing that may end program
	 execution without really reaching the exit.  This may include
	 non-pure/const function calls, EH statements, volatile ASMs etc.  */
      for (gsi = gsi_start_bb (loop->latch); !gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_has_side_effects (gsi_stmt (gsi)))
	  return NULL;
      return edge_to_cancel;
    }
  exits.release ();
  return NULL;
}
/* Remove all tests for exits that are known to be taken after LOOP was
   peeled NPEELED times.  Put gcc_unreachable before every statement
   known to not be executed.  */
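/* Illustrative sketch (hypothetical GIMPLE, not from the sources): an exit
   test that the recorded bounds prove must already have fired within the
   NPEELED peeled copies, such as

       if (i_1 > 4) goto <exit>; else goto <body>;

   is folded into the constant form

       if (1 != 0) goto <exit>; else goto <body>;

   and a statement that the bounds prove can no longer execute gets a
   __builtin_unreachable () call placed immediately before it, so that the
   following CFG cleanup can delete the dead paths.  */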
static bool
remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* If statement is known to be undefined after peeling, turn it
	 into unreachable (or trap when debugging experience is supposed
	 to be good).  */
      if (!elt->is_exit
	  && elt->bound.ult (double_int::from_uhwi (npeeled)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
	  gimple stmt = gimple_build_call
	      (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);

	  gimple_set_location (stmt, gimple_location (elt->stmt));
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	  changed = true;
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced statement unreachable: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	}
      /* If we know the exit will be taken after peeling, update.  */
      else if (elt->is_exit
	       && elt->bound.ule (double_int::from_uhwi (npeeled)))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Forced exit to be taken: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);
	  gcc_checking_assert (loop_exit_edge_p (loop, exit_edge));
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_true (elt->stmt);
	  else
	    gimple_cond_make_false (elt->stmt);
	  update_stmt (elt->stmt);
	  changed = true;
	}
    }
  return changed;
}
/* Remove all exits that are known to be never taken because of the loop bound
   discovered.  */

static bool
remove_redundant_iv_tests (struct loop *loop)
{
  struct nb_iter_bound *elt;
  bool changed = false;

  if (!loop->any_upper_bound)
    return false;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      /* Exit is pointless if it won't be taken before loop reaches
	 upper bound.  */
      if (elt->is_exit && loop->any_upper_bound
	  && loop->nb_iterations_upper_bound.ult (elt->bound))
	{
	  basic_block bb = gimple_bb (elt->stmt);
	  edge exit_edge = EDGE_SUCC (bb, 0);
	  struct tree_niter_desc niter;

	  if (!loop_exit_edge_p (loop, exit_edge))
	    exit_edge = EDGE_SUCC (bb, 1);

	  /* Only when we know the actual number of iterations, not
	     just a bound, we can remove the exit.  */
	  if (!number_of_iterations_exit (loop, exit_edge,
					  &niter, false, false)
	      || !integer_onep (niter.assumptions)
	      || !integer_zerop (niter.may_be_zero)
	      || !niter.niter
	      || TREE_CODE (niter.niter) != INTEGER_CST
	      || !loop->nb_iterations_upper_bound.ult
		   (tree_to_double_int (niter.niter)))
	    continue;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "Removed pointless exit: ");
	      print_gimple_stmt (dump_file, elt->stmt, 0, 0);
	    }
	  if (exit_edge->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (elt->stmt);
	  else
	    gimple_cond_make_true (elt->stmt);
	  update_stmt (elt->stmt);
	  changed = true;
	}
    }
  return changed;
}
/* Stores loops that will be unlooped after we process whole loop tree.  */
static vec<loop_p> loops_to_unloop;
static vec<int> loops_to_unloop_nunroll;

/* Cancel all fully unrolled loops by putting __builtin_unreachable
   on the latch edge.
   We do it after all unrolling since unlooping moves basic blocks
   across loop boundaries trashing loop closed SSA form as well
   as SCEV info needed to be intact during unrolling.

   IRRED_INVALIDATED is used to bookkeep if information about
   irreducible regions may become invalid as a result
   of the transformation.
   LOOP_CLOSED_SSA_INVALIDATED is used to bookkeep the case
   when we need to go into loop closed SSA form.  */
static void
unloop_loops (bitmap loop_closed_ssa_invalidated,
	      bool *irred_invalidated)
{
  while (loops_to_unloop.length ())
    {
      struct loop *loop = loops_to_unloop.pop ();
      int n_unroll = loops_to_unloop_nunroll.pop ();
      basic_block latch = loop->latch;
      edge latch_edge = loop_latch_edge (loop);
      int flags = latch_edge->flags;
      location_t locus = latch_edge->goto_locus;
      gimple stmt;
      gimple_stmt_iterator gsi;

      remove_exits_and_undefined_stmts (loop, n_unroll);

      /* Unloop destroys the latch edge.  */
      unloop (loop, irred_invalidated, loop_closed_ssa_invalidated);

      /* Create new basic block for the latch edge destination and wire
	 it in.  */
      stmt = gimple_build_call (builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
      latch_edge = make_edge (latch, create_basic_block (NULL, NULL, latch), flags);
      latch_edge->probability = 0;
      latch_edge->count = 0;
      latch_edge->flags |= flags;
      latch_edge->goto_locus = locus;

      latch_edge->dest->loop_father = current_loops->tree_root;
      latch_edge->dest->count = 0;
      latch_edge->dest->frequency = 0;
      set_immediate_dominator (CDI_DOMINATORS, latch_edge->dest, latch_edge->src);

      gsi = gsi_start_bb (latch_edge->dest);
      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
    }
  loops_to_unloop.release ();
  loops_to_unloop_nunroll.release ();
}
/* Tries to unroll LOOP completely, i.e. NITER times.
   UL determines which loops we are allowed to unroll.
   EXIT is the exit of the loop that should be eliminated.
   MAXITER specifies the bound on number of iterations, -1 if it is
   not known or too large for HOST_WIDE_INT.  The location
   LOCUS corresponding to the loop is used when emitting
   a summary of the unroll to the dump file.  */
static bool
try_unroll_loop_completely (struct loop *loop,
			    edge exit, tree niter,
			    enum unroll_level ul,
			    HOST_WIDE_INT maxiter,
			    location_t locus)
{
  unsigned HOST_WIDE_INT n_unroll, ninsns, max_unroll, unr_insns;
  gimple cond;
  struct loop_size size;
  bool n_unroll_found = false;
  edge edge_to_cancel = NULL;

  /* See if we proved number of iterations to be low constant.

     EXIT is an edge that will be removed in all but last iteration of
     the loop.

     EDGE_TO_CANCEL is an edge that will be removed from the last iteration
     of the unrolled sequence and is expected to make the final loop not
     rolling.

     If the number of executions of the loop is determined by a standard
     induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges
     leaving the IV test.  */
  if (host_integerp (niter, 1))
    {
      n_unroll = tree_low_cst (niter, 1);
      n_unroll_found = true;
      edge_to_cancel = EDGE_SUCC (exit->src, 0);
      if (edge_to_cancel == exit)
	edge_to_cancel = EDGE_SUCC (exit->src, 1);
    }
  /* We do not know the number of iterations and thus we can not eliminate
     the EXIT edge.  */
  else
    exit = NULL;

  /* See if we can improve our estimate by using recorded loop bounds.  */
  if (maxiter >= 0
      && (!n_unroll_found || (unsigned HOST_WIDE_INT) maxiter < n_unroll))
    {
      n_unroll = maxiter;
      n_unroll_found = true;
      /* Loop terminates before the IV variable test, so we can not
	 remove it in the last iteration.  */
      edge_to_cancel = NULL;
    }

  if (!n_unroll_found)
    return false;

  max_unroll = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
  if (n_unroll > max_unroll)
    return false;

  if (!edge_to_cancel)
    edge_to_cancel = loop_edge_to_cancel (loop);
<edge
> to_remove
= vNULL
;
714 if (ul
== UL_SINGLE_ITER
)
717 large
= tree_estimate_loop_size
718 (loop
, exit
, edge_to_cancel
, &size
,
719 PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS
));
720 ninsns
= size
.overall
;
723 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
724 fprintf (dump_file
, "Not unrolling loop %d: it is too large.\n",
729 unr_insns
= estimated_unrolled_size (&size
, n_unroll
);
730 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
732 fprintf (dump_file
, " Loop size: %d\n", (int) ninsns
);
733 fprintf (dump_file
, " Estimated size after unrolling: %d\n",
737 /* If the code is going to shrink, we don't need to be extra cautious
738 on guessing if the unrolling is going to be profitable. */
740 /* If there is IV variable that will become constant, we save
741 one instruction in the loop prologue we do not account
743 <= ninsns
+ (size
.constant_iv
!= false))
      /* We unroll only inner loops, because we do not consider it profitable
	 otherwise.  We still can cancel loopback edge of not rolling loop;
	 this is always a good idea.  */
      else if (ul == UL_NO_GROWTH)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: size would grow.\n",
		     loop->num);
	  return false;
	}
      /* Outer loops tend to be less interesting candidates for complete
	 unrolling unless we can do a lot of propagation into the inner loop
	 body.  For now we disable outer loop unrolling when the code would
	 grow.  */
      else if (loop->inner)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "it is not innermost and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is call on a hot path through the loop, then
	 there is most probably not much to optimize.  */
      else if (size.num_non_pure_calls_on_hot_path)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains call and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* If there is pure/const call in the function, then we
	 can still optimize the unrolled loop body if it contains
	 some other interesting code than the calls and code
	 storing or cumulating the return value.  */
      else if (size.num_pure_calls_on_hot_path
	       /* One IV increment, one test, one ivtmp store
		  and one useful stmt.  That is about minimal loop
		  doing pure call.  */
	       && (size.non_call_stmts_on_hot_path
		   <= 3 + size.num_pure_calls_on_hot_path))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "contains just pure calls and code would grow.\n",
		     loop->num);
	  return false;
	}
      /* Complete unrolling is a major win when control flow is removed and
	 one big basic block is created.  If the loop contains control flow
	 the optimization may still be a win because of eliminating the loop
	 overhead but it also may blow the branch predictor tables.
	 Limit number of branches on the hot path through the peeled
	 sequence.  */
      else if (size.num_branches_on_hot_path * (int) n_unroll
	       > PARAM_VALUE (PARAM_MAX_PEEL_BRANCHES))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     " number of branches on hot path in the unrolled sequence"
		     " reach --param max-peel-branches limit.\n",
		     loop->num);
	  return false;
	}
      else if (unr_insns
	       > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Not unrolling loop %d: "
		     "(--param max-completely-peeled-insns limit reached).\n",
		     loop->num);
	  return false;
	}
      initialize_original_copy_tables ();
      wont_exit = sbitmap_alloc (n_unroll + 1);
      bitmap_ones (wont_exit);
      bitmap_clear_bit (wont_exit, 0);

      if (!gimple_duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
						 n_unroll, wont_exit,
						 exit, &to_remove,
						 DLTHE_FLAG_UPDATE_FREQ
						 | DLTHE_FLAG_COMPLETTE_PEEL))
	{
	  free_original_copy_tables ();
	  free (wont_exit);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "Failed to duplicate the loop\n");
	  return false;
	}

      FOR_EACH_VEC_ELT (to_remove, i, e)
	{
	  bool ok = remove_path (e);
	  gcc_assert (ok);
	}

      to_remove.release ();
      free (wont_exit);
      free_original_copy_tables ();
    }

  /* Remove the conditional from the last copy of the loop.  */
  if (edge_to_cancel)
    {
      cond = last_stmt (edge_to_cancel->src);
      if (edge_to_cancel->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_false (cond);
      else
	gimple_cond_make_true (cond);
      update_stmt (cond);
      /* Do not remove the path.  Doing so may remove outer loop
	 and confuse bookkeeping code in tree_unroll_loops_completely.  */
    }

  /* Store the loop for later unlooping and exit removal.  */
  loops_to_unloop.safe_push (loop);
  loops_to_unloop_nunroll.safe_push (n_unroll);

  if (dump_enabled_p ())
    {
      if (!n_unroll)
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			 "Turned loop into non-loop; it never loops.\n");
      else
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, locus,
			   "Completely unroll loop %d times", (int) n_unroll);
	  if (profile_info)
	    dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS,
			 " (header execution count %d)",
			 (int) loop->header->count);
	  dump_printf (MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS, "\n");
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (exit)
	fprintf (dump_file, "Exit condition of peeled iterations was "
		 "eliminated.\n");
      if (edge_to_cancel)
	fprintf (dump_file, "Last iteration exit edge was proved true.\n");
      else
	fprintf (dump_file, "Latch of last iteration was marked by "
		 "__builtin_unreachable ().\n");
    }

  return true;
}
/* Adds a canonical induction variable to LOOP if suitable.
   CREATE_IV is true if we may create a new iv.  UL determines
   which loops we are allowed to completely unroll.  If TRY_EVAL is true,
   we try to determine the number of iterations of a loop by direct
   evaluation.  Returns true if cfg is changed.  */
static bool
canonicalize_loop_induction_variables (struct loop *loop,
				       bool create_iv, enum unroll_level ul,
				       bool try_eval)
{
  edge exit = NULL;
  tree niter;
  HOST_WIDE_INT maxiter;
  bool modified = false;
  location_t locus = UNKNOWN_LOCATION;

  niter = number_of_latch_executions (loop);
  exit = single_exit (loop);
  if (TREE_CODE (niter) == INTEGER_CST)
    locus = gimple_location (last_stmt (exit->src));
  else
    {
      /* If the loop has more than one exit, try checking all of them
	 for # of iterations determinable through scev.  */
      if (!exit)
	niter = find_loop_niter (loop, &exit);

      /* Finally if everything else fails, try brute force evaluation.  */
      if (try_eval
	  && (chrec_contains_undetermined (niter)
	      || TREE_CODE (niter) != INTEGER_CST))
	niter = find_loop_niter_by_eval (loop, &exit);

      if (exit)
	locus = gimple_location (last_stmt (exit->src));

      if (TREE_CODE (niter) != INTEGER_CST)
	exit = NULL;
    }

  /* We work exceptionally hard here to estimate the bound
     by find_loop_niter_by_eval.  Be sure to keep it for future.  */
  if (niter && TREE_CODE (niter) == INTEGER_CST)
    {
      record_niter_bound (loop, tree_to_double_int (niter),
			  exit == single_likely_exit (loop), true);
    }

  /* Force re-computation of loop bounds so we can remove redundant exits.  */
  maxiter = max_loop_iterations_int (loop);

  if (dump_file && (dump_flags & TDF_DETAILS)
      && TREE_CODE (niter) == INTEGER_CST)
    {
      fprintf (dump_file, "Loop %d iterates ", loop->num);
      print_generic_expr (dump_file, niter, TDF_SLIM);
      fprintf (dump_file, " times.\n");
    }
  if (dump_file && (dump_flags & TDF_DETAILS)
      && maxiter >= 0)
    fprintf (dump_file, "Loop %d iterates at most %i times.\n", loop->num,
	     (int) maxiter);

  /* Remove exits that are known to be never taken based on loop bound.
     Needs to be called after compilation of max_loop_iterations_int that
     populates the loop bounds.  */
  modified |= remove_redundant_iv_tests (loop);

  if (try_unroll_loop_completely (loop, exit, niter, ul, maxiter, locus))
    return true;

  if (create_iv
      && niter && !chrec_contains_undetermined (niter)
      && exit && just_once_each_iteration_p (loop, exit->src))
    create_canonical_iv (loop, exit, niter);

  return modified;
}
/* The main entry point of the pass.  Adds canonical induction variables
   to the suitable loops.  */

unsigned int
canonicalize_induction_variables (void)
{
  loop_iterator li;
  struct loop *loop;
  bool changed = false;
  bool irred_invalidated = false;
  bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

  free_numbers_of_iterations_estimates ();
  estimate_numbers_of_iterations ();

  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      changed |= canonicalize_loop_induction_variables (loop,
							true, UL_SINGLE_ITER,
							true);
    }
  gcc_assert (!need_ssa_update_p (cfun));

  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);
  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  /* Clean up the information about numbers of iterations, since brute force
     evaluation could reveal new information.  */
  scev_reset ();

  if (!bitmap_empty_p (loop_closed_ssa_invalidated))
    {
      gcc_checking_assert (loops_state_satisfies_p (LOOP_CLOSED_SSA));
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
    }
  BITMAP_FREE (loop_closed_ssa_invalidated);

  if (changed)
    return TODO_cleanup_cfg;
  return 0;
}
/* Propagate VAL into all uses of SSA_NAME.  */

static void
propagate_into_all_uses (tree ssa_name, tree val)
{
  imm_use_iterator iter;
  gimple use_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, ssa_name)
    {
      gimple_stmt_iterator use_stmt_gsi = gsi_for_stmt (use_stmt);
      use_operand_p use;

      FOR_EACH_IMM_USE_ON_STMT (use, iter)
	SET_USE (use, val);

      if (is_gimple_assign (use_stmt)
	  && get_gimple_rhs_class (gimple_assign_rhs_code (use_stmt))
	     == GIMPLE_SINGLE_RHS)
	{
	  tree rhs = gimple_assign_rhs1 (use_stmt);

	  if (TREE_CODE (rhs) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr (rhs);
	}

      fold_stmt_inplace (&use_stmt_gsi);
      update_stmt (use_stmt);
      maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt);
    }
}
/* Propagate constant SSA_NAMEs defined in basic block BB.  */
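/* Illustrative sketch (hypothetical GIMPLE, not from the sources): after
   unrolling, blocks commonly end up with degenerate single-argument PHIs and
   plain constant copies such as

       i_5 = PHI <7(3)>
       n_6 = 7;

   Both define SSA names that are really the constant 7; the constant is
   propagated into every use, the defining statement is removed and the SSA
   name is released, which lets the folded uses simplify further.  */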
static void
propagate_constants_for_unrolling (basic_block bb)
{
  gimple_stmt_iterator gsi;

  /* Look for degenerate PHI nodes with constant argument.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); )
    {
      gimple phi = gsi_stmt (gsi);
      tree result = gimple_phi_result (phi);
      tree arg = gimple_phi_arg_def (phi, 0);

      if (gimple_phi_num_args (phi) == 1 && TREE_CODE (arg) == INTEGER_CST)
	{
	  propagate_into_all_uses (result, arg);
	  gsi_remove (&gsi, true);
	  release_ssa_name (result);
	}
      else
	gsi_next (&gsi);
    }

  /* Look for assignments to SSA names with constant RHS.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      tree lhs;

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == INTEGER_CST
	  && (lhs = gimple_assign_lhs (stmt), TREE_CODE (lhs) == SSA_NAME)
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	{
	  propagate_into_all_uses (lhs, gimple_assign_rhs1 (stmt));
	  gsi_remove (&gsi, true);
	  release_ssa_name (lhs);
	}
      else
	gsi_next (&gsi);
    }
}
/* Process loops from innermost to outer, stopping at the innermost
   loop we unrolled.  */

static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
				vec<loop_p, va_stack>& father_stack,
				struct loop *loop)
{
  struct loop *loop_father;
  bool changed = false;
  struct loop *inner;
  enum unroll_level ul;

  /* Process inner loops first.  */
  for (inner = loop->inner; inner != NULL; inner = inner->next)
    changed |= tree_unroll_loops_completely_1 (may_increase_size,
					       unroll_outer, father_stack,
					       inner);

  /* If we changed an inner loop we cannot process outer loops in this
     iteration because SSA form is not up-to-date.  Continue with
     siblings of outer loops instead.  */
  if (changed)
    return true;

  /* Try to unroll this loop.  */
  loop_father = loop_outer (loop);
  if (!loop_father)
    return false;

  if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
      /* Unroll outermost loops only if asked to do so or they do
	 not cause code growth.  */
      && (unroll_outer || loop_outer (loop_father)))
    ul = UL_ALL;
  else
    ul = UL_NO_GROWTH;

  if (canonicalize_loop_induction_variables
	(loop, false, ul, !flag_tree_loop_ivcanon))
    {
      /* If we'll continue unrolling, we need to propagate constants
	 within the new basic blocks to fold away induction variable
	 computations; otherwise, the size might blow up before the
	 iteration is complete and the IR eventually cleaned up.  */
      if (loop_outer (loop_father) && !loop_father->aux)
	{
	  father_stack.safe_push (loop_father);
	  loop_father->aux = loop_father;
	}

      return true;
    }

  return false;
}
/* Unroll LOOPS completely if they iterate just few times.  Unless
   MAY_INCREASE_SIZE is true, perform the unrolling only if the
   size of the code does not increase.  */

unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
  vec<loop_p, va_stack> father_stack;
  bool changed;
  int iteration = 0;
  bool irred_invalidated = false;

  vec_stack_alloc (loop_p, father_stack, 16);
  do
    {
      changed = false;
      bitmap loop_closed_ssa_invalidated = NULL;

      if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);

      free_numbers_of_iterations_estimates ();
      estimate_numbers_of_iterations ();

      changed = tree_unroll_loops_completely_1 (may_increase_size,
						unroll_outer, father_stack,
						current_loops->tree_root);
      if (changed)
	{
	  struct loop **iter;
	  unsigned i;

	  /* Be sure to skip unlooped loops while processing father_stack
	     array.  */
	  FOR_EACH_VEC_ELT (loops_to_unloop, i, iter)
	    (*iter)->aux = NULL;
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (!(*iter)->aux)
	      *iter = NULL;
	  unloop_loops (loop_closed_ssa_invalidated, &irred_invalidated);

	  /* We can not use TODO_update_ssa_no_phi because VOPS gets confused.  */
	  if (loop_closed_ssa_invalidated
	      && !bitmap_empty_p (loop_closed_ssa_invalidated))
	    rewrite_into_loop_closed_ssa (loop_closed_ssa_invalidated,
					  TODO_update_ssa);
	  else
	    update_ssa (TODO_update_ssa);

	  /* Propagate the constants within the new basic blocks.  */
	  FOR_EACH_VEC_ELT (father_stack, i, iter)
	    if (*iter)
	      {
		unsigned j;
		basic_block *body = get_loop_body_in_dom_order (*iter);
		for (j = 0; j < (*iter)->num_nodes; j++)
		  propagate_constants_for_unrolling (body[j]);
		free (body);
		(*iter)->aux = NULL;
	      }
	  father_stack.truncate (0);

	  /* This will take care of removing completely unrolled loops
	     from the loop structures so we can continue unrolling now
	     innermost loops.  */
	  if (cleanup_tree_cfg ())
	    update_ssa (TODO_update_ssa_only_virtuals);

	  /* Clean up the information about numbers of iterations, since
	     complete unrolling might have invalidated it.  */
	  scev_reset ();
#ifdef ENABLE_CHECKING
	  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
	    verify_loop_closed_ssa (true);
#endif
	}
      if (loop_closed_ssa_invalidated)
	BITMAP_FREE (loop_closed_ssa_invalidated);
    }
  while (changed
	 && ++iteration <= PARAM_VALUE (PARAM_MAX_UNROLL_ITERATIONS));

  father_stack.release ();

  if (irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return 0;
}