/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "calls.h"
#include "expr.h"
#include "tm_p.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "intl.h"
#include "pointer-set.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "params.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "stringpool.h"
#include "tree-ssanames.h"

#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)

/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

typedef struct
{
  mpz_t below, up;
} bounds;

/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  double_int off;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset.  */
      off = tree_to_double_int (op1);
      off = off.sext (TYPE_PRECISION (type));
      mpz_set_double_int (offset, off, false);
      if (negate)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      off = tree_to_double_int (expr);
      mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
      break;

    default:
      break;
    }
}
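
/* For illustration, for a 32-bit int SSA name a_5:

     a_5 + 12  yields  *var = a_5, offset = 12
     a_5 - 7   yields  *var = a_5, offset = -7   (offset negated)
     42        yields  *var = 0,   offset = 42

   The constant operand is always sign extended first, so an unsigned
   0xffffffff operand contributes the offset -1.  */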

/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  double_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      gimple_stmt_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          double_int minc, maxc;
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = minv.max (minc, TYPE_UNSIGNED (type));
                  maxv = maxv.min (maxc, TYPE_UNSIGNED (type));
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (minv.cmp (maxv, TYPE_UNSIGNED (type)) > 0)
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }
      if (rtype == VR_RANGE)
        {
          mpz_t minm, maxm;
          gcc_assert (minv.cmp (maxv, TYPE_UNSIGNED (type)) <= 0);
          mpz_init (minm);
          mpz_init (maxm);
          mpz_set_double_int (minm, minv, TYPE_UNSIGNED (type));
          mpz_set_double_int (maxm, maxv, TYPE_UNSIGNED (type));
          mpz_add (minm, minm, off);
          mpz_add (maxm, maxm, off);
          /* If the computation may not wrap or off is zero, then this
             is always fine.  If off is negative and minv + off isn't
             smaller than type's minimum, or off is positive and
             maxv + off isn't bigger than type's maximum, use the more
             precise range too.  */
          if (nowrap_type_p (type)
              || mpz_sgn (off) == 0
              || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
              || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
            {
              mpz_set (min, minm);
              mpz_set (max, maxm);
              mpz_clear (minm);
              mpz_clear (maxm);
              return;
            }
          mpz_clear (minm);
          mpz_clear (maxm);
        }
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
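
/* For illustration: if VRP recorded the range [0, 100] for a 32-bit
   unsigned i_1 and OFF is 10, then maxv + off = 110 does not exceed the
   type's maximum, so the refined range [10, 110] is used; with OFF = -20,
   minv + off = -20 falls below the type's minimum of 0, and since the
   unsigned type may wrap we fall back to the static type bounds.  */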

/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is X - Y - M.
     Therefore, if the arithmetic in TYPE does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
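
/* For illustration, take an 8-bit unsigned type (M = 256), X = 10, Y = 3.
   Without overflow, (var + 10) - (var + 3) is 7; if var + 10 wraps while
   var + 3 does not, it is 7 - 256 = -249.  The bounds are therefore
   (-249, 7) for a wrapping type and (7, 7) when the arithmetic cannot
   overflow.  */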

/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, tmp, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
         a guard is unlikely to appear, so we do not bother with handling
         it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
         the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same type
         as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }

      return;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      tmp = varc0; varc0 = varc1; varc1 = tmp;
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      tmp = varx; varx = vary; vary = tmp;
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}

/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offset by an
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  Matters are a bit more complicated when the
         addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, double_int delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  mpz_set_double_int (mdelta, delta, false);

  mpz_init (max);
  mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}

/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}

/* Returns inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
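
/* The loop above computes x^(2^ctr - 1): each step multiplies the result
   by the current power of X and squares X.  The multiplicative group of
   odd residues modulo 2^s has order 2^(s-1), and ctr = floor_log2 (mask)
   = s - 1, so x^(2^(s-1) - 1) == x^-1 (mod 2^s).

   For illustration, with s = 4 (MASK = 15) and X = 3:
   3^7 = 2187 == 11 (mod 16), and indeed 3 * 11 = 33 == 1 (mod 16).  */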

/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable
   eventually reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  double_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && tree_to_double_int (c).mod (tree_to_double_int (s),
                                         TYPE_UNSIGNED (type),
                                         EXACT_DIV_EXPR).is_zero ())
      || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (c))
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = double_int::mask (TYPE_PRECISION (type)
                              - tree_to_uhwi (num_ending_zeros (s)));
      mpz_set_double_int (bnd, max, true);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (type)), true);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ...  */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        mpz_set_double_int (bnd, tree_to_double_int (c), true);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  mpz_set_double_int (d, tree_to_double_int (s), true);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}

/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = mpz_get_double_int (niter_type, max, false);
  mpz_clear (max);

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
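
/* For illustration: for (i = 0; i != 12; i += 4) in an 8-bit unsigned
   type gives s = 4 and c = 12, so bits = 2, d = 4 and bound = 63; the
   test becomes 1 * i' <> 3 computed mod 64, and the result is
   inverse (1) * 3 & 63 = 3 iterations (i = 0, 4, 8).  */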

/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  mpz_set_double_int (mmod, tree_to_double_int (mod), true);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points being
     placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
            (iv0->no_overflow && integer_nonzerop (iv0->step))
            || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build_pointer_plus (iv1->base, tmod));
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build2 (PLUS_EXPR, type1,
                                           iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv0->base,
                                                       fold_build1 (NEGATE_EXPR,
                                                                    type1, tmod)),
                              iv1->base);
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (MINUS_EXPR, type1,
                                           iv0->base, tmod),
                              iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions,
                                      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero,
                                      noloop);
  bounds_add (bnds, tree_to_double_int (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}

/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}

/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  double_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the
     loop, and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables
     us to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = tree_to_double_int (iv0->step);
  else
    {
      dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  mpz_set_double_int (mstep, dstep, true);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetics.
                      Gcc in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers.  */
                   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         the 0 address never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
  niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                    niter->may_be_zero, mbz);
}

/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision.  If the difference
         is nonnegative, we are done, otherwise we must record the
         condition.  */

      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = mpz_get_double_int (niter_type, bnds->up, false);
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  mpz_set_double_int (mstep, tree_to_double_int (step), true);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = mpz_get_double_int (niter_type, tmp, false);
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
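
/* For illustration: for (i = 3; i < 10; i += 4) gives delta = 7 and
   step = 4, so niter = (7 + 4 - 1) / 4 = 2, matching the iterations at
   i = 3 and i = 7.  (When delta is constant, the case is instead caught
   by the transformation to a != exit test above.)  */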

/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter, bool exit_must_be_taken,
                         bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
        return false;
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, double_int_one, type1);

  return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
                                  bnds);
}
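
/* For illustration: for (i = 0; i <= n; i++) is handled by rewriting the
   condition to i < n + 1, after recording the assumption n != TYPE_MAX;
   if n were TYPE_MAX, the original condition could never become false and
   the loop would not terminate.  */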

/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}

/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can still be used as an unreliable estimate of
     the upper bound.  But we have no API to pass it down to number of
     iterations code and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = double_int_zero;

  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      SWAP (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrap around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
    }

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      if (code != NE_EXPR)
        return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = double_int_zero;
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
               : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
                                     &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (type, iv0, iv1, niter, exit_must_be_taken,
                                     &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
        {
          fprintf (dump_file, "  result:\n");
          if (!integer_nonzerop (niter->assumptions))
            {
              fprintf (dump_file, "    under assumptions ");
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          if (!integer_zerop (niter->may_be_zero))
            {
              fprintf (dump_file, "    zero if ");
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "    # of iterations ");
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
          fprintf (dump_file, ", bounded by ");
          dump_double_int (dump_file, niter->max, true);
          fprintf (dump_file, "\n");
        }
      else
        fprintf (dump_file, "  failed\n\n");
    }

  return ret;
}

/* Substitute NEW_TREE for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}

/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  */

tree
expand_simple_operations (tree expr)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
        {
          e = TREE_OPERAND (expr, i);
          ee = expand_simple_operations (e);
          if (e == ee)
            continue;

          if (!ret)
            ret = copy_node (expr);

          TREE_OPERAND (ret, i) = ee;
        }

      if (!ret)
        return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  if (TREE_CODE (expr) != SSA_NAME)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
        return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
         could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
          && src->loop_father != dest->loop_father)
        return expr;

      return expand_simple_operations (e);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing.  */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
        return e;

      if (code == SSA_NAME)
        return expand_simple_operations (e);

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
        return expr;

      ee = expand_simple_operations (e);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}
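
/* For illustration, given the GIMPLE sequence

     a_1 = b_2 + 4;
     c_3 = (unsigned int) a_1;

   expand_simple_operations (c_3) returns (unsigned int) (b_2 + 4),
   assuming b_2 is not itself defined by another simple operation;
   expansion stops at definitions that are neither copies, casts, nor
   additions of an invariant.  */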

/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, te, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we can simplify expr
         using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return boolean_true_node;
    }

  te = expand_simple_operations (expr);

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
  if (e && integer_zerop (e))
    return e;

  return expr;
}

/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}
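
/* For illustration: with COND being n_5 > 10 (taken from a loop header
   copy) and EXPR being n_5 > 0, the test "COND implies EXPR" is checked
   by folding !(n_5 > 10) || (n_5 > 0); the two ranges cover all values,
   so fold reduces it to a nonzero constant and EXPR simplifies to true.  */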

/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

static tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  gimple stmt;
  tree cond;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  /* Limit walking the dominators to avoid quadraticness in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      stmt = last_stmt (e->src);
      cond = fold_build2 (gimple_cond_code (stmt),
                          boolean_type_node,
                          gimple_cond_lhs (stmt),
                          gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
        cond = invert_truthvalue (cond);
      expr = tree_simplify_using_condition (cond, expr);
      ++cnt;
    }

  return expr;
}

/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}

/* Returns true if EXIT is the only possible exit from LOOP.  */

bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  gimple_stmt_iterator bsi;
  unsigned i;
  gimple call;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          call = gsi_stmt (bsi);
          if (gimple_code (call) != GIMPLE_CALL)
            continue;

          if (gimple_has_side_effects (call))
            {
              free (body);
              return false;
            }
        }
    }

  free (body);
  return true;
}

/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
   useful information could be derived (and the fields of NITER have
   the meaning described in comments at struct tree_niter_desc
   declaration), false otherwise.  If WARN is true and
   -Wunsafe-loop-optimizations was given, warn if the optimizer is going to use
   potentially unsafe assumptions.
   When EVERY_ITERATION is true, only tests that are known to be executed
   every iteration are considered (i.e. only tests that alone bound the loop).  */

bool
number_of_iterations_exit (struct loop *loop, edge exit,
                           struct tree_niter_desc *niter,
                           bool warn, bool every_iteration)
{
  gimple stmt;
  tree type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;
  bool safe;

  safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);

  if (every_iteration && !safe)
    return false;

  niter->assumptions = boolean_false_node;
  stmt = last_stmt (exit->src);
  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
    return false;

  /* We want the condition for staying inside loop.  */
  code = gimple_cond_code (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    code = invert_tree_comparison (code, false);

  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      break;

    default:
      return false;
    }

  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;

  if (!simple_iv (loop, loop_containing_stmt (stmt), op0, &iv0, false))
    return false;
  if (!simple_iv (loop, loop_containing_stmt (stmt), op1, &iv1, false))
    return false;

  /* We don't want to see undefined signed overflow warnings while
     computing the number of iterations.  */
  fold_defer_overflow_warnings ();

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
                                  loop_only_exit_p (loop, exit), safe))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
                                                            niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
                                                            niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->assumptions
          = simplify_using_initial_conditions (loop,
                                               niter->assumptions);
  niter->may_be_zero
          = simplify_using_initial_conditions (loop,
                                               niter->may_be_zero);

  fold_undefer_and_ignore_overflow_warnings ();

  /* If NITER has simplified into a constant, update MAX.  */
  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = tree_to_double_int (niter->niter);

  if (integer_onep (niter->assumptions))
    return true;

  /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
     But if we can prove that there is overflow or some other source of weird
     behavior, ignore the loop even with -funsafe-loop-optimizations.  */
  if (integer_zerop (niter->assumptions) || !single_exit (loop))
    return false;

  if (flag_unsafe_loop_optimizations)
    niter->assumptions = boolean_true_node;

  if (warn)
    {
      const char *wording;
      location_t loc = gimple_location (stmt);

      /* We can provide a more specific warning if one of the operands is
         constant and the other advances by +1 or -1.  */
      if (!integer_zerop (iv1.step)
          ? (integer_zerop (iv0.step)
             && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
          : (integer_onep (iv0.step) || integer_all_onesp (iv0.step)))
        wording =
          flag_unsafe_loop_optimizations
          ? N_("assuming that the loop is not infinite")
          : N_("cannot optimize possibly infinite loops");
      else
        wording =
          flag_unsafe_loop_optimizations
          ? N_("assuming that the loop counter does not overflow")
          : N_("cannot optimize loop, the loop counter may overflow");

      warning_at ((LOCATION_LINE (loc) > 0) ? loc : input_location,
                  OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
    }

  return flag_unsafe_loop_optimizations;
}

/* Try to determine the number of iterations of LOOP.  If we succeed,
   expression giving number of iterations is returned and *EXIT is
   set to the edge from that the information is obtained.  Otherwise
   chrec_dont_know is returned.  */

tree
find_loop_niter (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;
  struct tree_niter_desc desc;

  *exit = NULL;
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &desc, false))
        continue;

      if (integer_nonzerop (desc.may_be_zero))
        {
          /* We exit in the first iteration through this exit.
             We won't find anything better.  */
          niter = build_int_cst (unsigned_type_node, 0);
          *exit = ex;
          break;
        }

      if (!integer_zerop (desc.may_be_zero))
        continue;

      aniter = desc.niter;

      if (!niter)
        {
          /* Nothing recorded yet.  */
          niter = aniter;
          *exit = ex;
          continue;
        }

      /* Prefer constants, the lower the better.  */
      if (TREE_CODE (aniter) != INTEGER_CST)
        continue;

      if (TREE_CODE (niter) != INTEGER_CST)
        {
          niter = aniter;
          *exit = ex;
          continue;
        }

      if (tree_int_cst_lt (aniter, niter))
        {
          niter = aniter;
          *exit = ex;
          continue;
        }
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}

/* Return true if loop is known to have bounded number of iterations.  */

bool
finite_loop_p (struct loop *loop)
{
  double_int nit;
  int flags;

  if (flag_unsafe_loop_optimizations)
    return true;
  flags = flags_from_decl_or_type (current_function_decl);
  if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
                 loop->num);
      return true;
    }

  if (loop->any_upper_bound
      || max_loop_iterations (loop, &nit))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
                 loop->num);
      return true;
    }
  return false;
}

/*

   Analysis of a number of iterations of a loop by a brute-force evaluation.

*/

/* Bound on the number of iterations we try to evaluate.  */

#define MAX_ITERATIONS_TO_TRACK \
  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))

/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static gimple
chain_of_csts_start (struct loop *loop, tree x)
{
  gimple stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = gimple_bb (stmt);
  enum tree_code code;

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      if (bb == loop->header)
        return stmt;

      return NULL;
    }

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_references_memory_p (stmt)
      || TREE_CODE_CLASS (code) == tcc_reference
      || (code == ADDR_EXPR
          && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
    return NULL;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_TREE)
    return NULL;

  return chain_of_csts_start (loop, use);
}

/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only of operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants.

   If such phi node exists, it is returned, otherwise NULL is returned.  */

static gimple
get_base_for (struct loop *loop, tree x)
{
  gimple phi;
  tree init, next;

  if (is_gimple_min_invariant (x))
    return NULL;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (TREE_CODE (next) != SSA_NAME)
    return NULL;

  if (!is_gimple_min_invariant (init))
    return NULL;

  if (chain_of_csts_start (loop, next) != phi)
    return NULL;

  return phi;
}
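
/* Illustrative example (not part of the original sources): given

     i_1 = PHI <0 (preheader), i_2 (latch)>
     i_2 = i_1 + 3;

   get_base_for (loop, i_2) returns the phi node defining i_1: the
   initial value 0 is constant and the latch value i_2 is computed from
   i_1 by a single operation with a constant operand.  */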

/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * otherwise X is a SSA name, whose value in the considered loop is derived
     by a chain of operations with constant from a result of a phi node in
     the header of the loop.  Then we return value of X when the value of the
     result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  gimple stmt;

  gcc_checking_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;

  stmt = SSA_NAME_DEF_STMT (x);
  if (gimple_code (stmt) == GIMPLE_PHI)
    return base;

  gcc_checking_assert (is_gimple_assign (stmt));

  /* STMT must be either an assignment of a single SSA name or an
     expression involving an SSA name and a constant.  Try to fold that
     expression using the value for the SSA name.  */
  if (gimple_assign_ssa_name_copy_p (stmt))
    return get_val_for (gimple_assign_rhs1 (stmt), base);
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
           && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    return fold_build1 (gimple_assign_rhs_code (stmt),
                        gimple_expr_type (stmt),
                        get_val_for (gimple_assign_rhs1 (stmt), base));
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
        rhs1 = get_val_for (rhs1, base);
      else if (TREE_CODE (rhs2) == SSA_NAME)
        rhs2 = get_val_for (rhs2, base);
      else
        gcc_unreachable ();
      return fold_build2 (gimple_assign_rhs_code (stmt),
                          gimple_expr_type (stmt), rhs1, rhs2);
    }
  else
    gcc_unreachable ();
}
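
/* Illustrative example (not part of the original sources): with the
   phi node above, get_val_for (i_2, build_int_cst (integer_type_node, 5))
   folds 5 + 3 and returns the constant 8.  */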

/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in first few iterations of the loop (assuming that
   these values are constant) and determining the first iteration in which
   the condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */

tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree acnd;
  tree op[2], val[2], next[2], aval[2];
  gimple phi, cond;
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || gimple_code (cond) != GIMPLE_COND)
    return chrec_dont_know;

  cmp = gimple_cond_code (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cmp = invert_tree_comparison (cmp, false);

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      op[0] = gimple_cond_lhs (cond);
      op[1] = gimple_cond_rhs (cond);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (is_gimple_min_invariant (op[j]))
        {
          val[j] = op[j];
          next[j] = NULL_TREE;
          op[j] = NULL_TREE;
        }
      else
        {
          phi = get_base_for (loop, op[j]);
          if (!phi)
            return chrec_dont_know;
          val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
          next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
        }
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
        aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
        {
          fold_undefer_and_ignore_overflow_warnings ();
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Proved that loop %d iterates %d times using brute force.\n",
                     loop->num, i);
          return build_int_cst (unsigned_type_node, i);
        }

      for (j = 0; j < 2; j++)
        {
          val[j] = get_val_for (next[j], val[j]);
          if (!is_gimple_min_invariant (val[j]))
            {
              fold_undefer_and_ignore_overflow_warnings ();
              return chrec_dont_know;
            }
        }
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}
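
/* Illustrative example (not part of the original sources): for

     unsigned x = 1;
     while (x != 0)
       x <<= 1;

   no affine exit-test analysis applies, but evaluating X iteration by
   iteration shows the condition x != 0 first becomes false after 32
   iterations for a 32-bit "unsigned", so the constant 32 is returned.  */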

/* Finds the exit of the LOOP through which the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find an exit for which loop_niter_by_eval
   determines the number of iterations, chrec_dont_know is returned.  */

tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;

  /* Loops with multiple exits are expensive to handle and less important.  */
  if (!flag_expensive_optimizations
      && exits.length () > 1)
    {
      exits.release ();
      return chrec_dont_know;
    }

  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
        continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
        continue;

      if (niter
          && !tree_int_cst_lt (aniter, niter))
        continue;

      niter = aniter;
      *exit = ex;
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}

/*

   Analysis of upper bounds on number of iterations of a loop.

*/

static double_int derive_constant_upper_bound_ops (tree, tree,
                                                   enum tree_code, tree);

/* Returns a constant upper bound on the value of the right-hand side of
   an assignment statement STMT.  */

static double_int
derive_constant_upper_bound_assign (gimple stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);

  return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
                                          op0, code, op1);
}

/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */

static double_int
derive_constant_upper_bound (tree val)
{
  enum tree_code code;
  tree op0, op1;

  extract_ops_from_tree (val, &code, &op0, &op1);
  return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
}

/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
   whose type is TYPE.  The expression is considered to be unsigned.  If
   its type is signed, its value must be nonnegative.  */

static double_int
derive_constant_upper_bound_ops (tree type, tree op0,
                                 enum tree_code code, tree op1)
{
  tree subtype, maxt;
  double_int bnd, max, mmax, cst;
  gimple stmt;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = tree_to_double_int (maxt);

  switch (code)
    {
    case INTEGER_CST:
      return tree_to_double_int (op0);

    CASE_CONVERT:
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
          /* If TYPE is also signed, the fact that VAL is nonnegative implies
             that OP0 is nonnegative.  */
          && TYPE_UNSIGNED (type)
          && !tree_expr_nonnegative_p (op0))
        {
          /* If we cannot prove that the casted expression is nonnegative,
             we cannot establish more useful upper bound than the precision
             of the type gives us.  */
          return max;
        }

      /* We now know that op0 is a nonnegative value.  Try deriving an upper
         bound for it.  */
      bnd = derive_constant_upper_bound (op0);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
         attained.  */
      if (max.ult (bnd))
        return max;

      return bnd;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || !tree_expr_nonnegative_p (op0))
        return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
         choose the most logical way how to treat this constant regardless
         of the signedness of the type.  */
      cst = tree_to_double_int (op1);
      cst = cst.sext (TYPE_PRECISION (type));
      if (code != MINUS_EXPR)
        cst = -cst;

      bnd = derive_constant_upper_bound (op0);

      if (cst.is_negative ())
        {
          cst = -cst;
          /* Avoid CST == 0x80000...  */
          if (cst.is_negative ())
            return max;

          /* OP0 + CST.  We need to check that
             BND <= MAX (type) - CST.  */

          mmax = max - cst;
          if (bnd.ugt (mmax))
            return max;

          return bnd + cst;
        }
      else
        {
          /* OP0 - CST, where CST >= 0.

             If TYPE is signed, we have already verified that OP0 >= 0, and we
             know that the result is nonnegative.  This implies that
             VAL <= BND - CST.

             If TYPE is unsigned, we must additionally know that OP0 >= CST,
             otherwise the operation underflows.  */

          /* This should only happen if the type is unsigned; however, for
             buggy programs that use overflowing signed arithmetics even with
             -fno-wrapv, this condition may also be true for signed values.  */
          if (bnd.ult (cst))
            return max;

          if (TYPE_UNSIGNED (type))
            {
              tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
                                      double_int_to_tree (type, cst));
              if (!tem || !integer_nonzerop (tem))
                return max;
            }

          bnd -= cst;
        }

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || tree_int_cst_sign_bit (op1))
        return max;

      bnd = derive_constant_upper_bound (op0);
      return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR);

    case BIT_AND_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || tree_int_cst_sign_bit (op1))
        return max;
      return tree_to_double_int (op1);

    case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (stmt) != GIMPLE_ASSIGN
          || gimple_assign_lhs (stmt) != op0)
        return max;
      return derive_constant_upper_bound_assign (stmt);

    default:
      return max;
    }
}
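
/* Worked example (illustrative, not part of the original sources): for
   an unsigned char SSA name V, the SSA_NAME case yields the type
   maximum 255; for V / 2 the FLOOR_DIV_EXPR case yields 255 udiv 2
   = 127; and for V / 2 + 2 the PLUS_EXPR case verifies 127 <= 255 - 2
   and returns 127 + 2 = 129.  */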

/* Emit a -Waggressive-loop-optimizations warning if needed.  */

static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
                                       double_int i_bound, gimple stmt)
{
  /* Don't warn if the loop doesn't have known constant bound.  */
  if (!loop->nb_iterations
      || TREE_CODE (loop->nb_iterations) != INTEGER_CST
      || !warn_aggressive_loop_optimizations
      /* To avoid warning multiple times for the same loop,
         only start warning when we preserve loops.  */
      || (cfun->curr_properties & PROP_loops) == 0
      /* Only warn once per loop.  */
      || loop->warned_aggressive_loop_optimizations
      /* Only warn if undefined behavior gives us lower estimate than the
         known constant bound.  */
      || i_bound.ucmp (tree_to_double_int (loop->nb_iterations)) >= 0
      /* And undefined behavior happens unconditionally.  */
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
    return;

  edge e = single_exit (loop);
  if (e == NULL)
    return;

  gimple estmt = last_stmt (e->src);
  if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
                  "iteration %E invokes undefined behavior",
                  double_int_to_tree (TREE_TYPE (loop->nb_iterations),
                                      i_bound)))
    inform (gimple_location (estmt), "containing loop");
  loop->warned_aggressive_loop_optimizations = true;
}
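
/* Illustrative example (not part of the original sources): for

     int a[4];
     for (int i = 0; i < 10; i++)
       a[i] = i;

   the access a[i] is undefined from the fifth iteration on, which is
   below the known constant bound of 10 iterations, so this function
   reports a diagnostic along the lines of "iteration 4 invokes
   undefined behavior", pointing at the containing loop.  */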

/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at last when the STMT is executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is an unsigned double_int upper estimate on BOUND.  */

static void
record_estimate (struct loop *loop, tree bound, double_int i_bound,
                 gimple at_stmt, bool is_exit, bool realistic, bool upper)
{
  double_int delta;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
               upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      dump_double_int (dump_file, i_bound, true);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  else
    gcc_checking_assert (i_bound == tree_to_double_int (bound));
  if (!upper && !realistic)
    return;

  /* If we have a guaranteed upper bound, record it in the appropriate
     list, unless this is an !is_exit bound (i.e. undefined behavior in
     at_stmt) in a loop with known constant number of iterations.  */
  if (upper
      && (is_exit
          || loop->nb_iterations == NULL_TREE
          || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
    {
      struct nb_iter_bound *elt = ggc_alloc_nb_iter_bound ();

      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* If statement is executed on every path to the loop latch, we can directly
     infer the upper bound on the # of iterations of the loop.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
    return;

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit then the loop latch is executed at most BOUND times,
     otherwise it can be executed BOUND + 1 times.  We will lower the estimate
     later if such statement must be executed on last iteration.  */
  if (is_exit)
    delta = double_int_zero;
  else
    delta = double_int_one;
  i_bound += delta;

  /* If an overflow occurred, ignore the result.  */
  if (i_bound.ult (delta))
    return;

  if (upper && !is_exit)
    do_warn_aggressive_loop_optimizations (loop, i_bound, at_stmt);
  record_niter_bound (loop, i_bound, realistic, upper);
}

/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */

static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
                       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;
  double_int max;

  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (base) != INTEGER_CST)
        base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (base) != INTEGER_CST)
        base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
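
/* Worked example (illustrative, not part of the original sources): for
   BASE 0, STEP 4 and the range <0, 255>, DELTA is 255 and NITER_BOUND
   is 255 / 4 = 63, i.e. STMT can be executed at most 63 + 1 = 64 times
   before the induction variable would leave the range.  */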

/* Determine information about number of iterations a LOOP from the index
   IDX of a data reference accessed in STMT.  Callback for
   for_each_index.  */

struct ilb_data
{
  struct loop *loop;
  gimple stmt;
};

static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = true, at_end = false;
  struct loop *loop = data->loop;
  bool reliable = true;

  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (base))
    {
      at_end = true;
      upper = false;
    }

  struct loop *dloop = loop_containing_stmt (data->stmt);
  if (!dloop)
    return true;

  ev = analyze_scalar_evolution (dloop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array of length 1 at the end of a structure most likely extends
     beyond its bounds.  */
  if (at_end
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs into the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);

  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  /* If access is not executed on every iteration, we must ensure that overflow
     may not make the access valid later.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
      && scev_probably_wraps_p (initial_condition_in_loop_num (ev, loop->num),
                                step, data->stmt, loop, true))
    reliable = false;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high,
                         reliable, upper);
  return true;
}
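
/* Illustrative example (not part of the original sources): in

     int a[100];
     for (i = 0; i != n; i++)
       a[i] = 0;

   the index must stay within <0, 99>, so the store can execute at most
   100 times and the loop gets an upper bound of 100 iterations even
   though N is unknown.  */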

/* Determine information about number of iterations a LOOP from the bounds
   of arrays in the data reference REF accessed in STMT.  */

static void
infer_loop_bounds_from_ref (struct loop *loop, gimple stmt, tree ref)
{
  struct ilb_data data;

  data.loop = loop;
  data.stmt = stmt;
  for_each_index (&ref, idx_infer_loop_bounds, &data);
}

/* Determine information about number of iterations of a LOOP from the way
   arrays are used in STMT.  */

static void
infer_loop_bounds_from_array (struct loop *loop, gimple stmt)
{
  if (is_gimple_assign (stmt))
    {
      tree op0 = gimple_assign_lhs (stmt);
      tree op1 = gimple_assign_rhs1 (stmt);

      /* For each memory access, analyze its access function
         and record a bound on the loop iteration domain.  */
      if (REFERENCE_CLASS_P (op0))
        infer_loop_bounds_from_ref (loop, stmt, op0);

      if (REFERENCE_CLASS_P (op1))
        infer_loop_bounds_from_ref (loop, stmt, op1);
    }
  else if (is_gimple_call (stmt))
    {
      tree arg, lhs;
      unsigned i, n = gimple_call_num_args (stmt);

      lhs = gimple_call_lhs (stmt);
      if (lhs && REFERENCE_CLASS_P (lhs))
        infer_loop_bounds_from_ref (loop, stmt, lhs);

      for (i = 0; i < n; i++)
        {
          arg = gimple_call_arg (stmt, i);
          if (REFERENCE_CLASS_P (arg))
            infer_loop_bounds_from_ref (loop, stmt, arg);
        }
    }
}

/* Determine information about number of iterations of a LOOP from the fact
   that pointer arithmetics in STMT does not overflow.  */

static void
infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple stmt)
{
  tree def, base, step, scev, type, low, high;
  tree var, ptr;

  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return;

  def = gimple_assign_lhs (stmt);
  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!nowrap_type_p (type))
    return;

  ptr = gimple_assign_rhs1 (stmt);
  if (!expr_invariant_in_loop_p (loop, ptr))
    return;

  var = gimple_assign_rhs2 (stmt);
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
     produce a NULL pointer.  The contrary would mean NULL points to an object,
     while NULL is supposed to compare unequal with the address of all objects.
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
     NULL pointer since that would mean wrapping, which we assume here not to
     happen.  So, we can exclude NULL from the valid range of pointer
     arithmetic.  */
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
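
/* Illustrative example (not part of the original sources): in

     for (p = buf; p != end; p++)
       *p = 0;

   the evolution of P is BUF + 1 * iteration; assuming pointer
   arithmetic does not wrap (and, with -fdelete-null-pointer-checks,
   never produces NULL), P is confined to the valid address range,
   which bounds the number of latch executions even though END is
   unknown.  */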

/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetics in STMT does not overflow.  */

static void
infer_loop_bounds_from_signedness (struct loop *loop, gimple stmt)
{
  tree def, base, step, scev, type, low, high;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  def = gimple_assign_lhs (stmt);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
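
/* Illustrative example (not part of the original sources): when signed
   overflow is undefined, an increment like

     i_2 = i_1 + 1;

   may not step past INT_MAX, so the IV is confined to
   <INT_MIN, INT_MAX> and the number of latch executions is bounded
   accordingly, even when the exit test itself is opaque.  */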

/* The following analyzers are extracting information on the bounds
   of LOOP from the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.  */

static void
infer_loop_bounds_from_undefined (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;
  gimple_stmt_iterator bsi;
  basic_block bb;
  bool reliable;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      /* If BB is not executed in each iteration of the loop, we cannot
         use the operations in it to infer reliable upper bound on the
         # of iterations of the loop.  However, we can use it as a guess.
         Reliable guesses come only from array bounds.  */
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
        {
          gimple stmt = gsi_stmt (bsi);

          infer_loop_bounds_from_array (loop, stmt);

          if (reliable)
            {
              infer_loop_bounds_from_signedness (loop, stmt);
              infer_loop_bounds_from_pointer_arith (loop, stmt);
            }
        }
    }

  free (bbs);
}

/* Compare double ints, callback for qsort.  */

static int
double_int_cmp (const void *p1, const void *p2)
{
  const double_int *d1 = (const double_int *)p1;
  const double_int *d2 = (const double_int *)p2;
  if (*d1 == *d2)
    return 0;
  if (d1->ult (*d2))
    return -1;
  if (d1->ugt (*d2))
    return 1;
  return 0;
}

/* Return index of BOUND in BOUNDS array sorted in increasing order.
   Lookup by binary search.  */

static int
bound_index (vec<double_int> bounds, double_int bound)
{
  unsigned int end = bounds.length ();
  unsigned int begin = 0;

  /* Find a matching index by means of a binary search.  */
  while (begin != end)
    {
      unsigned int middle = (begin + end) / 2;
      double_int index = bounds[middle];

      if (index == bound)
        return middle;
      else if (index.ult (bound))
        begin = middle + 1;
      else
        end = middle;
    }
  gcc_unreachable ();
}

/* We recorded loop bounds only for statements dominating loop latch (and thus
   executed each loop iteration).  If there are any bounds on statements not
   dominating the loop latch we can improve the estimate by walking the loop
   body and seeing if every path from loop header to loop latch contains
   some bounded statement.  */

static void
discover_iteration_bound_by_body_walk (struct loop *loop)
{
  pointer_map_t *bb_bounds;
  struct nb_iter_bound *elt;
  vec<double_int> bounds = vNULL;
  vec<vec<basic_block> > queues = vNULL;
  vec<basic_block> queue = vNULL;
  ptrdiff_t queue_index;
  ptrdiff_t latch_index = 0;
  pointer_map_t *block_priority;

  /* Discover what bounds may interest us.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      double_int bound = elt->bound;

      /* Exit terminates loop at given iteration, while non-exits produce
         undefined effect on the next iteration.  */
      if (!elt->is_exit)
        {
          bound += double_int_one;
          /* If an overflow occurred, ignore the result.  */
          if (bound.is_zero ())
            continue;
        }

      if (!loop->any_upper_bound
          || bound.ult (loop->nb_iterations_upper_bound))
        bounds.safe_push (bound);
    }

  /* Exit early if there is nothing to do.  */
  if (!bounds.exists ())
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");

  /* Sort the bounds in decreasing order.  */
  qsort (bounds.address (), bounds.length (),
         sizeof (double_int), double_int_cmp);

  /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */

  bb_bounds = pointer_map_create ();
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      double_int bound = elt->bound;
      if (!elt->is_exit)
        {
          bound += double_int_one;
          /* If an overflow occurred, ignore the result.  */
          if (bound.is_zero ())
            continue;
        }

      if (!loop->any_upper_bound
          || bound.ult (loop->nb_iterations_upper_bound))
        {
          ptrdiff_t index = bound_index (bounds, bound);
          void **entry = pointer_map_contains (bb_bounds,
                                               gimple_bb (elt->stmt));
          if (!entry)
            *pointer_map_insert (bb_bounds,
                                 gimple_bb (elt->stmt)) = (void *)index;
          else if ((ptrdiff_t)*entry > index)
            *entry = (void *)index;
        }
    }

  block_priority = pointer_map_create ();

  /* Perform shortest path discovery loop->header ... loop->latch.

     The "distance" is given by the smallest loop bound of basic block
     present in the path and we look for path with largest smallest bound
     on it.

     To avoid the need for Fibonacci heap on double ints we simply compress
     double ints into indexes to BOUNDS array and then represent the queue
     as arrays of queues for every index.
     Index of BOUNDS.length() means that the execution of given BB has
     no bounds determined.

     VISITED is a pointer map translating basic block into smallest index
     it was inserted into the priority queue with.  */
  latch_index = -1;

  /* Start walk in loop header with index set to infinite bound.  */
  queue_index = bounds.length ();
  queues.safe_grow_cleared (queue_index + 1);
  queue.safe_push (loop->header);
  queues[queue_index] = queue;
  *pointer_map_insert (block_priority, loop->header) = (void *)queue_index;

  for (; queue_index >= 0; queue_index--)
    {
      if (latch_index < queue_index)
        {
          while (queues[queue_index].length ())
            {
              basic_block bb;
              ptrdiff_t bound_index = queue_index;
              void **entry;
              edge e;
              edge_iterator ei;

              queue = queues[queue_index];
              bb = queue.pop ();

              /* OK, we later inserted the BB with lower priority, skip it.  */
              if ((ptrdiff_t)*pointer_map_contains (block_priority, bb)
                  > queue_index)
                continue;

              /* See if we can improve the bound.  */
              entry = pointer_map_contains (bb_bounds, bb);
              if (entry && (ptrdiff_t)*entry < bound_index)
                bound_index = (ptrdiff_t)*entry;

              /* Insert successors into the queue, watch for latch edge
                 and record greatest index we saw.  */
              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  bool insert = false;
                  void **entry;

                  if (loop_exit_edge_p (loop, e))
                    continue;

                  if (e == loop_latch_edge (loop)
                      && latch_index < bound_index)
                    latch_index = bound_index;
                  else if (!(entry = pointer_map_contains (block_priority,
                                                           e->dest)))
                    {
                      insert = true;
                      *pointer_map_insert (block_priority,
                                           e->dest) = (void *)bound_index;
                    }
                  else if ((ptrdiff_t)*entry < bound_index)
                    {
                      insert = true;
                      *entry = (void *)bound_index;
                    }

                  if (insert)
                    queues[bound_index].safe_push (e->dest);
                }
            }
        }
      queues[queue_index].release ();
    }

  gcc_assert (latch_index >= 0);
  if ((unsigned)latch_index < bounds.length ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "Found better loop bound ");
          dump_double_int (dump_file, bounds[latch_index], true);
          fprintf (dump_file, "\n");
        }
      record_niter_bound (loop, bounds[latch_index], false, true);
    }

  queues.release ();
  bounds.release ();
  pointer_map_destroy (bb_bounds);
  pointer_map_destroy (block_priority);
}
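
/* Illustrative example (not part of the original sources): if the loop
   body is an if-else diamond and the two arms contain statements
   bounding the number of iterations by 8 and by 12 respectively,
   neither statement dominates the latch, yet every header-to-latch
   path crosses one of them, so the walk above still derives an upper
   bound of 12 for the loop.  */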

/* See if every path through the loop goes through a statement that is known
   to not execute at the last iteration.  In that case we can decrease the
   iteration count by 1.  */

static void
maybe_lower_iteration_bound (struct loop *loop)
{
  pointer_set_t *not_executed_last_iteration = NULL;
  struct nb_iter_bound *elt;
  bool found_exit = false;
  vec<basic_block> queue = vNULL;
  bitmap visited;

  /* Collect all statements with interesting (i.e. lower than
     nb_iterations_upper_bound) bound on them.

     TODO: Due to the way record_estimate chooses estimates to store, the
     bounds will be always nb_iterations_upper_bound-1.  We can change this
     to record also statements not dominating the loop latch and update the
     walk below to the shortest path algorithm.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      if (!elt->is_exit
          && elt->bound.ult (loop->nb_iterations_upper_bound))
        {
          if (!not_executed_last_iteration)
            not_executed_last_iteration = pointer_set_create ();
          pointer_set_insert (not_executed_last_iteration, elt->stmt);
        }
    }
  if (!not_executed_last_iteration)
    return;

  /* Start DFS walk in the loop header and see if we can reach the
     loop latch or any of the exits (including statements with side
     effects that may terminate the loop otherwise) without visiting
     any of the statements known to have undefined effect on the last
     iteration.  */
  queue.safe_push (loop->header);
  visited = BITMAP_ALLOC (NULL);
  bitmap_set_bit (visited, loop->header->index);

  do
    {
      basic_block bb = queue.pop ();
      gimple_stmt_iterator gsi;
      bool stmt_found = false;

      /* Look for possible exits and statements bounding the execution.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          if (pointer_set_contains (not_executed_last_iteration, stmt))
            {
              stmt_found = true;
              break;
            }
          if (gimple_has_side_effects (stmt))
            {
              found_exit = true;
              break;
            }
        }
      if (found_exit)
        break;

      /* If no bounding statement is found, continue the walk.  */
      if (!stmt_found)
        {
          edge e;
          edge_iterator ei;

          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (loop_exit_edge_p (loop, e)
                  || e == loop_latch_edge (loop))
                {
                  found_exit = true;
                  break;
                }
              if (bitmap_set_bit (visited, e->dest->index))
                queue.safe_push (e->dest);
            }
        }
    }
  while (queue.length () && !found_exit);

  /* If every path through the loop reaches a bounding statement before exit,
     then we know the last iteration of the loop will have undefined effect
     and we can decrease number of iterations.  */

  if (!found_exit)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "Reducing loop iteration estimate by 1; "
                 "undefined statement must be executed at the last iteration.\n");
      record_niter_bound (loop,
                          loop->nb_iterations_upper_bound - double_int_one,
                          false, true);
    }
  BITMAP_FREE (visited);
  queue.release ();
  pointer_set_destroy (not_executed_last_iteration);
}
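
/* Illustrative example (not part of the original sources): if a
   statement whose undefined behavior bounds the iteration count to N
   is executed on every path through the loop body before any exit or
   statement with side effects, the last recorded iteration cannot
   complete, so the upper bound N can be lowered to N - 1.  */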

/* Records estimates on numbers of iterations of LOOP.  Estimates derived
   from undefined behavior are used only when flag_aggressive_loop_optimizations
   is set.  */

static void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
  vec<edge> exits;
  tree niter, type;
  unsigned i;
  struct tree_niter_desc niter_desc;
  edge ex;
  double_int bound;
  edge likely_exit;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;

  loop->estimate_state = EST_AVAILABLE;
  /* Force estimate computation but leave any existing upper bound in
     place.  */
  loop->any_estimate = false;

  /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
     to be constant, we avoid undefined behavior implied bounds and instead
     diagnose those loops with -Waggressive-loop-optimizations.  */
  number_of_latch_executions (loop);

  exits = get_loop_exit_edges (loop);
  likely_exit = single_likely_exit (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
        continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
        niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
                        build_int_cst (type, 0),
                        niter);
      record_estimate (loop, niter, niter_desc.max,
                       last_stmt (ex->src),
                       true, ex == likely_exit, true);
    }
  exits.release ();

  if (flag_aggressive_loop_optimizations)
    infer_loop_bounds_from_undefined (loop);

  discover_iteration_bound_by_body_walk (loop);

  maybe_lower_iteration_bound (loop);

  /* If we have a measured profile, use it to estimate the number of
     iterations.  */
  if (loop->header->count != 0)
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
      bound = gcov_type_to_double_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* If we know the exact number of iterations of this loop, try to
     not break code with undefined behavior by not recording smaller
     maximum number of iterations.  */
  if (loop->nb_iterations
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound
        = tree_to_double_int (loop->nb_iterations);
    }
}

/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If we have no reliable estimate, the function returns false,
   otherwise returns true.  */

bool
estimated_loop_iterations (struct loop *loop, double_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return (get_estimated_loop_iterations (loop, nit));
}

/* Similar to estimated_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
estimated_loop_iterations_int (struct loop *loop)
{
  double_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!estimated_loop_iterations (loop, &nit))
    return -1;

  if (!nit.fits_shwi ())
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Sets NIT to an upper bound for the maximum number of executions of the
   latch of the LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_loop_iterations (struct loop *loop, double_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return get_max_loop_iterations (loop, nit);
}

/* Similar to max_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
max_loop_iterations_int (struct loop *loop)
{
  double_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!max_loop_iterations (loop, &nit))
    return -1;

  if (!nit.fits_shwi ())
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Returns an estimate for the number of executions of statements
   in the LOOP.  For statements before the loop exit, this exceeds
   the number of execution of the latch by one.  */

HOST_WIDE_INT
estimated_stmt_executions_int (struct loop *loop)
{
  HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
  HOST_WIDE_INT snit;

  if (nit == -1)
    return -1;

  snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);

  /* If the computation overflows, return -1.  */
  return snit < 0 ? -1 : snit;
}

/* Sets NIT to the estimated maximum number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_stmt_executions (struct loop *loop, double_int *nit)
{
  double_int nit_minus_one;

  if (!max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += double_int_one;

  return (*nit).ugt (nit_minus_one);
}

/* Sets NIT to the estimated number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
estimated_stmt_executions (struct loop *loop, double_int *nit)
{
  double_int nit_minus_one;

  if (!estimated_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += double_int_one;

  return (*nit).ugt (nit_minus_one);
}

/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (void)
{
  struct loop *loop;

  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  FOR_EACH_LOOP (loop, 0)
    {
      estimate_numbers_of_iterations_loop (loop);
    }

  fold_undefer_and_ignore_overflow_warnings ();
}

/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (gimple s1, gimple s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      gimple_stmt_iterator bsi;

      if (gimple_code (s2) == GIMPLE_PHI)
        return false;

      if (gimple_code (s1) == GIMPLE_PHI)
        return true;

      for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
        if (gsi_stmt (bsi) == s1)
          return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}

/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.

   ??? This code can become quite a CPU hog - we can have many bounds,
   and large basic blocks forcing stmt_dominates_stmt_p to be queried
   many times, so the whole thing is O(n^2) for scev_probably_wraps_p
   invocation (that can be done n times).

   It would make more sense (and give better answers) to remember BB
   bounds computed by discover_iteration_bound_by_body_walk.  */

static bool
n_of_executions_at_most (gimple stmt,
                         struct nb_iter_bound *niter_bound,
                         tree niter)
{
  double_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!double_int_fits_to_tree_p (nit_type, bound))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything after
        it is executed at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
        is executed, then NITER_BOUND->stmt is executed as well in the same
        iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.

        If we can determine that NITER_BOUND->stmt is always executed
        after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
        This holds if both statements belong to the same basic block, STMT
        comes before NITER_BOUND->stmt and there are no statements with side
        effects in between.  */

  if (niter_bound->is_exit)
    {
      if (stmt == niter_bound->stmt
          || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
        return false;
      cmp = GE_EXPR;
    }
  else
    {
      if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
        {
          gimple_stmt_iterator bsi;
          if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
              || gimple_code (stmt) == GIMPLE_PHI
              || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
            return false;

          /* By stmt_dominates_stmt_p we already know that STMT appears
             before NITER_BOUND->STMT.  Still need to test that the loop
             cannot be terminated by a side effect in between.  */
          for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
               gsi_next (&bsi))
            if (gimple_has_side_effects (gsi_stmt (bsi)))
              return false;
          bound += double_int_one;
          if (bound.is_zero ()
              || !double_int_fits_to_tree_p (nit_type, bound))
            return false;
        }
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
                   niter, double_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}

/* Returns true if the arithmetics in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}

/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPEs bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetics in C does not overflow).  */

bool
scev_probably_wraps_p (tree base, tree step,
                       gimple at_stmt, struct loop *loop,
                       bool use_overflow_semantics)
{
  tree delta, step_abs;
  tree unsigned_type, valid_niter;
  tree type = TREE_TYPE (step);
  tree e;
  double_int niter;
  struct nb_iter_bound *bound;

  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetics:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetics does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Otherwise, compute the number of iterations before we reach the
     bound of the type, and verify that the loop is exited before this
     occurs.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
                                   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
                              fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
                                   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations_loop (loop);

  if (max_loop_iterations (loop, &niter)
      && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter)
      && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
                           double_int_to_tree (TREE_TYPE (valid_niter),
                                               niter))) != NULL
      && integer_nonzerop (e))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }
  if (at_stmt)
    for (bound = loop->bounds; bound; bound = bound->next)
      {
        if (n_of_executions_at_most (at_stmt, bound, valid_niter))
          {
            fold_undefer_and_ignore_overflow_warnings ();
            return false;
          }
      }

  fold_undefer_and_ignore_overflow_warnings ();

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
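
/* Worked example (illustrative, not part of the original sources): for
   an unsigned char IV with BASE 250 and STEP 1, VALID_NITER is
   (255 - 250) / 1 = 5; if the loop is known to iterate at most 4
   times, the IV provably stays in range and false is returned,
   otherwise we conservatively answer true.  */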

/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates_loop (struct loop *loop)
{
  struct nb_iter_bound *bound, *next;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound; bound = next)
    {
      next = bound->next;
      ggc_free (bound);
    }

  loop->bounds = NULL;
}

/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (void)
{
  struct loop *loop;

  FOR_EACH_LOOP (loop, 0)
    {
      free_numbers_of_iterations_estimates_loop (loop);
    }
}

/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}