/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

typedef struct
{
  mpz_t below, up;
} bounds;
/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;
  bool negate = false;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
      negate = true;
      /* Fallthru.  */

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (op1, offset, SIGNED);
      if (negate)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
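
/* For example (illustrative only): for EXPR "n_5 + 7", *VAR is set to n_5
   and OFFSET to 7; for "n_5 - 7", OFFSET becomes -7; for the constant 42,
   *VAR is a zero constant and OFFSET holds the whole value.  */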
/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored to BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
                                tree c0, enum tree_code cmp, tree c1,
                                mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
         such a guard is unlikely to appear, so we do not bother with
         handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
         except for cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same
         type as VAR.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
        {
          mpz_t valc1;

          /* Case of comparing VAR with its below/up bounds.  */
          mpz_init (valc1);
          wi::to_mpz (c1, valc1, TYPE_SIGN (type));
          if (mpz_cmp (valc1, below) == 0)
            cmp = GT_EXPR;
          if (mpz_cmp (valc1, up) == 0)
            cmp = LT_EXPR;

          mpz_clear (valc1);
        }
      else
        {
          /* Case of comparing with the bounds of the type.  */
          wide_int min = wi::min_value (type);
          wide_int max = wi::max_value (type);

          if (wi::eq_p (c1, min))
            cmp = GT_EXPR;
          if (wi::eq_p (c1, max))
            cmp = LT_EXPR;
        }

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
        return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
      wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
           && INTEGRAL_TYPE_P (type)
           && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
           || mpz_sgn (offc1) == 0
           || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
           || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implied that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check the four
     cases below:

       1) cmp == LE_EXPR && offc0 > 0

            (varc0 + offc0) doesn't overflow
            && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

            (varc0 + offc0) doesn't underflow
            && (varc1 + offc1 - offc0) doesn't overflow

          In this case, (varc0 + offc0) will never underflow if we can
          prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

            (varc0 + offc0) doesn't underflow
            && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

            (varc0 + offc0) doesn't overflow
            && (varc1 + offc1 - offc0) doesn't underflow

          In this case, (varc0 + offc0) will never overflow if we can
          prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle cases 2 and 4 in the code below.  */
  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
           || mpz_sgn (offc0) == 0
           || (cmp == LE_EXPR
               && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
           || (cmp == GE_EXPR
               && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
        mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
        mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          wide_int minc, maxc;
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = wi::max (minv, minc, sgn);
                  maxv = wi::min (maxv, maxc, sgn);
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (wi::gt_p (minv, maxv, sgn))
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
        {
          mpz_set (minm, min);
          mpz_set (maxm, max);
        }
      else
        {
          gcc_assert (wi::le_p (minv, maxv, sgn));
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
        }
      /* Now walk the dominators of the loop header and use the entry
         guards to refine the estimates.  */
      for (bb = loop->header;
           bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
           bb = get_immediate_dominator (CDI_DOMINATORS, bb))
        {
          edge e;
          tree c0, c1;
          gimple *cond;
          enum tree_code cmp;

          if (!single_pred_p (bb))
            continue;
          e = single_pred_edge (bb);

          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
            continue;

          cond = last_stmt (e->src);
          c0 = gimple_cond_lhs (cond);
          cmp = gimple_cond_code (cond);
          c1 = gimple_cond_rhs (cond);

          if (e->flags & EDGE_FALSE_VALUE)
            cmp = invert_tree_comparison (cmp, false);

          refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
          ++cnt;
        }

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
         is always fine.  If off is negative and minv + off isn't
         smaller than type's minimum, or off is positive and
         maxv + off isn't bigger than type's maximum, use the more
         precise range too.  */
      if (nowrap_type_p (type)
          || mpz_sgn (off) == 0
          || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
          || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
        {
          mpz_set (min, minm);
          mpz_set (max, maxm);
          mpz_clear (minm);
          mpz_clear (maxm);
          return;
        }
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
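
/* For example (illustrative only): if VAR is an SSA name whose recorded
   VRP range at the loop entry is [1, 100] and OFF is -1, the estimate
   becomes [0, 99]; if the addition of OFF could wrap, only the bounds
   of TYPE would be returned instead.  */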
/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is X - Y - M.
     Therefore, if the arithmetic in TYPE does not overflow, the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y).
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }

  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
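
/* Worked example (illustrative only): in an 8-bit unsigned type with
   M = 256, X = 20 and Y = 10, the difference of var + 20 and var + 10 is
   normally 10; but if var + 20 wraps while var + 10 does not, it is
   10 - 256 = -246.  Hence the bounds are (-246, 10) when wrapping is
   possible, and (10, 10) when it is not.  */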
/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
         a guard is unlikely to appear, so we do not bother with handling
         it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
         special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same type
         as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }

      return;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  Matters are a bit more complicated when the
         addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}
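
/* For example (illustrative only): for X = n_1 + 8 and Y = n_1 + 3 the
   variable parts are equal, so only the offsets are compared and BNDS
   becomes (5, 5) when the additions cannot wrap.  */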
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}
/* Returns inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
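
/* Worked example (illustrative only): for X = 3 and MASK = 15 (s = 4,
   CTR = 3), the loop computes 3^(2^3 - 1) = 3^7 = 2187 = 11 (mod 16),
   and indeed 3 * 11 = 33 = 1 (mod 16).  This works because every odd X
   satisfies X^(2^(s-1)) = 1 (mod 2^s).  */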
/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ...  */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
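
/* For example (illustrative only): for the exit test 4 * i != 12 in an
   8-bit unsigned type, C = 12 is an exact multiple of S = 4, so BND
   becomes 12 / 4 = 3.  If the iv could overflow, only the period of
   4 * i would be known, i.e. BND = 2^(8 - 2) - 1 = 63.  */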
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
                         tree final, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  This can be
     proven when below two conditions are satisfied:

       1) IV evaluates toward FINAL at beginning, i.e:
            base <= FINAL ; step > 0
            base >= FINAL ; step < 0

       2) |FINAL - base| is an exact multiple of step.

     Unfortunately, it's hard to prove above conditions after pass loop-ch
     because loop with exit condition (IV != FINAL) usually will be guarded
     by initial-condition (IV.base - IV.step != FINAL).  In this case, we
     can alternatively try to prove below conditions:

       1') IV evaluates toward FINAL at beginning, i.e:
            new_base = base - step < FINAL ; step > 0
                       && base - step doesn't underflow
            new_base = base - step > FINAL ; step < 0
                       && base - step doesn't overflow

       2') |FINAL - new_base| is an exact multiple of step.

     Please refer to PR34114 as an example of loop-ch's impact, also refer
     to PR72817 as an example why condition 2') is necessary.

     Note, for NE_EXPR, base equal to FINAL is a special case, in
     which the loop exits immediately, and the iv does not overflow.  */
  if (!niter->control.no_overflow
      && (integer_onep (s) || multiple_of_p (type, c, s)))
    {
      tree t, cond, new_c, relaxed_cond = boolean_false_node;

      if (tree_int_cst_sign_bit (iv->step))
        {
          cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
          if (TREE_CODE (type) == INTEGER_TYPE)
            {
              /* Only when base - step doesn't overflow.  */
              t = TYPE_MAX_VALUE (type);
              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
              t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
              if (integer_nonzerop (t))
                {
                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
                  new_c = fold_build2 (MINUS_EXPR, niter_type,
                                       fold_convert (niter_type, t),
                                       fold_convert (niter_type, final));
                  if (multiple_of_p (type, new_c, s))
                    relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
                                                t, final);
                }
            }
        }
      else
        {
          cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
          if (TREE_CODE (type) == INTEGER_TYPE)
            {
              /* Only when base - step doesn't underflow.  */
              t = TYPE_MIN_VALUE (type);
              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
              t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
              if (integer_nonzerop (t))
                {
                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
                  new_c = fold_build2 (MINUS_EXPR, niter_type,
                                       fold_convert (niter_type, final),
                                       fold_convert (niter_type, t));
                  if (multiple_of_p (type, new_c, s))
                    relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
                                                t, final);
                }
            }
        }

      t = simplify_using_initial_conditions (loop, cond);
      if (!t || !integer_onep (t))
        t = simplify_using_initial_conditions (loop, relaxed_cond);

      if (t && integer_onep (t))
        niter->control.no_overflow = true;
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }
  if (niter->control.no_overflow && multiple_of_p (type, c, s))
    {
      niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
      return true;
    }

  /* Let gcd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
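
  /* Worked example (illustrative only): for 6 * i != 10 in an 8-bit
     unsigned type, d = 2, so bound = 2^7 - 1, s becomes 3 and c becomes 5.
     inverse (3, bound) = 43 because 3 * 43 = 129 = 1 (mod 128), hence
     niter = (5 * 43) & 127 = 87; indeed 6 * 87 = 522 = 10 (mod 256).  */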
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  mpz_t mmod;
  tree assumption = boolean_true_node, bound, noloop;
  bool ret = false, fv_comp_no_overflow;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  mpz_init (mmod);
  wi::to_mpz (mod, mmod, UNSIGNED);
  mpz_neg (mmod, mmod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  This is
     also obviously the case if the new final value is equal to the
     current one.  Finally, we postulate this for pointer type variables,
     as the code cannot rely on the object to which the pointer points being
     placed at the end of the address space (and more pragmatically,
     TYPE_{MIN,MAX}_VALUE is not defined for pointers).  */
  if (integer_zerop (mod) || POINTER_TYPE_P (type))
    fv_comp_no_overflow = true;
  else if (!exit_must_be_taken)
    fv_comp_no_overflow = false;
  else
    fv_comp_no_overflow =
      (iv0->no_overflow && integer_nonzerop (iv0->step))
      || (iv1->no_overflow && integer_nonzerop (iv1->step));

  if (integer_nonzerop (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build_pointer_plus (iv1->base, tmod));
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              iv0->base,
                              fold_build2 (PLUS_EXPR, type1,
                                           iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base.  */
      if (!fv_comp_no_overflow)
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
          if (integer_zerop (assumption))
            goto end;
        }
      if (mpz_cmp (mmod, bnds->below) < 0)
        noloop = boolean_false_node;
      else if (POINTER_TYPE_P (type))
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build_pointer_plus (iv0->base,
                                                       fold_build1 (NEGATE_EXPR,
                                                                    type1, tmod)),
                              iv1->base);
      else
        noloop = fold_build2 (GT_EXPR, boolean_type_node,
                              fold_build2 (MINUS_EXPR, type1,
                                           iv0->base, tmod),
                              iv1->base);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions,
                                      assumption);
  if (!integer_zerop (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero,
                                      noloop);
  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);

  ret = true;
end:
  mpz_clear (mmod);
  return ret;
}
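
/* For example (illustrative only): for i = 0; i < 10; i += 4 we have
   DELTA = 10 and STEP = 4, so MOD = 4 - (10 mod 4) = 2 and the final
   value becomes 10 + 2 = 12; the rewritten exit test i != 12 is taken
   after the same three iterations.  */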
/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}
/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the loop,
     and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables us
     to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */
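
  /* For example (illustrative only): with an 8-bit unsigned type and
     step = 4, the formula above requires -3 <= iv1->base - iv0->base <= 252,
     i.e. BNDS.below >= -4 + 1 and BNDS.up <= 255 - 4 + 1.  */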
  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetics.
                      Gcc in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers.  */
                   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         0 address never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
  niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                    niter->may_be_zero, mbz);
}
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision.  If the difference
         is nonnegative, we are done, otherwise we must record the
         condition.  */

      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
                                     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
                                      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
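
  /* For example (illustrative only): with delta = 10 and step = 4 this
     gives (10 + 4 - 1) / 4 = 3 iterations, matching i = 0, 4, 8.  */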
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to which the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */
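
  /* For example (illustrative only): i <= n is handled as i < n + 1 under
     the recorded assumption n != TYPE_MAX, so a loop counting i = 0 .. n
     executes n + 1 iterations.  */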
  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
        return false;
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
                                  bnds);
}
/* Dumps description of affine induction variable IV to FILE.  */

static void
dump_affine_iv (FILE *file, affine_iv *iv)
{
  if (!integer_zerop (iv->step))
    fprintf (file, "[");

  print_generic_expr (dump_file, iv->base, TDF_SLIM);

  if (!integer_zerop (iv->step))
    {
      fprintf (file, ", + , ");
      print_generic_expr (dump_file, iv->step, TDF_SLIM);
      fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
    }
}
/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   If EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can be still used as unreliable estimate of upper
     bound.  But we have no API to pass it down to number of iterations code
     and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = 0;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      std::swap (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrap around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
    }

  /* We can handle cases in which neither side of the comparison is
     invariant:

       {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
     as if:
       {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}

     provided that either below condition is satisfied:

       a) the test is NE_EXPR;
       b) iv0.step - iv1.step is integer and iv0/iv1 don't overflow.

     This rarely occurs in practice, but it is simple enough to manage.  */
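
  /* For example (illustrative only): {0, +, 3} < {10, +, 1} is analyzed as
     {0, +, 2} < {10, +, 0}, which is valid here when neither of the
     original ivs overflows.  */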
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);

      /* No need to check sign of the new step since the code below takes
         care of this well.  */
      if (code != NE_EXPR
          && (TREE_CODE (step) != INTEGER_CST
              || !iv0->no_overflow || !iv1->no_overflow))
        return false;

      iv0->step = step;
      if (!POINTER_TYPE_P (type))
        iv0->no_overflow = false;

      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = 0;
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
               : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
        {
          fprintf (dump_file, "  result:\n");
          if (!integer_nonzerop (niter->assumptions))
            {
              fprintf (dump_file, "    under assumptions ");
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          if (!integer_zerop (niter->may_be_zero))
            {
              fprintf (dump_file, "    zero if ");
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "    # of iterations ");
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
          fprintf (dump_file, ", bounded by ");
          print_decu (niter->max, dump_file);
          fprintf (dump_file, "\n");
        }
      else
        fprintf (dump_file, "  failed\n\n");
    }

  return ret;
}
/* Substitute NEW for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}
/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  If STOP is specified, stop
   expanding if EXPR equals to it.  */

tree
expand_simple_operations (tree expr, tree stop)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple *stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
        {
          e = TREE_OPERAND (expr, i);
          ee = expand_simple_operations (e, stop);
          if (e == ee)
            continue;

          if (!ret)
            ret = copy_node (expr);

          TREE_OPERAND (ret, i) = ee;
        }

      if (!ret)
        return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  /* Stop if it's not ssa name or the one we don't want to expand.  */
  if (TREE_CODE (expr) != SSA_NAME || expr == stop)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
        return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
         could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
          && src->loop_father != dest->loop_father)
        return expr;

      return expand_simple_operations (e, stop);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing.  */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
        return e;

      if (code == SSA_NAME)
        return expand_simple_operations (e, stop);
      else if (code == ADDR_EXPR)
        {
          HOST_WIDE_INT offset;
          tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
                                                     &offset);
          if (base
              && TREE_CODE (base) == MEM_REF)
            {
              ee = expand_simple_operations (TREE_OPERAND (base, 0), stop);
              return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
                                  wide_int_to_tree (sizetype,
                                                    mem_ref_offset (base)
                                                    + offset));
            }
        }

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e, stop);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
          && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
        return expr;
      /* Fallthru.  */
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
        return expr;

      ee = expand_simple_operations (e, stop);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we can simplify EXPR
         using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return boolean_true_node;
    }

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
  if (e && integer_zerop (e))
    return e;

  return expr;
}
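
/* For example (illustrative only): given COND "n_1 == 0" and EXPR
   "n_1 + 1 > 0", substituting 0 for n_1 lets fold reduce EXPR to true,
   which is the value returned.  */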
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}
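/* As an illustration of why the expansion above matters (hypothetical
   GIMPLE, not taken from a testcase): with

     n_2 = n_1 + 1;
     if (n_1 >= 0)        <-- entry condition COND
       ...
     while (i_3 < n_2)    <-- exit condition mentions n_2

   the comparison of i_3 < n_2 against the guard on n_1 only folds once
   n_2 is expanded back to n_1 + 1.  */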
/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  gimple *stmt;
  tree cond, expanded, backup;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  backup = expanded = expand_simple_operations (expr);

  /* Limit walking the dominators to avoid quadratic behavior in the
     number of BBs times the number of loops in degenerate cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
	continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
	continue;

      stmt = last_stmt (e->src);
      cond = fold_build2 (gimple_cond_code (stmt),
			  boolean_type_node,
			  gimple_cond_lhs (stmt),
			  gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
	cond = invert_truthvalue (cond);
      expanded = tree_simplify_using_condition (cond, expanded);
      /* Break if EXPR is simplified to const values.  */
      if (expanded
	  && (integer_zerop (expanded) || integer_nonzerop (expanded)))
	return expanded;

      ++cnt;
    }

  /* Return the original expression if no simplification is done.  */
  return operand_equal_p (backup, expanded, 0) ? expr : expanded;
}
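/* A minimal source-level sketch of the case handled above (made-up
   example):

     if (n > 0)
       for (i = 0; i < n; i++)
	 ...

   The loop header is dominated by the n > 0 guard, so the may_be_zero
   condition n <= 0 produced by the exit analysis simplifies to false
   under that entry condition.  */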
/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
	changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
	changed = true;

      if (code == COND_EXPR)
	{
	  e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
	  if (TREE_OPERAND (expr, 2) != e2)
	    changed = true;
	}
      else
	e2 = NULL_TREE;

      if (changed)
	{
	  if (code == COND_EXPR)
	    expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
	  else
	    expr = fold_build2 (code, boolean_type_node, e0, e1);
	}

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}
/* Returns true if EXIT is the only possible exit from LOOP.  */

bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  gimple_stmt_iterator bsi;
  unsigned i;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
	if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
	  {
	    free (body);
	    return false;
	  }
    }

  free (body);
  return true;
}
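/* Note how conservative this is: any statement that may terminate its
   basic block counts as a further way of leaving the loop, so e.g.
   (illustrative example)

     for (i = 0; i < n; i++)
       foo ();   <-- call that may throw or call exit ()

   is not considered to have the i < n test as its only exit.  */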
/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some useful
   information could be derived (and fields of NITER have meaning described
   in comments at struct tree_niter_desc declaration), false otherwise.
   When EVERY_ITERATION is true, only tests that are known to be executed
   every iteration are considered (i.e. only test that alone bounds the loop).
   If AT_STMT is not NULL, this function stores LOOP's condition statement in
   it when returning true.  */

bool
number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
				       struct tree_niter_desc *niter,
				       gcond **at_stmt, bool every_iteration)
{
  gimple *last;
  gcond *stmt;
  tree type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;
  bool safe;

  /* Nothing to analyze if the loop is known to be infinite.  */
  if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
    return false;

  safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);

  if (every_iteration && !safe)
    return false;

  niter->assumptions = boolean_false_node;
  niter->control.base = NULL_TREE;
  niter->control.step = NULL_TREE;
  niter->control.no_overflow = false;
  last = last_stmt (exit->src);
  if (!last)
    return false;
  stmt = dyn_cast <gcond *> (last);
  if (!stmt)
    return false;

  /* We want the condition for staying inside loop.  */
  code = gimple_cond_code (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    code = invert_tree_comparison (code, false);

  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      break;

    default:
      return false;
    }

  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;

  tree iv0_niters = NULL_TREE;
  if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
			      op0, &iv0, &iv0_niters, false))
    return false;
  tree iv1_niters = NULL_TREE;
  if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
			      op1, &iv1, &iv1_niters, false))
    return false;
  /* Give up on complicated case.  */
  if (iv0_niters && iv1_niters)
    return false;

  /* We don't want to see undefined signed overflow warnings while
     computing the number of iterations.  */
  fold_defer_overflow_warnings ();

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
				  loop_only_exit_p (loop, exit), safe))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  /* Incorporate additional assumption implied by control iv.  */
  tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
  if (iv_niters)
    {
      tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
				     fold_convert (TREE_TYPE (niter->niter),
						   iv_niters));

      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);

      /* Refine upper bound if possible.  */
      if (TREE_CODE (iv_niters) == INTEGER_CST
	  && niter->max > wi::to_widest (iv_niters))
	niter->max = wi::to_widest (iv_niters);
    }

  /* There are no assumptions if the loop is known to be finite.  */
  if (!integer_zerop (niter->assumptions)
      && loop_constraint_set_p (loop, LOOP_C_FINITE))
    niter->assumptions = boolean_true_node;

  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
							    niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
							    niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->assumptions
    = simplify_using_initial_conditions (loop, niter->assumptions);
  niter->may_be_zero
    = simplify_using_initial_conditions (loop, niter->may_be_zero);

  fold_undefer_and_ignore_overflow_warnings ();

  /* If NITER has simplified into a constant, update MAX.  */
  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = wi::to_widest (niter->niter);

  if (at_stmt)
    *at_stmt = stmt;

  return (!integer_zerop (niter->assumptions));
}
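/* Worked example (illustrative, not from the testsuite): for

     for (i = base; i < bound; i += 4)

   the analysis produces an niter expression on the order of
   (bound - base + 3) / 4, guarded by assumptions ruling out overflow
   of the control IV; if the loop carries the LOOP_C_FINITE constraint,
   those assumptions are collapsed to true above.  */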
/* Like number_of_iterations_exit_assumptions, but return TRUE only if
   the niter information holds unconditionally.  */

bool
number_of_iterations_exit (struct loop *loop, edge exit,
			   struct tree_niter_desc *niter,
			   bool warn, bool every_iteration)
{
  gcond *stmt;
  if (!number_of_iterations_exit_assumptions (loop, exit, niter,
					      &stmt, every_iteration))
    return false;

  if (integer_nonzerop (niter->assumptions))
    return true;

  if (warn)
    dump_printf_loc (MSG_MISSED_OPTIMIZATION, gimple_location_safe (stmt),
		     "missed loop optimization: niters analysis ends up "
		     "with assumptions.\n");

  return false;
}
/* Try to determine the number of iterations of LOOP.  If we succeed,
   expression giving number of iterations is returned and *EXIT is
   set to the edge from which the information is obtained.  Otherwise
   chrec_dont_know is returned.  */

tree
find_loop_niter (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;
  struct tree_niter_desc desc;

  *exit = NULL;
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &desc, false))
	continue;

      if (integer_nonzerop (desc.may_be_zero))
	{
	  /* We exit in the first iteration through this exit.
	     We won't find anything better.  */
	  niter = build_int_cst (unsigned_type_node, 0);
	  *exit = ex;
	  break;
	}

      if (!integer_zerop (desc.may_be_zero))
	continue;

      aniter = desc.niter;

      if (!niter)
	{
	  /* Nothing recorded yet.  */
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      /* Prefer constants, the lower the better.  */
      if (TREE_CODE (aniter) != INTEGER_CST)
	continue;

      if (TREE_CODE (niter) != INTEGER_CST)
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      if (tree_int_cst_lt (aniter, niter))
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}
/* Return true if loop is known to have bounded number of iterations.  */

bool
finite_loop_p (struct loop *loop)
{
  widest_int nit;
  int flags;

  flags = flags_from_decl_or_type (current_function_decl);
  if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Found loop %i to be finite: it is within pure or const function.\n",
		 loop->num);
      return true;
    }

  if (loop->any_upper_bound
      || max_loop_iterations (loop, &nit))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
		 loop->num);
      return true;
    }

  return false;
}
/*

   Analysis of a number of iterations of a loop by a brute-force evaluation.

*/

/* Bound on the number of iterations we try to evaluate.  */

#define MAX_ITERATIONS_TO_TRACK \
  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static gphi *
chain_of_csts_start (struct loop *loop, tree x)
{
  gimple *stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = gimple_bb (stmt);
  enum tree_code code;

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      if (bb == loop->header)
	return as_a <gphi *> (stmt);

      return NULL;
    }

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_references_memory_p (stmt)
      || TREE_CODE_CLASS (code) == tcc_reference
      || (code == ADDR_EXPR
	  && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
    return NULL;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_TREE)
    return NULL;

  return chain_of_csts_start (loop, use);
}
/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only from operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants,
     or is also a constant

   If such phi node exists, it is returned, otherwise NULL is returned.  */

static gphi *
get_base_for (struct loop *loop, tree x)
{
  gphi *phi;
  tree init, next;

  if (is_gimple_min_invariant (x))
    return NULL;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (!is_gimple_min_invariant (init))
    return NULL;

  if (TREE_CODE (next) == SSA_NAME
      && chain_of_csts_start (loop, next) != phi)
    return NULL;

  return phi;
}
/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * if X is a constant, we return the constant X.
   * otherwise X is a SSA name, whose value in the considered loop is derived
     by a chain of operations with constant from a result of a phi node in
     the header of the loop.  Then we return value of X when the value of the
     result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  gimple *stmt;

  gcc_checking_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;
  else if (is_gimple_min_invariant (x))
    return x;

  stmt = SSA_NAME_DEF_STMT (x);
  if (gimple_code (stmt) == GIMPLE_PHI)
    return base;

  gcc_checking_assert (is_gimple_assign (stmt));

  /* STMT must be either an assignment of a single SSA name or an
     expression involving an SSA name and a constant.  Try to fold that
     expression using the value for the SSA name.  */
  if (gimple_assign_ssa_name_copy_p (stmt))
    return get_val_for (gimple_assign_rhs1 (stmt), base);
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
	   && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    return fold_build1 (gimple_assign_rhs_code (stmt),
			gimple_expr_type (stmt),
			get_val_for (gimple_assign_rhs1 (stmt), base));
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	rhs1 = get_val_for (rhs1, base);
      else if (TREE_CODE (rhs2) == SSA_NAME)
	rhs2 = get_val_for (rhs2, base);
      else
	gcc_unreachable ();
      return fold_build2 (gimple_assign_rhs_code (stmt),
			  gimple_expr_type (stmt), rhs1, rhs2);
    }
  else
    gcc_unreachable ();
}
/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in first few iterations of the loop (assuming that
   these values are constant) and determining the first iteration in which
   the condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */

tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree acnd;
  tree op[2], val[2], next[2], aval[2];
  gphi *phi;
  gimple *cond;
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || gimple_code (cond) != GIMPLE_COND)
    return chrec_dont_know;

  cmp = gimple_cond_code (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cmp = invert_tree_comparison (cmp, false);

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      op[0] = gimple_cond_lhs (cond);
      op[1] = gimple_cond_rhs (cond);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (is_gimple_min_invariant (op[j]))
	{
	  val[j] = op[j];
	  next[j] = NULL_TREE;
	  op[j] = NULL_TREE;
	}
      else
	{
	  phi = get_base_for (loop, op[j]);
	  if (!phi)
	    return chrec_dont_know;
	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
	}
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
	aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Proved that loop %d iterates %d times using brute force.\n",
		     loop->num, i);
	  return build_int_cst (unsigned_type_node, i);
	}

      for (j = 0; j < 2; j++)
	{
	  aval[j] = val[j];
	  val[j] = get_val_for (next[j], val[j]);
	  if (!is_gimple_min_invariant (val[j]))
	    {
	      fold_undefer_and_ignore_overflow_warnings ();
	      return chrec_dont_know;
	    }
	}

      /* If the next iteration would use the same base values
	 as the current one, there is no point looping further,
	 all following iterations will be the same as this one.  */
      if (val[0] == aval[0] && val[1] == aval[1])
	break;
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}
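/* Illustrative example of a loop that only this brute-force evaluation
   can count, because the IV is not affine:

     for (i = 1; i < 100; i *= 2)
       ...

   Evaluation gives i = 1, 2, 4, 8, 16, 32, 64 (test holds) and then
   i = 128 (test fails), so the result is 7 iterations.  */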
/* Finds the exit through which the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find an exit for which
   loop_niter_by_eval determines the number of iterations,
   chrec_dont_know is returned.  */

tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;

  /* Loops with multiple exits are expensive to handle and less important.  */
  if (!flag_expensive_optimizations
      && exits.length () > 1)
    {
      exits.release ();
      return chrec_dont_know;
    }

  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
	continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
	continue;

      if (niter
	  && !tree_int_cst_lt (aniter, niter))
	continue;

      niter = aniter;
      *exit = ex;
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}
/*

   Analysis of upper bounds on number of iterations of a loop.

*/

static widest_int derive_constant_upper_bound_ops (tree, tree,
						   enum tree_code, tree);

/* Returns a constant upper bound on the value of the right-hand side of
   an assignment statement STMT.  */

static widest_int
derive_constant_upper_bound_assign (gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);

  return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
					  op0, code, op1);
}

/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */

static widest_int
derive_constant_upper_bound (tree val)
{
  enum tree_code code;
  tree op0, op1, op2;

  extract_ops_from_tree (val, &code, &op0, &op1, &op2);
  return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
}
/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
   whose type is TYPE.  The expression is considered to be unsigned.  If
   its type is signed, its value must be nonnegative.  */

static widest_int
derive_constant_upper_bound_ops (tree type, tree op0,
				 enum tree_code code, tree op1)
{
  tree subtype, maxt;
  widest_int bnd, max, cst;
  gimple *stmt;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = wi::to_widest (maxt);

  switch (code)
    {
    case INTEGER_CST:
      return wi::to_widest (op0);

    CASE_CONVERT:
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
	  /* If TYPE is also signed, the fact that VAL is nonnegative implies
	     that OP0 is nonnegative.  */
	  && TYPE_UNSIGNED (type)
	  && !tree_expr_nonnegative_p (op0))
	{
	  /* If we cannot prove that the casted expression is nonnegative,
	     we cannot establish more useful upper bound than the precision
	     of the type gives us.  */
	  return max;
	}

      /* We now know that op0 is a nonnegative value.  Try deriving an upper
	 bound for it.  */
      bnd = derive_constant_upper_bound (op0);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
	 attained.  */
      if (wi::ltu_p (max, bnd))
	return max;

      return bnd;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || !tree_expr_nonnegative_p (op0))
	return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
	 choose the most logical way how to treat this constant regardless
	 of the signedness of the type.  */
      cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
      if (code != MINUS_EXPR)
	cst = -cst;

      bnd = derive_constant_upper_bound (op0);

      if (wi::neg_p (cst))
	{
	  cst = -cst;
	  /* Avoid CST == 0x80000...  */
	  if (wi::neg_p (cst))
	    return max;

	  /* OP0 + CST.  We need to check that
	     BND <= MAX (type) - CST.  */

	  widest_int mmax = max - cst;
	  if (wi::leu_p (bnd, mmax))
	    return bnd + cst;

	  return max;
	}
      else
	{
	  /* OP0 - CST, where CST >= 0.

	     If TYPE is signed, we have already verified that OP0 >= 0, and we
	     know that the result is nonnegative.  This implies that
	     VAL <= BND - CST.

	     If TYPE is unsigned, we must additionally know that OP0 >= CST,
	     otherwise the operation underflows.
	   */

	  /* This should only happen if the type is unsigned; however, for
	     buggy programs that use overflowing signed arithmetics even with
	     -fno-wrapv, this condition may also be true for signed values.  */
	  if (wi::ltu_p (bnd, cst))
	    return max;

	  if (TYPE_UNSIGNED (type))
	    {
	      tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
				      wide_int_to_tree (type, cst));
	      if (!tem || !integer_nonzerop (tem))
		return max;
	    }

	  bnd -= cst;
	}

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;

      bnd = derive_constant_upper_bound (op0);
      return wi::udiv_floor (bnd, wi::to_widest (op1));

    case BIT_AND_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;
      return wi::to_widest (op1);

    case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (stmt) != GIMPLE_ASSIGN
	  || gimple_assign_lhs (stmt) != op0)
	return max;
      return derive_constant_upper_bound_assign (stmt);

    default:
      return max;
    }
}
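/* Worked example (illustrative): for VAL = x_1 / 3, where x_1 is the
   result of a cast from unsigned char, the conversion arm above bounds
   x_1 by 255 and the FLOOR_DIV_EXPR arm then derives 255 / 3 = 85 as
   the constant upper bound on VAL.  */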
/* Emit a -Waggressive-loop-optimizations warning if needed.  */

static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
				       widest_int i_bound, gimple *stmt)
{
  /* Don't warn if the loop doesn't have known constant bound.  */
  if (!loop->nb_iterations
      || TREE_CODE (loop->nb_iterations) != INTEGER_CST
      || !warn_aggressive_loop_optimizations
      /* To avoid warning multiple times for the same loop,
	 only start warning when we preserve loops.  */
      || (cfun->curr_properties & PROP_loops) == 0
      /* Only warn once per loop.  */
      || loop->warned_aggressive_loop_optimizations
      /* Only warn if undefined behavior gives us lower estimate than the
	 known constant bound.  */
      || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
      /* And undefined behavior happens unconditionally.  */
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
    return;

  edge e = single_exit (loop);
  if (e == NULL)
    return;

  gimple *estmt = last_stmt (e->src);
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
	     ? UNSIGNED : SIGNED);
  if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
		  "iteration %s invokes undefined behavior", buf))
    inform (gimple_location (estmt), "within this loop");
  loop->warned_aggressive_loop_optimizations = true;
}
/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at the latest when STMT has been executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */

static void
record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
		 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
{
  widest_int delta;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
	       upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      print_decu (i_bound, dump_file);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  else
    gcc_checking_assert (i_bound == wi::to_widest (bound));

  /* If we have a guaranteed upper bound, record it in the appropriate
     list, unless this is an !is_exit bound (i.e. undefined behavior in
     at_stmt) in a loop with known constant number of iterations.  */
  if (upper
      && (is_exit
	  || loop->nb_iterations == NULL_TREE
	  || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
    {
      struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();

      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* If statement is executed on every path to the loop latch, we can directly
     infer the upper bound on the # of iterations of the loop.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
    upper = false;

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit then the loop latch is executed at most BOUND times,
     otherwise it can be executed BOUND + 1 times.  We will lower the estimate
     later if such statement must be executed on last iteration.  */
  if (is_exit)
    delta = 0;
  else
    delta = 1;
  widest_int new_i_bound = i_bound + delta;

  /* If an overflow occurred, ignore the result.  */
  if (wi::ltu_p (new_i_bound, delta))
    return;

  if (upper && !is_exit)
    do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
  record_niter_bound (loop, new_i_bound, realistic, upper);
}
/* Records the control iv analyzed in NITER for LOOP if the iv is valid
   and doesn't overflow.  */

static void
record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
{
  struct control_iv *iv;

  if (!niter->control.base || !niter->control.step)
    return;

  if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
    return;

  iv = ggc_alloc<control_iv> ();
  iv->base = niter->control.base;
  iv->step = niter->control.step;
  iv->next = loop->control_ivs;
  loop->control_ivs = iv;

  return;
}
/* This function returns TRUE if the below conditions are satisfied:
     1) VAR is an SSA variable.
     2) VAR is an IV:{base, step} in its defining loop.
     3) IV doesn't overflow.
     4) Both base and step are integer constants.
     5) Base is the MIN/MAX value depending on IS_MIN.
   Store the value of base to INIT correspondingly.  */

static bool
get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
{
  if (TREE_CODE (var) != SSA_NAME)
    return false;

  gimple *def_stmt = SSA_NAME_DEF_STMT (var);
  struct loop *loop = loop_containing_stmt (def_stmt);

  if (loop == NULL)
    return false;

  affine_iv iv;
  if (!simple_iv (loop, loop, var, &iv, false))
    return false;

  if (!iv.no_overflow)
    return false;

  if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
    return false;

  if (is_min == tree_int_cst_sign_bit (iv.step))
    return false;

  *init = iv.base;
  return true;
}
/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */

static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
		       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;
  tree orig_base = base;

  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      wide_int min, max;
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (high) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && (get_range_info (orig_base, &min, &max) == VR_RANGE
	      || get_cst_init_from_scev (orig_base, &max, false))
	  && wi::gts_p (high, max))
	base = wide_int_to_tree (unsigned_type, max);
      else if (TREE_CODE (base) != INTEGER_CST
	       && dominated_by_p (CDI_DOMINATORS,
				  loop->latch, gimple_bb (stmt)))
	base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      wide_int min, max;
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (low) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && (get_range_info (orig_base, &min, &max) == VR_RANGE
	      || get_cst_init_from_scev (orig_base, &min, true))
	  && wi::gts_p (min, low))
	base = wide_int_to_tree (unsigned_type, min);
      else if (TREE_CODE (base) != INTEGER_CST
	       && dominated_by_p (CDI_DOMINATORS,
				  loop->latch, gimple_bb (stmt)))
	base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  widest_int max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
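/* Worked example (illustrative): for an IV with base 0 and step 4
   known to stay in the range <0, 255>, delta = 255 - 0 and
   niter_bound = 255 / 4 = 63, so the statement is recorded as executed
   at most 63 + 1 = 64 times.  */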
/* Determine information about number of iterations a LOOP from the index
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
   guaranteed to be executed in every iteration of LOOP.  Callback for
   for_each_index.  */

struct ilb_data
{
  struct loop *loop;
  gimple *stmt;
};

static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = true, at_end = false;
  struct loop *loop = data->loop;

  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (base))
    {
      at_end = true;
      upper = false;
    }

  struct loop *dloop = loop_containing_stmt (data->stmt);
  if (!dloop)
    return true;

  ev = analyze_scalar_evolution (dloop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array of length 1 at the end of a structure most likely extends
     beyond its bounds.  */
  if (at_end
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs into the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);

  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  /* If access is not executed on every iteration, we must ensure that overflow
     may not make the access valid later.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
      && scev_probably_wraps_p (NULL_TREE,
				initial_condition_in_loop_num (ev, loop->num),
				step, data->stmt, loop, true))
    upper = false;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
  return true;
}
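/* Illustrative source-level example of the bound derived above:

     int a[10];
     for (i = 0; i != n; i++)
       a[i] = 0;

   Accessing a[i] is only valid for 0 <= i <= 9, so the store gets the
   IV range <0, 9> and is recorded as executing at most 9 / 1 + 1 = 10
   times, independently of n.  */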
/* Determine information about number of iterations a LOOP from the bounds
   of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
   STMT is guaranteed to be executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
{
  struct ilb_data data;

  data.loop = loop;
  data.stmt = stmt;
  for_each_index (&ref, idx_infer_loop_bounds, &data);
}
/* Determine information about number of iterations of a LOOP from the way
   arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
   executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
{
  if (is_gimple_assign (stmt))
    {
      tree op0 = gimple_assign_lhs (stmt);
      tree op1 = gimple_assign_rhs1 (stmt);

      /* For each memory access, analyze its access function
	 and record a bound on the loop iteration domain.  */
      if (REFERENCE_CLASS_P (op0))
	infer_loop_bounds_from_ref (loop, stmt, op0);

      if (REFERENCE_CLASS_P (op1))
	infer_loop_bounds_from_ref (loop, stmt, op1);
    }
  else if (is_gimple_call (stmt))
    {
      tree arg, lhs;
      unsigned i, n = gimple_call_num_args (stmt);

      lhs = gimple_call_lhs (stmt);
      if (lhs && REFERENCE_CLASS_P (lhs))
	infer_loop_bounds_from_ref (loop, stmt, lhs);

      for (i = 0; i < n; i++)
	{
	  arg = gimple_call_arg (stmt, i);
	  if (REFERENCE_CLASS_P (arg))
	    infer_loop_bounds_from_ref (loop, stmt, arg);
	}
    }
}
/* Determine information about number of iterations of a LOOP from the fact
   that pointer arithmetic in STMT does not overflow.  */

static void
infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
{
  tree def, base, step, scev, type, low, high;
  tree var, ptr;

  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return;

  def = gimple_assign_lhs (stmt);
  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!nowrap_type_p (type))
    return;

  ptr = gimple_assign_rhs1 (stmt);
  if (!expr_invariant_in_loop_p (loop, ptr))
    return;

  var = gimple_assign_rhs2 (stmt);
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
     produce a NULL pointer.  The contrary would mean NULL points to an object,
     while NULL is supposed to compare unequal with the address of all objects.
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
     NULL pointer since that would mean wrapping, which we assume here not to
     happen.  So, we can exclude NULL from the valid range of pointer
     arithmetic.  */
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
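/* Illustrative example: in

     for (p = buf; p != q; p++)
       ...

   the increment p + 1 may not wrap, so the evolution of p is confined
   to the range of the pointer type (with NULL excluded when null
   pointer checks may be deleted), which bounds the iteration count.  */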
/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetic in STMT does not overflow.  */

static void
infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
{
  tree def, base, step, scev, type, low, high;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  def = gimple_assign_lhs (stmt);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}
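/* Illustrative example: with signed overflow being undefined, in

     for (int i = start; i != n; i += 2)
       ...

   the IV i may never be incremented past INT_MAX, so the loop is
   bounded by roughly (INT_MAX - start) / 2 iterations even though the
   inequality exit test alone proves nothing.  */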
/* The following analyzers extract information on the bounds
   of LOOP from the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.  */

static void
infer_loop_bounds_from_undefined (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;
  gimple_stmt_iterator bsi;
  basic_block bb;
  bool reliable;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      /* If BB is not executed in each iteration of the loop, we cannot
	 use the operations in it to infer reliable upper bound on the
	 # of iterations of the loop.  However, we can use it as a guess.
	 Reliable guesses come only from array bounds.  */
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);

	  infer_loop_bounds_from_array (loop, stmt);

	  if (reliable)
	    {
	      infer_loop_bounds_from_signedness (loop, stmt);
	      infer_loop_bounds_from_pointer_arith (loop, stmt);
	    }
	}
    }

  free (bbs);
}
/* Compare wide ints, callback for qsort.  */

static int
wide_int_cmp (const void *p1, const void *p2)
{
  const widest_int *d1 = (const widest_int *) p1;
  const widest_int *d2 = (const widest_int *) p2;
  return wi::cmpu (*d1, *d2);
}
/* Return index of BOUND in BOUNDS array sorted in increasing order.
   Lookup by binary search.  */

static int
bound_index (vec<widest_int> bounds, const widest_int &bound)
{
  unsigned int end = bounds.length ();
  unsigned int begin = 0;

  /* Find a matching index by means of a binary search.  */
  while (begin != end)
    {
      unsigned int middle = (begin + end) / 2;
      widest_int index = bounds[middle];

      if (index == bound)
	return middle;
      else if (wi::ltu_p (index, bound))
	begin = middle + 1;
      else
	end = middle;
    }
  gcc_unreachable ();
}
/* We recorded loop bounds only for statements dominating loop latch (and thus
   executed each loop iteration).  If there are any bounds on statements not
   dominating the loop latch we can improve the estimate by walking the loop
   body and seeing if every path from loop header to loop latch contains
   some bounded statement.  */

static void
discover_iteration_bound_by_body_walk (struct loop *loop)
{
  struct nb_iter_bound *elt;
  auto_vec<widest_int> bounds;
  vec<vec<basic_block> > queues = vNULL;
  vec<basic_block> queue = vNULL;
  ptrdiff_t queue_index;
  ptrdiff_t latch_index = 0;

  /* Discover what bounds may interest us.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;

      /* Exit terminates loop at given iteration, while non-exits produce
	 undefined effect on the next iteration.  */
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	bounds.safe_push (bound);
    }

  /* Exit early if there is nothing to do.  */
  if (!bounds.exists ())
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");

  /* Sort the bounds in decreasing order.  */
  bounds.qsort (wide_int_cmp);

  /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */

  hash_map<basic_block, ptrdiff_t> bb_bounds;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	{
	  ptrdiff_t index = bound_index (bounds, bound);
	  ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
	  if (!entry)
	    bb_bounds.put (gimple_bb (elt->stmt), index);
	  else if ((ptrdiff_t)*entry > index)
	    *entry = index;
	}
    }

  hash_map<basic_block, ptrdiff_t> block_priority;

  /* Perform shortest path discovery loop->header ... loop->latch.

     The "distance" is given by the smallest loop bound of basic block
     present in the path and we look for path with largest smallest bound
     on it.

     To avoid the need for Fibonacci heap on double ints we simply compress
     double ints into indexes to BOUNDS array and then represent the queue
     as arrays of queues for every index.
     Index of BOUNDS.length() means that the execution of given BB has
     no bounds determined.

     VISITED is a pointer map translating basic block into smallest index
     it was inserted into the priority queue with.  */
  latch_index = -1;

  /* Start walk in loop header with index set to infinite bound.  */
  queue_index = bounds.length ();
  queues.safe_grow_cleared (queue_index + 1);
  queue.safe_push (loop->header);
  queues[queue_index] = queue;
  block_priority.put (loop->header, queue_index);

  for (; queue_index >= 0; queue_index--)
    {
      if (latch_index < queue_index)
	{
	  while (queues[queue_index].length ())
	    {
	      basic_block bb;
	      ptrdiff_t bound_index = queue_index;
	      edge e;
	      edge_iterator ei;

	      queue = queues[queue_index];
	      bb = queue.pop ();

	      /* OK, we later inserted the BB with lower priority, skip it.  */
	      if (*block_priority.get (bb) > queue_index)
		continue;

	      /* See if we can improve the bound.  */
	      ptrdiff_t *entry = bb_bounds.get (bb);
	      if (entry && *entry < bound_index)
		bound_index = *entry;

	      /* Insert successors into the queue, watch for latch edge
		 and record greatest index we saw.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  bool insert = false;

		  if (loop_exit_edge_p (loop, e))
		    continue;

		  if (e == loop_latch_edge (loop)
		      && latch_index < bound_index)
		    latch_index = bound_index;
		  else if (!(entry = block_priority.get (e->dest)))
		    {
		      insert = true;
		      block_priority.put (e->dest, bound_index);
		    }
		  else if (*entry < bound_index)
		    {
		      insert = true;
		      *entry = bound_index;
		    }

		  if (insert)
		    queues[bound_index].safe_push (e->dest);
		}
	    }
	}
      queues[queue_index].release ();
    }

  gcc_assert (latch_index >= 0);
  if ((unsigned)latch_index < bounds.length ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found better loop bound ");
	  print_decu (bounds[latch_index], dump_file);
	  fprintf (dump_file, "\n");
	}
      record_niter_bound (loop, bounds[latch_index], false, true);
    }

  queues.release ();
}
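/* Illustrative example for the walk above:

     for (i = 0; i < n; i++)
       if (flag)
	 a[i] = 0;    <-- bounded, does not dominate the latch
       else
	 b[i] = 0;    <-- bounded, does not dominate the latch

   Neither store dominates the latch, so record_estimate could not use
   either bound directly; but every header-to-latch path goes through
   one of them, so the larger of the two per-path bounds still limits
   the number of iterations.  */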
/* See if every path across the loop goes through a statement that is known
   to not execute at the last iteration.  In that case we can decrease the
   iteration count by 1.  */

static void
maybe_lower_iteration_bound (struct loop *loop)
{
  hash_set<gimple *> *not_executed_last_iteration = NULL;
  struct nb_iter_bound *elt;
  bool found_exit = false;
  auto_vec<basic_block> queue;
  bitmap visited;

  /* Collect all statements with interesting (i.e. lower than
     nb_iterations_upper_bound) bound on them.

     TODO: Due to the way record_estimate choose estimates to store, the bounds
     will be always nb_iterations_upper_bound-1.  We can change this to record
     also statements not dominating the loop latch and update the walk below
     to the shortest path algorithm.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
	{
	  if (!not_executed_last_iteration)
	    not_executed_last_iteration = new hash_set<gimple *>;
	  not_executed_last_iteration->add (elt->stmt);
	}
    }
  if (!not_executed_last_iteration)
    return;

  /* Start DFS walk in the loop header and see if we can reach the
     loop latch or any of the exits (including statements with side
     effects that may terminate the loop otherwise) without visiting
     any of the statements known to have undefined effect on the last
     iteration.  */
  queue.safe_push (loop->header);
  visited = BITMAP_ALLOC (NULL);
  bitmap_set_bit (visited, loop->header->index);
  found_exit = false;

  do
    {
      basic_block bb = queue.pop ();
      gimple_stmt_iterator gsi;
      bool stmt_found = false;

      /* Loop for possible exits and statements bounding the execution.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (not_executed_last_iteration->contains (stmt))
	    {
	      stmt_found = true;
	      break;
	    }
	  if (gimple_has_side_effects (stmt))
	    {
	      found_exit = true;
	      break;
	    }
	}
      if (found_exit)
	break;

      /* If no bounding statement is found, continue the walk.  */
      if (!stmt_found)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (loop_exit_edge_p (loop, e)
		  || e == loop_latch_edge (loop))
		{
		  found_exit = true;
		  break;
		}
	      if (bitmap_set_bit (visited, e->dest->index))
		queue.safe_push (e->dest);
	    }
	}
    }
  while (queue.length () && !found_exit);

  /* If every path through the loop reaches a bounding statement before exit,
     then we know the last iteration of the loop will have undefined effect
     and we can decrease number of iterations.  */

  if (!found_exit)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Reducing loop iteration estimate by 1; "
		 "undefined statement must be executed at the last iteration.\n");
      record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
			  false, true);
    }

  BITMAP_FREE (visited);
  delete not_executed_last_iteration;
}
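/* Illustrative example: if the exit test permits 10 iterations but
   both arms of a conditional in the body contain statements that are
   known to execute at most 9 times, e.g.

     for (i = 0; i < 10; i++)
       if (flag)
	 a[i] = 0;    <-- a has 9 elements
       else
	 b[i] = 0;    <-- b has 9 elements

   then the 10th iteration would necessarily execute one of them, so
   the upper bound can be lowered to 9.  */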
/* Records estimates on numbers of iterations of LOOP, including estimates
   derived from undefined behavior when -faggressive-loop-optimizations
   is enabled.  */

void
estimate_numbers_of_iterations (struct loop *loop)
{
  vec<edge> exits;
  tree niter, type;
  unsigned i;
  struct tree_niter_desc niter_desc;
  edge ex;
  widest_int bound;
  edge likely_exit;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;

  loop->estimate_state = EST_AVAILABLE;

  /* If we have a measured profile, use it to estimate the number of
     iterations.  Normally this is recorded by branch_prob right after
     reading the profile.  In case we however found a new loop, record the
     information here.

     Explicitly check for profile status so we do not report
     wrong prediction hitrates for guessed loop iterations heuristics.
     Do not recompute already recorded bounds - we ought to be better on
     updating iteration bounds than updating profile in general and thus
     recomputing iteration bounds later in the compilation process will just
     introduce random roundoff errors.  */
  if (!loop->any_estimate
      && loop->header->count > 0)
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop);
      bound = gcov_type_to_wide_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
     to be constant, we avoid undefined behavior implied bounds and instead
     diagnose those loops with -Waggressive-loop-optimizations.  */
  number_of_latch_executions (loop);

  exits = get_loop_exit_edges (loop);
  likely_exit = single_likely_exit (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
	continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
			build_int_cst (type, 0),
			niter);
      record_estimate (loop, niter, niter_desc.max,
		       last_stmt (ex->src),
		       true, ex == likely_exit, true);
      record_control_iv (loop, &niter_desc);
    }
  exits.release ();

  if (flag_aggressive_loop_optimizations)
    infer_loop_bounds_from_undefined (loop);

  discover_iteration_bound_by_body_walk (loop);

  maybe_lower_iteration_bound (loop);

  /* If we know the exact number of iterations of this loop, try to
     not break code with undefined behavior by not recording smaller
     maximum number of iterations.  */
  if (loop->nb_iterations
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
    }
}
/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations (loop);

  return (get_estimated_loop_iterations (loop, nit));
}

/* Similar to estimated_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
estimated_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!estimated_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Sets NIT to an upper bound for the maximum number of executions of the
   latch of the LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations (loop);

  return get_max_loop_iterations (loop, nit);
}

/* Similar to max_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
max_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Sets NIT to a likely upper bound for the maximum number of executions of
   the latch of the LOOP.  If we have no reliable estimate, the function
   returns false, otherwise returns true.  */

bool
likely_max_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations (loop);

  return get_likely_max_loop_iterations (loop, nit);
}

/* Similar to likely_max_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
likely_max_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!likely_max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}
/* Returns an estimate for the number of executions of statements
   in the LOOP.  For statements before the loop exit, this exceeds
   the number of execution of the latch by one.  */

HOST_WIDE_INT
estimated_stmt_executions_int (struct loop *loop)
{
  HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
  HOST_WIDE_INT snit;

  if (nit == -1)
    return -1;

  snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);

  /* If the computation overflows, return -1.  */
  return snit < 0 ? -1 : snit;
}

/* Sets NIT to the maximum number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Sets NIT to the estimated maximum number of executions of the latch of the
   LOOP, plus one.  If we have no likely estimate, the function returns
   false, otherwise returns true.  */

bool
likely_max_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!likely_max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Sets NIT to the estimated number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
estimated_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!estimated_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}
/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (function *fn)
{
  struct loop *loop;

  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  FOR_EACH_LOOP_FN (fn, loop, 0)
    estimate_numbers_of_iterations (loop);

  fold_undefer_and_ignore_overflow_warnings ();
}
/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      gimple_stmt_iterator bsi;

      if (gimple_code (s2) == GIMPLE_PHI)
	return false;

      if (gimple_code (s1) == GIMPLE_PHI)
	return true;

      for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
	if (gsi_stmt (bsi) == s1)
	  return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.

   ??? This code can become quite a CPU hog - we can have many bounds,
   and large basic blocks forcing stmt_dominates_stmt_p to be queried
   many times on large basic blocks, so the whole thing is O(n^2)
   for scev_probably_wraps_p invocation (that can be done n times).

   It would make more sense (and give better answers) to remember BB
   bounds computed by discover_iteration_bound_by_body_walk.  */

static bool
n_of_executions_at_most (gimple *stmt,
			 struct nb_iter_bound *niter_bound,
			 tree niter)
{
  widest_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!wi::fits_to_tree_p (bound, nit_type))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything after
	it is executed at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
	is executed, then NITER_BOUND->stmt is executed as well in the same
	iteration then STMT is executed at most NITER_BOUND->bound + 1 times.

	If we can determine that NITER_BOUND->stmt is always executed
	after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
	We conclude this holds if both statements belong to the same
	basic block, STMT is before NITER_BOUND->stmt and there are no
	statements with side effects in between.  */

  if (niter_bound->is_exit)
    {
      if (stmt == niter_bound->stmt
	  || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	return false;
      cmp = GE_EXPR;
    }
  else
    {
      if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	{
	  gimple_stmt_iterator bsi;
	  if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
	      || gimple_code (stmt) == GIMPLE_PHI
	      || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
	    return false;

	  /* By stmt_dominates_stmt_p we already know that STMT appears
	     before NITER_BOUND->STMT.  Still need to test that the loop
	     cannot be terminated by a side effect in between.  */
	  for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
	       gsi_next (&bsi))
	    if (gimple_has_side_effects (gsi_stmt (bsi)))
	      return false;

	  bound += 1;
	  if (bound == 0
	      || !wi::fits_to_tree_p (bound, nit_type))
	    return false;
	}
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
		   niter, wide_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}
/* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (ANY_INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}
/* Return true if we can prove LOOP is exited before evolution of induction
   variable {BASE, STEP} overflows with respect to its type bound.  */

static bool
loop_exits_before_overflow (tree base, tree step,
			    gimple *at_stmt, struct loop *loop)
{
  widest_int niter;
  struct control_iv *civ;
  struct nb_iter_bound *bound;
  tree e, delta, step_abs, unsigned_base;
  tree type = TREE_TYPE (step);
  tree unsigned_type, valid_niter;

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  /* Compute the number of iterations before we reach the bound of the
     type, and verify that the loop is exited before this occurs.  */
  unsigned_type = unsigned_type_for (type);
  unsigned_base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
				   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
			      fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
				   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
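  /* Worked example: for a signed char IV with BASE == 100 and STEP == 5,
     UNSIGNED_BASE == 100, the upper extreme is 127, DELTA == 27 and
     STEP_ABS == 5, giving VALID_NITER == 27 / 5 == 5: the IV can be
     stepped five more times before leaving [-128, 127].  */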
  estimate_numbers_of_iterations (loop);

  if (max_loop_iterations (loop, &niter)
      && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
      && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
			   wide_int_to_tree (TREE_TYPE (valid_niter),
					     niter))) != NULL
      && integer_nonzerop (e))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return true;
    }
  if (at_stmt)
    for (bound = loop->bounds; bound; bound = bound->next)
      {
	if (n_of_executions_at_most (at_stmt, bound, valid_niter))
	  {
	    fold_undefer_and_ignore_overflow_warnings ();
	    return true;
	  }
      }
  fold_undefer_and_ignore_overflow_warnings ();

  /* Try to prove the loop is exited before {base, step} overflows with the
     help of an analyzed loop control IV.  This is done only for IVs with
     constant step because otherwise we don't have the information.  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      for (civ = loop->control_ivs; civ; civ = civ->next)
	{
	  enum tree_code code;
	  tree civ_type = TREE_TYPE (civ->step);

	  /* Have to consider type difference because operand_equal_p ignores
	     that for constants.  */
	  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
	      || element_precision (type) != element_precision (civ_type))
	    continue;

	  /* Only consider control IVs with the same step.  */
	  if (!operand_equal_p (step, civ->step, 0))
	    continue;

	  /* Done proving if this is a no-overflow control IV.  */
	  if (operand_equal_p (base, civ->base, 0))
	    return true;

	  /* Control IV is recorded after expanding simple operations,
	     so here we expand base and compare it too.  */
	  tree expanded_base = expand_simple_operations (base);
	  if (operand_equal_p (expanded_base, civ->base, 0))
	    return true;

	  /* If this is a before-stepping control IV, in other words, we have

	       {civ_base, step} = {base + step, step}

	     Because civ {base + step, step} doesn't overflow during loop
	     iterations, {base, step} will not overflow if we can prove the
	     operation "base + step" does not overflow.  Specifically, we try
	     to prove that the conditions below are satisfied:

	       base <= UPPER_BOUND (type) - step  ;;step > 0
	       base >= LOWER_BOUND (type) - step  ;;step < 0

	     by proving the reverse conditions are false using the loop's
	     initial conditions.  */
	  if (POINTER_TYPE_P (TREE_TYPE (base)))
	    code = POINTER_PLUS_EXPR;
	  else
	    code = PLUS_EXPR;

	  tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
	  tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
					       expanded_base, step);
	  if (operand_equal_p (stepped, civ->base, 0)
	      || operand_equal_p (expanded_stepped, civ->base, 0))
	    {
	      tree extreme;

	      if (tree_int_cst_sign_bit (step))
		{
		  code = LT_EXPR;
		  extreme = lower_bound_in_type (type, type);
		}
	      else
		{
		  code = GT_EXPR;
		  extreme = upper_bound_in_type (type, type);
		}
	      extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
	      e = fold_build2 (code, boolean_type_node, base, extreme);
	      e = simplify_using_initial_conditions (loop, e);
	      if (integer_zerop (e))
		return true;
	    }
	}
    }

  return false;
}
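/* Illustration of the control IV reasoning above: if the loop's recorded
   control IV is {base + 4, 4} and we ask about {base, 4} with a positive
   step, it suffices to disprove base > TYPE_MAX - 4 from the loop's
   initial conditions; then "base + 4" cannot wrap, and the queried IV
   inherits no-overflow from the control IV.  */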
/* VAR is a scev variable whose evolution part is the constant STEP; this
   function proves that VAR can't overflow by using value range info.  If
   VAR's value range is [MIN, MAX], it can be proven by:
       MAX + step doesn't overflow    ; if step > 0
     or
       MIN + step doesn't underflow   ; if step < 0.

   We can only do this if VAR is computed in every loop iteration, i.e.,
   VAR's definition has to dominate the loop latch.  Consider the example
   below:

     {
       unsigned int i;

       <bb 3>:

       <bb 4>:
       # RANGE [0, 4294967294] NONZERO 65535
       # i_21 = PHI <0(3), i_18(9)>
       if (i_21 != 0)
	 goto <bb 6>;
       else
	 goto <bb 8>;

       <bb 6>:
       # RANGE [0, 65533] NONZERO 65535
       _6 = i_21 + 4294967295;
       # RANGE [0, 65533] NONZERO 65535
       _7 = (long unsigned int) _6;
       # RANGE [0, 524264] NONZERO 524280
       _8 = _7 * 8;
       # PT = nonlocal escaped
       _9 = a_14 + _8;
       *_9 = 0;

       <bb 8>:
       # RANGE [1, 65535] NONZERO 65535
       i_18 = i_21 + 1;
       if (i_18 >= 65535)
	 goto <bb 10>;
       else
	 goto <bb 9>;

       <bb 9>:
       goto <bb 4>;

       <bb 10>:
       return;
     }

   VAR _6 doesn't overflow only under the pre-condition (i_21 != 0), so here
   we can't use _6 to prove no-overflow for _7.  In fact, var _7 takes the
   value sequence (4294967295, 0, 1, ..., 65533) during the loop's lifetime,
   rather than (4294967295, 4294967296, ...).  */
static bool
scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
{
  tree type;
  wide_int minv, maxv, diff, step_wi;
  enum value_range_type rtype;

  if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
    return false;

  /* Check if VAR evaluates in every loop iteration.  It's not the case
     if VAR is a default definition or does not dominate the loop's latch.  */
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
  if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
    return false;

  rtype = get_range_info (var, &minv, &maxv);
  if (rtype != VR_RANGE)
    return false;

  /* VAR is a scev whose evolution part is STEP and value range info
     is [MIN, MAX]; we can prove its no-overflowness by the conditions:

       type_MAX - MAX >= |step|   ; if step > 0
       MIN - type_MIN >= |step|   ; if step < 0.

     Or VAR must take a value outside of its value range, which is not
     true.  */
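  /* E.g. for an unsigned char VAR with recorded range [10, 250] and
     STEP == 5: 255 - 250 >= 5 holds, so VAR + 5 cannot wrap; for
     STEP == -5: 10 - 0 >= 5 holds likewise.  With MAX == 251 the first
     test fails (255 - 251 < 5) and we give up.  */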
  step_wi = step;
  type = TREE_TYPE (var);
  if (tree_int_cst_sign_bit (step))
    {
      diff = lower_bound_in_type (type, type);
      diff = minv - diff;
      step_wi = - step_wi;
    }
  else
    {
      diff = upper_bound_in_type (type, type);
      diff = diff - maxv;
    }

  return (wi::geu_p (diff, step_wi));
}
/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPE's bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetic in C does not overflow).

   If VAR is an ssa variable, this function also returns false if VAR can
   be proven not to overflow with value range info.  */
bool
scev_probably_wraps_p (tree var, tree base, tree step,
		       gimple *at_stmt, struct loop *loop,
		       bool use_overflow_semantics)
{
  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetic:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetic does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Check if var can be proven not to overflow with value range info.  */
  if (var && TREE_CODE (var) == SSA_NAME
      && scev_var_range_cant_overflow (var, step, loop))
    return false;

  if (loop_exits_before_overflow (base, step, at_stmt, loop))
    return false;

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
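/* Caller-side sketch (hypothetical client code, not part of this file):
   given the affine evolution {base, step} of VAR at STMT in LOOP,

     if (!scev_probably_wraps_p (var, base, step, stmt, loop, true))
       ...treat the IV as non-wrapping...

   A "false" answer is the useful one: it licenses transformations that
   assume the evolution stays within TYPE's bounds.  */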
/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates (struct loop *loop)
{
  struct control_iv *civ;
  struct nb_iter_bound *bound;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound;)
    {
      struct nb_iter_bound *next = bound->next;
      ggc_free (bound);
      bound = next;
    }
  loop->bounds = NULL;

  for (civ = loop->control_ivs; civ;)
    {
      struct control_iv *next = civ->next;
      ggc_free (civ);
      civ = next;
    }
  loop->control_ivs = NULL;
}
/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (function *fn)
{
  struct loop *loop;

  FOR_EACH_LOOP_FN (fn, loop, 0)
    free_numbers_of_iterations_estimates (loop);
}
/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}