/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8

/*

   Analysis of number of iterations of an affine exit test.

*/

/* Bounds on some value, BELOW <= X <= UP.  */

struct bounds
{
  mpz_t below, up;
};

/* Splits expression EXPR to a variable part VAR and constant OFFSET.  */

static void
split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
  tree type = TREE_TYPE (expr);
  tree op0, op1;

  *var = expr;
  mpz_set_ui (offset, 0);

  switch (TREE_CODE (expr))
    {
    case MINUS_EXPR:
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      op0 = TREE_OPERAND (expr, 0);
      op1 = TREE_OPERAND (expr, 1);

      if (TREE_CODE (op1) != INTEGER_CST)
        break;

      *var = op0;
      /* Always sign extend the offset.  */
      wi::to_mpz (op1, offset, SIGNED);
      if (TREE_CODE (expr) == MINUS_EXPR)
        mpz_neg (offset, offset);
      break;

    case INTEGER_CST:
      *var = build_int_cst_type (type, 0);
      wi::to_mpz (expr, offset, TYPE_SIGN (type));
      break;

    default:
      break;
    }
}
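
/* For instance (an illustration, not part of the analysis itself):
   given EXPR = p_5 + 16, this stores *VAR = p_5 and OFFSET = 16;
   given EXPR = i_3 - 4, it stores *VAR = i_3 and OFFSET = -4; and a
   plain constant 7 yields *VAR = 0 and OFFSET = 7.  */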
/* From condition C0 CMP C1 derives information regarding the value range
   of VAR, which is of TYPE.  Results are stored into BELOW and UP.  */

static void
refine_value_range_using_guard (tree type, tree var,
                                tree c0, enum tree_code cmp, tree c1,
                                mpz_t below, mpz_t up)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1;
  mpz_t mint, maxt, minc1, maxc1;
  wide_int minv, maxv;
  bool no_wrap = nowrap_type_p (type);
  bool c0_ok, c1_ok;
  signop sgn = TYPE_SIGN (type);

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however,
         such a guard is unlikely to appear, so we do not bother with
         handling it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information,
         except for cases of comparing with bounds.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same
         type as VAR.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (operand_equal_p (var, c0, 0))
        {
          mpz_t valc1;

          /* Case of comparing VAR with its below/up bounds.  */
          mpz_init (valc1);
          wi::to_mpz (c1, valc1, TYPE_SIGN (type));
          if (mpz_cmp (valc1, below) == 0)
            cmp = GT_EXPR;
          if (mpz_cmp (valc1, up) == 0)
            cmp = LT_EXPR;

          mpz_clear (valc1);
        }
      else
        {
          /* Case of comparing with the bounds of the type.  */
          wide_int min = wi::min_value (type);
          wide_int max = wi::max_value (type);

          if (wi::eq_p (c1, min))
            cmp = GT_EXPR;
          if (wi::eq_p (c1, max))
            cmp = LT_EXPR;
        }

      /* Quick return if no useful information.  */
      if (cmp == NE_EXPR)
        return;

      break;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VAR.  */
  if (operand_equal_p (var, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }
  else if (!operand_equal_p (var, varc0, 0))
    {
      mpz_clear (offc0);
      mpz_clear (offc1);
      return;
    }

  mpz_init (mint);
  mpz_init (maxt);
  get_type_static_bounds (type, mint, maxt);
  mpz_init (minc1);
  mpz_init (maxc1);
  /* Setup range information for varc1.  */
  if (integer_zerop (varc1))
    {
      wi::to_mpz (integer_zero_node, minc1, TYPE_SIGN (type));
      wi::to_mpz (integer_zero_node, maxc1, TYPE_SIGN (type));
    }
  else if (TREE_CODE (varc1) == SSA_NAME
           && INTEGRAL_TYPE_P (type)
           && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
    {
      gcc_assert (wi::le_p (minv, maxv, sgn));
      wi::to_mpz (minv, minc1, sgn);
      wi::to_mpz (maxv, maxc1, sgn);
    }
  else
    {
      mpz_set (minc1, mint);
      mpz_set (maxc1, maxt);
    }

  /* Compute valid range information for varc1 + offc1.  Note nothing
     useful can be derived if it overflows or underflows.  Overflow or
     underflow could happen when:

       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
  mpz_add (minc1, minc1, offc1);
  mpz_add (maxc1, maxc1, offc1);
  c1_ok = (no_wrap
           || mpz_sgn (offc1) == 0
           || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
           || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
  if (!c1_ok)
    goto end;

  if (mpz_cmp (minc1, mint) < 0)
    mpz_set (minc1, mint);
  if (mpz_cmp (maxc1, maxt) > 0)
    mpz_set (maxc1, maxt);

  if (cmp == LT_EXPR)
    {
      cmp = LE_EXPR;
      mpz_sub_ui (maxc1, maxc1, 1);
    }
  if (cmp == GT_EXPR)
    {
      cmp = GE_EXPR;
      mpz_add_ui (minc1, minc1, 1);
    }

  /* Compute range information for varc0.  If there is no overflow,
     the condition implied that

       (varc0) cmp (varc1 + offc1 - offc0)

     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
     or the below bound if cmp is GE_EXPR.

     To prove there is no overflow/underflow, we need to check below
     four cases:

       1) cmp == LE_EXPR && offc0 > 0

            (varc0 + offc0) doesn't overflow
            && (varc1 + offc1 - offc0) doesn't underflow

       2) cmp == LE_EXPR && offc0 < 0

            (varc0 + offc0) doesn't underflow
            && (varc1 + offc1 - offc0) doesn't overflow

          In this case, (varc0 + offc0) will never underflow if we can
          prove (varc1 + offc1 - offc0) doesn't overflow.

       3) cmp == GE_EXPR && offc0 < 0

            (varc0 + offc0) doesn't underflow
            && (varc1 + offc1 - offc0) doesn't overflow

       4) cmp == GE_EXPR && offc0 > 0

            (varc0 + offc0) doesn't overflow
            && (varc1 + offc1 - offc0) doesn't underflow

          In this case, (varc0 + offc0) will never overflow if we can
          prove (varc1 + offc1 - offc0) doesn't underflow.

     Note we only handle case 2 and 4 in below code.  */

  mpz_sub (minc1, minc1, offc0);
  mpz_sub (maxc1, maxc1, offc0);
  c0_ok = (no_wrap
           || mpz_sgn (offc0) == 0
           || (cmp == LE_EXPR
               && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
           || (cmp == GE_EXPR
               && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
  if (!c0_ok)
    goto end;

  if (cmp == LE_EXPR)
    {
      if (mpz_cmp (up, maxc1) > 0)
        mpz_set (up, maxc1);
    }
  else
    {
      if (mpz_cmp (below, minc1) < 0)
        mpz_set (below, minc1);
    }

end:
  mpz_clear (mint);
  mpz_clear (maxt);
  mpz_clear (minc1);
  mpz_clear (maxc1);
  mpz_clear (offc0);
  mpz_clear (offc1);
}
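
/* Illustrative example (assumed, not from the original sources): for a
   signed int VAR (so no_wrap holds), a dominating guard VAR + 1 <= 100
   splits into varc0 = VAR, offc0 = 1 and minc1 = maxc1 = 100; both
   overflow checks pass and UP is tightened to min (UP, 100 - 1) = 99.  */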
/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
   in TYPE to MIN and MAX.  */

static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
                       mpz_t min, mpz_t max)
{
  int cnt = 0;
  mpz_t minm, maxm;
  basic_block bb;
  wide_int minv, maxv;
  enum value_range_type rtype = VR_VARYING;

  /* If the expression is a constant, we know its value exactly.  */
  if (integer_zerop (var))
    {
      mpz_set (min, off);
      mpz_set (max, off);
      return;
    }

  get_type_static_bounds (type, min, max);

  /* See if we have some range info from VRP.  */
  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
    {
      edge e = loop_preheader_edge (loop);
      signop sgn = TYPE_SIGN (type);
      gphi_iterator gsi;

      /* Either for VAR itself...  */
      rtype = get_range_info (var, &minv, &maxv);
      /* Or for PHI results in loop->header where VAR is used as
         PHI argument from the loop preheader edge.  */
      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          wide_int minc, maxc;
          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
              && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
                  == VR_RANGE))
            {
              if (rtype != VR_RANGE)
                {
                  rtype = VR_RANGE;
                  minv = minc;
                  maxv = maxc;
                }
              else
                {
                  minv = wi::max (minv, minc, sgn);
                  maxv = wi::min (maxv, maxc, sgn);
                  /* If the PHI result range is inconsistent with
                     the VAR range, give up on looking at the PHI
                     results.  This can happen if VR_UNDEFINED is
                     involved.  */
                  if (wi::gt_p (minv, maxv, sgn))
                    {
                      rtype = get_range_info (var, &minv, &maxv);
                      break;
                    }
                }
            }
        }
      mpz_init (minm);
      mpz_init (maxm);
      if (rtype != VR_RANGE)
        {
          mpz_set (minm, min);
          mpz_set (maxm, max);
        }
      else
        {
          gcc_assert (wi::le_p (minv, maxv, sgn));
          wi::to_mpz (minv, minm, sgn);
          wi::to_mpz (maxv, maxm, sgn);
        }
      /* Now walk the dominators of the loop header and use the entry
         guards to refine the estimates.  */
      for (bb = loop->header;
           bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
           bb = get_immediate_dominator (CDI_DOMINATORS, bb))
        {
          tree c0, c1;
          gimple *cond;
          enum tree_code cmp;

          if (!single_pred_p (bb))
            continue;
          e = single_pred_edge (bb);

          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
            continue;

          cond = last_stmt (e->src);
          c0 = gimple_cond_lhs (cond);
          cmp = gimple_cond_code (cond);
          c1 = gimple_cond_rhs (cond);

          if (e->flags & EDGE_FALSE_VALUE)
            cmp = invert_tree_comparison (cmp, false);

          refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
          ++cnt;
        }

      mpz_add (minm, minm, off);
      mpz_add (maxm, maxm, off);
      /* If the computation may not wrap or off is zero, then this
         is always fine.  If off is negative and minv + off isn't
         smaller than type's minimum, or off is positive and
         maxv + off isn't bigger than type's maximum, use the more
         precise range too.  */
      if (nowrap_type_p (type)
          || mpz_sgn (off) == 0
          || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
          || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
        {
          mpz_set (min, minm);
          mpz_set (max, maxm);
          mpz_clear (minm);
          mpz_clear (maxm);
          return;
        }
      mpz_clear (minm);
      mpz_clear (maxm);
    }

  /* If the computation may wrap, we know nothing about the value, except for
     the range of the type.  */
  if (!nowrap_type_p (type))
    return;

  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
     add it to MIN, otherwise to MAX.  */
  if (mpz_sgn (off) < 0)
    mpz_add (max, max, off);
  else
    mpz_add (min, min, off);
}
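
/* E.g. (illustrative): if VRP knows n_4 is in [1, INT_MAX] on entry and
   we are asked about VAR = n_4 with OFF = -1 in signed int, the result
   is MIN = 0 and MAX = INT_MAX - 1, since the signed addition of OFF
   cannot wrap.  */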
/* Stores the bounds on the difference of the values of the expressions
   (var + X) and (var + Y), computed in TYPE, to BNDS.  */

static void
bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
                                    bounds *bnds)
{
  int rel = mpz_cmp (x, y);
  bool may_wrap = !nowrap_type_p (type);
  mpz_t m;

  /* If X == Y, then the expressions are always equal.
     If X > Y, there are the following possibilities:
       a) neither of var + X and var + Y overflow or underflow, or both of
          them do.  Then their difference is X - Y.
       b) var + X overflows, and var + Y does not.  Then the values of the
          expressions are var + X - M and var + Y, where M is the range of
          the type, and their difference is X - Y - M.
       c) var + Y underflows and var + X does not.  Their difference again
          is X - Y - M.
     Therefore, if the arithmetics in type does not overflow, then the
     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
     (X - Y, X - Y + M).  */

  if (rel == 0)
    {
      mpz_set_ui (bnds->below, 0);
      mpz_set_ui (bnds->up, 0);
      return;
    }
  mpz_init (m);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
  mpz_add_ui (m, m, 1);
  mpz_sub (bnds->up, x, y);
  mpz_set (bnds->below, bnds->up);

  if (may_wrap)
    {
      if (rel > 0)
        mpz_sub (bnds->below, bnds->below, m);
      else
        mpz_add (bnds->up, bnds->up, m);
    }

  mpz_clear (m);
}
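
/* A small worked case (illustrative): in a wrapping 8-bit type with
   X = 10 and Y = 2, we have M = 256 and the stored bounds are
   [10 - 2 - 256, 10 - 2] = [-248, 8]; in a type whose arithmetic
   cannot wrap, both bounds are exactly 8.  */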
/* From condition C0 CMP C1 derives information regarding the
   difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
   and stores it to BNDS.  */

static void
refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
                           tree vary, mpz_t offy,
                           tree c0, enum tree_code cmp, tree c1,
                           bounds *bnds)
{
  tree varc0, varc1, ctype;
  mpz_t offc0, offc1, loffx, loffy, bnd;
  bool lbound = false;
  bool no_wrap = nowrap_type_p (type);
  bool x_ok, y_ok;

  switch (cmp)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      STRIP_SIGN_NOPS (c0);
      STRIP_SIGN_NOPS (c1);
      ctype = TREE_TYPE (c0);
      if (!useless_type_conversion_p (ctype, type))
        return;

      break;

    case EQ_EXPR:
      /* We could derive quite precise information from EQ_EXPR, however, such
         a guard is unlikely to appear, so we do not bother with handling
         it.  */
      return;

    case NE_EXPR:
      /* NE_EXPR comparisons do not contain much useful information, except for
         the special case of comparing with the bounds of the type.  */
      if (TREE_CODE (c1) != INTEGER_CST
          || !INTEGRAL_TYPE_P (type))
        return;

      /* Ensure that the condition speaks about an expression in the same type
         as X and Y.  */
      ctype = TREE_TYPE (c0);
      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
        return;
      c0 = fold_convert (type, c0);
      c1 = fold_convert (type, c1);

      if (TYPE_MIN_VALUE (type)
          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
        {
          cmp = GT_EXPR;
          break;
        }
      if (TYPE_MAX_VALUE (type)
          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
        {
          cmp = LT_EXPR;
          break;
        }

      return;

    default:
      return;
    }

  mpz_init (offc0);
  mpz_init (offc1);
  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);

  /* We are only interested in comparisons of expressions based on VARX and
     VARY.  TODO -- we might also be able to derive some bounds from
     expressions containing just one of the variables.  */

  if (operand_equal_p (varx, varc1, 0))
    {
      std::swap (varc0, varc1);
      mpz_swap (offc0, offc1);
      cmp = swap_tree_comparison (cmp);
    }

  if (!operand_equal_p (varx, varc0, 0)
      || !operand_equal_p (vary, varc1, 0))
    goto end;

  mpz_init_set (loffx, offx);
  mpz_init_set (loffy, offy);

  if (cmp == GT_EXPR || cmp == GE_EXPR)
    {
      std::swap (varx, vary);
      mpz_swap (offc0, offc1);
      mpz_swap (loffx, loffy);
      cmp = swap_tree_comparison (cmp);
      lbound = true;
    }

  /* If there is no overflow, the condition implies that

     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).

     The overflows and underflows may complicate things a bit; each
     overflow decreases the appropriate offset by M, and underflow
     increases it by M.  The above inequality would not necessarily be
     true if

     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
        VARX + OFFC0 overflows, but VARX + OFFX does not.
        This may only happen if OFFX < OFFC0.
     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
        VARY + OFFC1 underflows and VARY + OFFY does not.
        This may only happen if OFFY > OFFC1.  */

  if (no_wrap)
    {
      x_ok = true;
      y_ok = true;
    }
  else
    {
      x_ok = (integer_zerop (varx)
              || mpz_cmp (loffx, offc0) >= 0);
      y_ok = (integer_zerop (vary)
              || mpz_cmp (loffy, offc1) <= 0);
    }

  if (x_ok && y_ok)
    {
      mpz_init (bnd);
      mpz_sub (bnd, loffx, loffy);
      mpz_add (bnd, bnd, offc1);
      mpz_sub (bnd, bnd, offc0);

      if (cmp == LT_EXPR)
        mpz_sub_ui (bnd, bnd, 1);

      if (lbound)
        {
          mpz_neg (bnd, bnd);
          if (mpz_cmp (bnds->below, bnd) < 0)
            mpz_set (bnds->below, bnd);
        }
      else
        {
          if (mpz_cmp (bnd, bnds->up) < 0)
            mpz_set (bnds->up, bnd);
        }
      mpz_clear (bnd);
    }

  mpz_clear (loffx);
  mpz_clear (loffy);
end:
  mpz_clear (offc0);
  mpz_clear (offc1);
}
/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
   The subtraction is considered to be performed in arbitrary precision,
   without overflows.

   We do not attempt to be too clever regarding the value ranges of X and
   Y; most of the time, they are just integers or ssa names offsetted by
   integer.  However, we try to use the information contained in the
   comparisons before the loop (usually created by loop header copying).  */

static void
bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
{
  tree type = TREE_TYPE (x);
  tree varx, vary;
  mpz_t offx, offy;
  mpz_t minx, maxx, miny, maxy;
  int cnt = 0;
  edge e;
  basic_block bb;
  tree c0, c1;
  gimple *cond;
  enum tree_code cmp;

  /* Get rid of unnecessary casts, but preserve the value of
     the expressions.  */
  STRIP_SIGN_NOPS (x);
  STRIP_SIGN_NOPS (y);

  mpz_init (bnds->below);
  mpz_init (bnds->up);
  mpz_init (offx);
  mpz_init (offy);
  split_to_var_and_offset (x, &varx, offx);
  split_to_var_and_offset (y, &vary, offy);

  if (!integer_zerop (varx)
      && operand_equal_p (varx, vary, 0))
    {
      /* Special case VARX == VARY -- we just need to compare the
         offsets.  Matters are a bit more complicated in case the
         addition of the offsets may wrap.  */
      bound_difference_of_offsetted_base (type, offx, offy, bnds);
    }
  else
    {
      /* Otherwise, use the value ranges to determine the initial
         estimates on below and up.  */
      mpz_init (minx);
      mpz_init (maxx);
      mpz_init (miny);
      mpz_init (maxy);
      determine_value_range (loop, type, varx, offx, minx, maxx);
      determine_value_range (loop, type, vary, offy, miny, maxy);

      mpz_sub (bnds->below, minx, maxy);
      mpz_sub (bnds->up, maxx, miny);
      mpz_clear (minx);
      mpz_clear (maxx);
      mpz_clear (miny);
      mpz_clear (maxy);
    }

  /* If both X and Y are constants, we cannot get any more precise.  */
  if (integer_zerop (varx) && integer_zerop (vary))
    goto end;

  /* Now walk the dominators of the loop header and use the entry
     guards to refine the estimates.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = last_stmt (e->src);
      c0 = gimple_cond_lhs (cond);
      cmp = gimple_cond_code (cond);
      c1 = gimple_cond_rhs (cond);

      if (e->flags & EDGE_FALSE_VALUE)
        cmp = invert_tree_comparison (cmp, false);

      refine_bounds_using_guard (type, varx, offx, vary, offy,
                                 c0, cmp, c1, bnds);
      ++cnt;
    }

end:
  mpz_clear (offx);
  mpz_clear (offy);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of X + DELTA.  X can be obtained as a
   difference of two values in TYPE.  */

static void
bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
  mpz_t mdelta, max;

  mpz_init (mdelta);
  wi::to_mpz (delta, mdelta, SIGNED);

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);

  mpz_add (bnds->up, bnds->up, mdelta);
  mpz_add (bnds->below, bnds->below, mdelta);

  if (mpz_cmp (bnds->up, max) > 0)
    mpz_set (bnds->up, max);

  mpz_neg (max, max);
  if (mpz_cmp (bnds->below, max) < 0)
    mpz_set (bnds->below, max);

  mpz_clear (mdelta);
  mpz_clear (max);
}
/* Update the bounds in BNDS that restrict the value of X to the bounds
   that restrict the value of -X.  */

static void
bounds_negate (bounds *bnds)
{
  mpz_t tmp;

  mpz_init_set (tmp, bnds->up);
  mpz_neg (bnds->up, bnds->below);
  mpz_neg (bnds->below, tmp);
  mpz_clear (tmp);
}
/* Returns inverse of X modulo 2^s, where MASK = 2^s - 1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x);
          x = int_const_binop (MULT_EXPR, x, x);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
    }

  return rslt;
}
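
/* Worked example (illustrative): for X = 3 and MASK = 0xf (s = 4),
   ctr = tree_floor_log2 (0xf) = 3 and the squaring loop computes
   rslt = 3^(2^3 - 1) = 3^7 = 2187, which masked by 0xf yields 11;
   indeed 3 * 11 = 33 == 1 (mod 16).  */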
/* Derives the upper bound BND on the number of executions of loop with exit
   condition S * i <> C.  If NO_OVERFLOW is true, then the control variable of
   the loop does not overflow.  EXIT_MUST_BE_TAKEN is true if we are guaranteed
   that the loop ends through this exit, i.e., the induction variable ever
   reaches the value of C.

   The value C is equal to final - base, where final and base are the final and
   initial value of the actual induction variable in the analysed loop.  BNDS
   bounds the value of this difference when computed in signed type with
   unbounded range, while the computation of C is performed in an unsigned
   type with the range matching the range of the type of the induction variable.
   In particular, BNDS.up contains an upper bound on C in the following cases:
   -- if the iv must reach its final value without overflow, i.e., if
      NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
   -- if final >= base, which we know to hold when BNDS.below >= 0.  */

static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
{
  widest_int max;
  mpz_t d;
  tree type = TREE_TYPE (c);
  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
                       || mpz_sgn (bnds->below) >= 0);

  if (integer_onep (s)
      || (TREE_CODE (c) == INTEGER_CST
          && TREE_CODE (s) == INTEGER_CST
          && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
      || (TYPE_OVERFLOW_UNDEFINED (type)
          && multiple_of_p (type, c, s)))
    {
      /* If C is an exact multiple of S, then its value will be reached before
         the induction variable overflows (unless the loop is exited in some
         other way before).  Note that the actual induction variable in the
         loop (which ranges from base to final instead of from 0 to C) may
         overflow, in which case BNDS.up will not be giving a correct upper
         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
      no_overflow = true;
      exit_must_be_taken = true;
    }

  /* If the induction variable can overflow, the number of iterations is at
     most the period of the control variable (or infinite, but in that case
     the whole # of iterations analysis will fail).  */
  if (!no_overflow)
    {
      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
    }

  /* Now we know that the induction variable does not overflow, so the loop
     iterates at most (range of type / S) times.  */
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);

  /* If the induction variable is guaranteed to reach the value of C before
     overflow, ... */
  if (exit_must_be_taken)
    {
      /* ... then we can strengthen this to C / S, and possibly we can use
         the upper bound on C given by BNDS.  */
      if (TREE_CODE (c) == INTEGER_CST)
        wi::to_mpz (c, bnd, UNSIGNED);
      else if (bnds_u_valid)
        mpz_set (bnd, bnds->up);
    }

  mpz_init (d);
  wi::to_mpz (s, d, UNSIGNED);
  mpz_fdiv_q (bnd, bnd, d);
  mpz_clear (d);
}
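
/* For instance (illustrative): with an 8-bit type and S = 4, i.e.
   wi::ctz (s) = 2, an overflowing iv yields the period bound
   2^(8 - 2) - 1 = 63.  With NO_OVERFLOW the bound starts from 255, and
   if additionally EXIT_MUST_BE_TAKEN holds with constant C = 20, it is
   strengthened to 20 / 4 = 5.  */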
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that the exit must be taken eventually, i.e., that the IV
   ever reaches the value FINAL (we derived this earlier, and possibly set
   NITER->assumptions to make sure this is the case).  BNDS contains the
   bounds on the difference FINAL - IV->base.  */

static bool
number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
                         tree final, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;
  mpz_t max;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality S * i <> C, with S
     positive.  Also cast everything to the unsigned type.  If IV does
     not overflow, BNDS bounds the value of C.  Also, this is the
     case if the computation |FINAL - IV->base| does not overflow, i.e.,
     if BNDS->below in the result is nonnegative.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
      bounds_negate (bnds);
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  mpz_init (max);
  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (max);

  /* Compute no-overflow information for the control iv.  This can be
     proven when below two conditions are satisfied:

       1) IV evaluates toward FINAL at beginning, i.e:
            base <= FINAL ; step > 0
            base >= FINAL ; step < 0

       2) |FINAL - base| is an exact multiple of step.

     Unfortunately, it's hard to prove above conditions after pass loop-ch
     because loop with exit condition (IV != FINAL) usually will be guarded
     by initial-condition (IV.base - IV.step != FINAL).  In this case, we
     can alternatively try to prove below conditions:

       1') IV evaluates toward FINAL at beginning, i.e:
            new_base = base - step < FINAL ; step > 0
                                             && base - step doesn't underflow
            new_base = base - step > FINAL ; step < 0
                                             && base - step doesn't overflow

       2') |FINAL - new_base| is an exact multiple of step.

     Please refer to PR34114 as an example of loop-ch's impact, also refer
     to PR72817 as an example why condition 2') is necessary.

     Note, for NE_EXPR, base equal to FINAL is a special case, in
     which the loop exits immediately, and the iv does not overflow.  */
  if (!niter->control.no_overflow
      && (integer_onep (s) || multiple_of_p (type, c, s)))
    {
      tree t, cond, new_c, relaxed_cond = boolean_false_node;

      if (tree_int_cst_sign_bit (iv->step))
        {
          cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
          if (TREE_CODE (type) == INTEGER_TYPE)
            {
              /* Only when base - step doesn't overflow.  */
              t = TYPE_MAX_VALUE (type);
              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
              t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
              if (integer_nonzerop (t))
                {
                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
                  new_c = fold_build2 (MINUS_EXPR, niter_type,
                                       fold_convert (niter_type, t),
                                       fold_convert (niter_type, final));
                  if (multiple_of_p (type, new_c, s))
                    relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
                                                t, final);
                }
            }
        }
      else
        {
          cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
          if (TREE_CODE (type) == INTEGER_TYPE)
            {
              /* Only when base - step doesn't underflow.  */
              t = TYPE_MIN_VALUE (type);
              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
              t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
              if (integer_nonzerop (t))
                {
                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
                  new_c = fold_build2 (MINUS_EXPR, niter_type,
                                       fold_convert (niter_type, final),
                                       fold_convert (niter_type, t));
                  if (multiple_of_p (type, new_c, s))
                    relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
                                                t, final);
                }
            }
        }

      t = simplify_using_initial_conditions (loop, cond);
      if (!t || !integer_onep (t))
        t = simplify_using_initial_conditions (loop, relaxed_cond);

      if (t && integer_onep (t))
        niter->control.no_overflow = true;
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }
  if (niter->control.no_overflow && multiple_of_p (type, c, s))
    {
      niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_to_uhwi (bits)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!exit_must_be_taken)
    {
      /* If we cannot assume that the exit is taken eventually, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
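
/* A worked instance of the final formula (illustrative): for 8-bit
   unsigned i and exit test 6 * i != 36, we get bits = 1, d = 2,
   bound = 0x7f, c / d = 18 and s / d = 3; inverse (3, 0x7f) = 43
   since 3 * 43 = 129 == 1 (mod 128), so niter = (18 * 43) & 0x7f
   = 774 & 0x7f = 6, i.e. the loop starting at i = 0 runs six times
   before 6 * i reaches 36.  */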
/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  BNDS bounds the value of IV1->base - IV0->base,
   and will be updated by the same amount as DELTA.  EXIT_MUST_BE_TAKEN is
   true if we know that the exit must be taken eventually.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step,
                               bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  tree assumption = boolean_true_node, bound;
  tree type1 = (POINTER_TYPE_P (type)) ? sizetype : type;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (integer_nonzerop (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type1, mod);

  /* If the induction variable does not overflow and the exit is taken,
     then the computation of the final value does not overflow.  There
     are three cases:
       1) The case if the new final value is equal to the current one.
       2) Induction variable has pointer type, as the code cannot rely
          on the object to that the pointer points being placed at the
          end of the address space (and more pragmatically,
          TYPE_{MIN,MAX}_VALUE is not defined for pointers).
       3) EXIT_MUST_BE_TAKEN is true, note it implies that the induction
          variable does not overflow.  */
  if (!integer_zerop (mod) && !POINTER_TYPE_P (type) && !exit_must_be_taken)
    {
      if (integer_nonzerop (iv0->step))
        {
          /* The final value of the iv is iv1->base + MOD, assuming
             that this computation does not overflow, and that
             iv0->base <= iv1->base + MOD.  */
          bound = fold_build2 (MINUS_EXPR, type1,
                               TYPE_MAX_VALUE (type1), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }
      else
        {
          /* The final value of the iv is iv0->base - MOD, assuming
             that this computation does not overflow, and that
             iv0->base - MOD <= iv1->base.  */
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type1), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }
      if (integer_zerop (assumption))
        return false;
      else if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  /* Since we are transforming LT to NE and DELTA is constant, there
     is no need to compute may_be_zero because this loop must roll.  */

  bounds_add (bnds, wi::to_widest (mod), type);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
  return true;
}
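
/* E.g. (illustrative): for iv0 = {0, +, 3} and iv1->base = 10, we get
   MOD = 3 - (10 mod 3) = 2, the final value becomes 10 + 2 = 12, and
   the exit test is transformed to i != 12 with DELTA updated from 10
   to 12.  */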
/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */

static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (integer_nonzerop (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (integer_zerop (assumption))
    return false;
  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}
/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  BNDS
   bounds the value of IV1->base - IV0->base.  */

static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter, bounds *bnds)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr, type1;
  bool rolls_p, no_overflow_p;
  widest_int dstep;
  mpz_t mstep, max;

  /* We are going to compute the number of iterations as
     (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
     variant of TYPE.  This formula only works if

     -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1

     (where MAX is the maximum value of the unsigned variant of TYPE, and
     the computations in this formula are performed in full precision,
     i.e., without overflows).

     Usually, for loops with exit condition iv0->base + step * i < iv1->base,
     we have a condition of the form iv0->base - step < iv1->base before the loop,
     and for loops iv0->base < iv1->base - step * i the condition
     iv0->base < iv1->base + step, due to loop header copying, which enables us
     to prove the lower bound.

     The upper bound is more complicated.  Unless the expressions for initial
     and final value themselves contain enough information, we usually cannot
     derive it from the context.  */

  /* First check whether the answer does not follow from the bounds we gathered
     before.  */
  if (integer_nonzerop (iv0->step))
    dstep = wi::to_widest (iv0->step);
  else
    {
      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
    }

  mpz_init (mstep);
  wi::to_mpz (dstep, mstep, UNSIGNED);
  mpz_neg (mstep, mstep);
  mpz_add_ui (mstep, mstep, 1);

  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;

  mpz_init (max);
  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
  mpz_add (max, max, mstep);
  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
                   /* For pointers, only values lying inside a single object
                      can be compared or manipulated by pointer arithmetics.
                      Gcc in general does not allow or handle objects larger
                      than half of the address space, hence the upper bound
                      is satisfied for pointers.  */
                   || POINTER_TYPE_P (type));
  mpz_clear (mstep);
  mpz_clear (max);

  if (rolls_p && no_overflow_p)
    return;

  type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Now the hard part; we must formulate the assumption(s) as expressions, and
     we must be careful not to introduce overflow.  */

  if (integer_nonzerop (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type1,
                          iv0->step, build_int_cst (type1, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         0 address never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv0->base), diff);
      mbzr = fold_convert (type1, iv1->base);
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type1,
                          iv1->step, build_int_cst (type1, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type1,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = fold_convert (type1, iv0->base);
      mbzr = fold_build2 (MINUS_EXPR, type1,
                          fold_convert (type1, iv1->base), diff);
    }

  if (!integer_nonzerop (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);
  mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
  niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                    niter->may_be_zero, mbz);
}
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  BNDS bounds the difference
   IV1->base - IV0->base.  EXIT_MUST_BE_TAKEN is true if we know
   that the exit must be taken eventually.  */

static bool
number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;
  mpz_t mstep, tmp;

  if (integer_nonzerop (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
      || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming that
         iv1->base >= iv0->base.

         First try to derive a lower bound on the value of
         iv1->base - iv0->base, computed in full precision.  If the difference
         is nonnegative, we are done, otherwise we must record the
         condition.  */

      if (mpz_sgn (bnds->below) < 0)
        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                          iv1->base, iv0->base);
      niter->niter = delta;
      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
                                     TYPE_SIGN (niter_type));
      niter->control.no_overflow = true;
      return true;
    }

  if (integer_nonzerop (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
                                     exit_must_be_taken, bnds))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (loop, type, &zps,
                                      delta, niter, true, bnds);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);

  mpz_init (mstep);
  mpz_init (tmp);
  wi::to_mpz (step, mstep, UNSIGNED);
  mpz_add (tmp, bnds->up, mstep);
  mpz_sub_ui (tmp, tmp, 1);
  mpz_fdiv_q (tmp, tmp, mstep);
  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
                                 TYPE_SIGN (niter_type));
  mpz_clear (mstep);
  mpz_clear (tmp);

  return true;
}
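
/* E.g. (illustrative): for (i = 3; i < 10; i += 4) gives delta = 7 and
   step = 4, so niter = (7 + 4 - 1) / 4 = 2, matching the two executions
   with i = 3 and i = 7.  */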
/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  EXIT_MUST_BE_TAKEN is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  BNDS bounds the difference IV1->base - IV0->base.  */

static bool
number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
                         affine_iv *iv1, struct tree_niter_desc *niter,
                         bool exit_must_be_taken, bounds *bnds)
{
  tree assumption;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  We do not check
     this condition for pointer type ivs, as the code cannot rely on
     the object to that the pointer points being placed at the end of
     the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
     not defined for pointers).  */

  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
    {
      if (integer_nonzerop (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (integer_zerop (assumption))
        return false;
      if (!integer_nonzerop (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (integer_nonzerop (iv0->step))
    {
      if (POINTER_TYPE_P (type))
        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
      else
        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
                                 build_int_cst (type1, 1));
    }
  else if (POINTER_TYPE_P (type))
    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
  else
    iv0->base = fold_build2 (MINUS_EXPR, type1,
                             iv0->base, build_int_cst (type1, 1));

  bounds_add (bnds, 1, type1);

  return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
                                  bnds);
}
1573 dump_affine_iv (FILE *file
, affine_iv
*iv
)
1575 if (!integer_zerop (iv
->step
))
1576 fprintf (file
, "[");
1578 print_generic_expr (dump_file
, iv
->base
, TDF_SLIM
);
1580 if (!integer_zerop (iv
->step
))
1582 fprintf (file
, ", + , ");
1583 print_generic_expr (dump_file
, iv
->step
, TDF_SLIM
);
1584 fprintf (file
, "]%s", iv
->no_overflow
? "(no_overflow)" : "");
/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant zero).

   LOOP is the loop whose number of iterations we are determining.

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   if EVERY_ITERATION is true, we know the test is executed on every iteration.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */

static bool
number_of_iterations_cond (struct loop *loop,
                           tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit, bool every_iteration)
{
  bool exit_must_be_taken = false, ret;
  bounds bnds;

  /* If the test is not executed every iteration, wrapping may make the test
     pass again.
     TODO: the overflow case can be still used as unreliable estimate of upper
     bound.  But we have no API to pass it down to number of iterations code
     and, at present, it will not use it anyway.  */
  if (!every_iteration
      && (!iv0->no_overflow || !iv1->no_overflow
          || code == NE_EXPR || code == EQ_EXPR))
    return false;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->max = 0;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && integer_zerop (iv0->step)))
    {
      std::swap (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrap around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow and the only exit
     from the loop is the one that we analyze, we know it must be taken
     eventually.  */
  if (only_exit)
    {
      if (!integer_zerop (iv0->step) && iv0->no_overflow)
        exit_must_be_taken = true;
      else if (!integer_zerop (iv1->step) && iv1->no_overflow)
        exit_must_be_taken = true;
    }

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
    {
      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
      if (code != NE_EXPR)
        return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, step_type,
                                           iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = build_int_cst (step_type, 0);
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common enough
     to waste time on it.  */
  if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
  if (tem && integer_zerop (tem))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      niter->max = 0;
      return true;
    }

  /* OK, now we know we have a senseful loop.  Handle several cases, depending
     on what comparison operator is used.  */
  bound_difference (loop, iv1->base, iv0->base, &bnds);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file,
               "Analyzing # of iterations of loop %d\n", loop->num);

      fprintf (dump_file, "  exit condition ");
      dump_affine_iv (dump_file, iv0);
      fprintf (dump_file, " %s ",
               code == NE_EXPR ? "!="
               : code == LT_EXPR ? "<"
               : "<=");
      dump_affine_iv (dump_file, iv1);
      fprintf (dump_file, "\n");

      fprintf (dump_file, "  bounds on difference of bases: ");
      mpz_out_str (dump_file, 10, bnds.below);
      fprintf (dump_file, " ... ");
      mpz_out_str (dump_file, 10, bnds.up);
      fprintf (dump_file, "\n");
    }

  switch (code)
    {
    case NE_EXPR:
      gcc_assert (integer_zerop (iv1->step));
      ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LT_EXPR:
      ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);
      break;

    case LE_EXPR:
      ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
                                     exit_must_be_taken, &bnds);
      break;

    default:
      gcc_unreachable ();
    }

  mpz_clear (bnds.up);
  mpz_clear (bnds.below);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (ret)
        {
          fprintf (dump_file, "  result:\n");
          if (!integer_nonzerop (niter->assumptions))
            {
              fprintf (dump_file, "    under assumptions ");
              print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          if (!integer_zerop (niter->may_be_zero))
            {
              fprintf (dump_file, "    zero if ");
              print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
              fprintf (dump_file, "\n");
            }

          fprintf (dump_file, "    # of iterations ");
          print_generic_expr (dump_file, niter->niter, TDF_SLIM);
          fprintf (dump_file, ", bounded by ");
          print_decu (niter->max, dump_file);
          fprintf (dump_file, "\n");
        }
      else
        fprintf (dump_file, "  failed\n\n");
    }
  return ret;
}
/* Substitute NEW_TREE for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new_tree)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  /* Do not bother to replace constants.  */
  if (CONSTANT_CLASS_P (old))
    return expr;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new_tree);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new_tree);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}
/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  If STOP is specified, stop
   expanding if EXPR equals to it.  */

tree
expand_simple_operations (tree expr, tree stop)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, e1;
  enum tree_code code;
  gimple *stmt;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_OPERAND_LENGTH (expr);
      for (i = 0; i < n; i++)
        {
          e = TREE_OPERAND (expr, i);
          ee = expand_simple_operations (e, stop);
          if (e == ee)
            continue;

          if (!ret)
            ret = copy_node (expr);

          TREE_OPERAND (ret, i) = ee;
        }

      if (!ret)
        return expr;

      fold_defer_overflow_warnings ();
      ret = fold (ret);
      fold_undefer_and_ignore_overflow_warnings ();
      return ret;
    }

  /* Stop if it's not ssa name or the one we don't want to expand.  */
  if (TREE_CODE (expr) != SSA_NAME || expr == stop)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      basic_block src, dest;

      if (gimple_phi_num_args (stmt) != 1)
        return expr;
      e = PHI_ARG_DEF (stmt, 0);

      /* Avoid propagating through loop exit phi nodes, which
         could break loop-closed SSA form restrictions.  */
      dest = gimple_bb (stmt);
      src = single_pred (dest);
      if (TREE_CODE (e) == SSA_NAME
          && src->loop_father != dest->loop_father)
        return expr;

      return expand_simple_operations (e, stop);
    }
  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return expr;

  /* Avoid expanding to expressions that contain SSA names that need
     to take part in abnormal coalescing.  */
  ssa_op_iter iter;
  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
      return expr;

  e = gimple_assign_rhs1 (stmt);
  code = gimple_assign_rhs_code (stmt);
  if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
    {
      if (is_gimple_min_invariant (e))
        return e;

      if (code == SSA_NAME)
        return expand_simple_operations (e, stop);

      return expr;
    }

  switch (code)
    {
    CASE_CONVERT:
      /* Casts are simple.  */
      ee = expand_simple_operations (e, stop);
      return fold_build1 (code, TREE_TYPE (expr), ee);

    case PLUS_EXPR:
    case MINUS_EXPR:
      if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
          && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
        return expr;
      /* Fallthru.  */
    case POINTER_PLUS_EXPR:
      /* And increments and decrements by a constant are simple.  */
      e1 = gimple_assign_rhs2 (stmt);
      if (!is_gimple_min_invariant (e1))
        return expr;

      ee = expand_simple_operations (e, stop);
      return fold_build2 (code, TREE_TYPE (expr), ee, e1);

    default:
      return expr;
    }
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */

static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we cannot simplify expr
         using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (integer_zerop (e) || integer_nonzerop (e))
        return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (integer_zerop (e))
        return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (integer_zerop (e))
        return boolean_true_node;
    }

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
  if (e && integer_nonzerop (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
  if (e && integer_zerop (e))
    return e;

  return expr;
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}
/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

tree
simplify_using_initial_conditions (struct loop *loop, tree expr)
{
  edge e;
  basic_block bb;
  gimple *stmt;
  tree cond, expanded, backup;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  backup = expanded = expand_simple_operations (expr);

  /* Limit walking the dominators to avoid quadraticness in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      stmt = last_stmt (e->src);
      cond = fold_build2 (gimple_cond_code (stmt),
                          boolean_type_node,
                          gimple_cond_lhs (stmt),
                          gimple_cond_rhs (stmt));
      if (e->flags & EDGE_FALSE_VALUE)
        cond = invert_truthvalue (cond);
      expanded = tree_simplify_using_condition (cond, expanded);
      /* Break if EXPR is simplified to const values.  */
      if (expanded
          && (integer_zerop (expanded) || integer_nonzerop (expanded)))
        return expanded;

      ++cnt;
    }

  /* Return the original expression if no simplification is done.  */
  return operand_equal_p (backup, expanded, 0) ? expr : expanded;
}
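
/* Typical effect (illustrative): after loop header copying, a loop body
   dominated by the guard if (n_1 > 0) allows a may_be_zero expression
   such as n_1 <= 0 to simplify to false here, so no runtime test needs
   to be emitted.  */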
/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}
/* Returns true if EXIT is the only possible exit from LOOP.  */

static bool
loop_only_exit_p (const struct loop *loop, const_edge exit)
{
  basic_block *body;
  gimple_stmt_iterator bsi;
  unsigned i;

  if (exit != single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
	if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
	  {
	    free (body);
	    return false;
	  }
    }

  free (body);
  return true;
}

/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some useful
   information could be derived (and fields of NITER have meaning described
   in comments at struct tree_niter_desc declaration), false otherwise.
   When EVERY_ITERATION is true, only tests that are known to be executed
   every iteration are considered (i.e. only a test that alone bounds the
   loop).  If AT_STMT is not NULL, this function stores LOOP's condition
   statement in it when returning true.  */

bool
number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
				       struct tree_niter_desc *niter,
				       gcond **at_stmt, bool every_iteration)
{
  gimple *last;
  gcond *stmt;
  tree type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;
  bool safe;

  /* Nothing to analyze if the loop is known to be infinite.  */
  if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
    return false;

  safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);

  if (every_iteration && !safe)
    return false;

  niter->assumptions = boolean_false_node;
  niter->control.base = NULL_TREE;
  niter->control.step = NULL_TREE;
  niter->control.no_overflow = false;
  last = last_stmt (exit->src);
  if (!last)
    return false;
  stmt = dyn_cast <gcond *> (last);
  if (!stmt)
    return false;

  /* We want the condition for staying inside loop.  */
  code = gimple_cond_code (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    code = invert_tree_comparison (code, false);

  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      break;

    default:
      return false;
    }

  op0 = gimple_cond_lhs (stmt);
  op1 = gimple_cond_rhs (stmt);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;

  tree iv0_niters = NULL_TREE;
  if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
			      op0, &iv0, &iv0_niters, false))
    return false;
  tree iv1_niters = NULL_TREE;
  if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
			      op1, &iv1, &iv1_niters, false))
    return false;
  /* Give up on complicated case.  */
  if (iv0_niters && iv1_niters)
    return false;

  /* We don't want to see undefined signed overflow warnings while
     computing the number of iterations.  */
  fold_defer_overflow_warnings ();

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
				  loop_only_exit_p (loop, exit), safe))
    {
      fold_undefer_and_ignore_overflow_warnings ();
      return false;
    }

  /* Incorporate additional assumption implied by control iv.  */
  tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
  if (iv_niters)
    {
      tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
				     fold_convert (TREE_TYPE (niter->niter),
						   iv_niters));

      if (!integer_nonzerop (assumption))
	niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					  niter->assumptions, assumption);

      /* Refine upper bound if possible.  */
      if (TREE_CODE (iv_niters) == INTEGER_CST
	  && niter->max > wi::to_widest (iv_niters))
	niter->max = wi::to_widest (iv_niters);
    }

  /* There are no assumptions if the loop is known to be finite.  */
  if (!integer_zerop (niter->assumptions)
      && loop_constraint_set_p (loop, LOOP_C_FINITE))
    niter->assumptions = boolean_true_node;

  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
							    niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
							    niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->assumptions
    = simplify_using_initial_conditions (loop,
					 niter->assumptions);
  niter->may_be_zero
    = simplify_using_initial_conditions (loop,
					 niter->may_be_zero);

  fold_undefer_and_ignore_overflow_warnings ();

  /* If NITER has simplified into a constant, update MAX.  */
  if (TREE_CODE (niter->niter) == INTEGER_CST)
    niter->max = wi::to_widest (niter->niter);

  if (at_stmt)
    *at_stmt = stmt;

  return (!integer_zerop (niter->assumptions));
}

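/* Roughly, for a source loop such as

     for (i = 0; i != n; i++)
       ...

   with no other exit, the analysis above is expected to derive the
   symbolic niter expression (unsigned) n with trivially true assumptions,
   while a signed test like i < n in addition produces may_be_zero and
   assumption conditions covering the cases where the exit is taken
   immediately or the symbolic expression would be invalid.  The exact
   trees come from number_of_iterations_cond.  */
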
/* Like number_of_iterations_exit_assumptions, but return TRUE only if
   the niter information holds unconditionally.  */

bool
number_of_iterations_exit (struct loop *loop, edge exit,
			   struct tree_niter_desc *niter,
			   bool warn, bool every_iteration)
{
  gcond *stmt;
  if (!number_of_iterations_exit_assumptions (loop, exit, niter,
					      &stmt, every_iteration))
    return false;

  if (integer_nonzerop (niter->assumptions))
    return true;

  if (warn)
    {
      const char *wording;

      wording = N_("missed loop optimization, the loop counter may overflow");
      warning_at (gimple_location_safe (stmt),
		  OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
    }

  return false;
}

/* Try to determine the number of iterations of LOOP.  If we succeed,
   expression giving number of iterations is returned and *EXIT is
   set to the edge from that the information is obtained.  Otherwise
   chrec_dont_know is returned.  */

tree
find_loop_niter (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;
  struct tree_niter_desc desc;

  *exit = NULL;
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &desc, false))
	continue;

      if (integer_nonzerop (desc.may_be_zero))
	{
	  /* We exit in the first iteration through this exit.
	     We won't find anything better.  */
	  niter = build_int_cst (unsigned_type_node, 0);
	  *exit = ex;
	  break;
	}

      if (!integer_zerop (desc.may_be_zero))
	continue;

      aniter = desc.niter;

      if (!niter)
	{
	  /* Nothing recorded yet.  */
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      /* Prefer constants, the lower the better.  */
      if (TREE_CODE (aniter) != INTEGER_CST)
	continue;

      if (TREE_CODE (niter) != INTEGER_CST)
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}

      if (tree_int_cst_lt (aniter, niter))
	{
	  niter = aniter;
	  *exit = ex;
	  continue;
	}
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}

/* Return true if loop is known to have bounded number of iterations.  */

bool
finite_loop_p (struct loop *loop)
{
  widest_int nit;
  int flags;

  flags = flags_from_decl_or_type (current_function_decl);
  if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Found loop %i to be finite: it is within pure or const function.\n",
		 loop->num);
      return true;
    }

  if (loop->any_upper_bound
      || max_loop_iterations (loop, &nit))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
		 loop->num);
      return true;
    }
  return false;
}

/*

   Analysis of a number of iterations of a loop by a brute-force evaluation.

*/

/* Bound on the number of iterations we try to evaluate.  */

#define MAX_ITERATIONS_TO_TRACK \
  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))

/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static gphi *
chain_of_csts_start (struct loop *loop, tree x)
{
  gimple *stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = gimple_bb (stmt);
  enum tree_code code;

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      if (bb == loop->header)
	return as_a <gphi *> (stmt);

      return NULL;
    }

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
    return NULL;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_references_memory_p (stmt)
      || TREE_CODE_CLASS (code) == tcc_reference
      || (code == ADDR_EXPR
	  && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
    return NULL;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_TREE)
    return NULL;

  return chain_of_csts_start (loop, use);
}

/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only from operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants.

   If such phi node exists, it is returned, otherwise NULL is returned.  */

static gphi *
get_base_for (struct loop *loop, tree x)
{
  gphi *phi;
  tree init, next;

  if (is_gimple_min_invariant (x))
    return NULL;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (TREE_CODE (next) != SSA_NAME)
    return NULL;

  if (!is_gimple_min_invariant (init))
    return NULL;

  if (chain_of_csts_start (loop, next) != phi)
    return NULL;

  return phi;
}

/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * otherwise X is a SSA name, whose value in the considered loop is derived
     by a chain of operations with constant from a result of a phi node in
     the header of the loop.  Then we return value of X when the value of the
     result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  gimple *stmt;

  gcc_checking_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;

  stmt = SSA_NAME_DEF_STMT (x);
  if (gimple_code (stmt) == GIMPLE_PHI)
    return base;

  gcc_checking_assert (is_gimple_assign (stmt));

  /* STMT must be either an assignment of a single SSA name or an
     expression involving an SSA name and a constant.  Try to fold that
     expression using the value for the SSA name.  */
  if (gimple_assign_ssa_name_copy_p (stmt))
    return get_val_for (gimple_assign_rhs1 (stmt), base);
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
	   && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    return fold_build1 (gimple_assign_rhs_code (stmt),
			gimple_expr_type (stmt),
			get_val_for (gimple_assign_rhs1 (stmt), base));
  else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
    {
      tree rhs1 = gimple_assign_rhs1 (stmt);
      tree rhs2 = gimple_assign_rhs2 (stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	rhs1 = get_val_for (rhs1, base);
      else if (TREE_CODE (rhs2) == SSA_NAME)
	rhs2 = get_val_for (rhs2, base);
      else
	gcc_unreachable ();
      return fold_build2 (gimple_assign_rhs_code (stmt),
			  gimple_expr_type (stmt), rhs1, rhs2);
    }
  else
    gcc_unreachable ();
}

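/* For example, given the chain (SSA names illustrative)

     i_1 = PHI <0 (preheader), i_3 (latch)>
     i_2 = i_1 + 4;
     i_3 = i_2 * 2;

   get_val_for (i_3, 5) recursively folds (5 + 4) * 2 and returns the
   constant 18.  */
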
/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in first few iterations of the loop (assuming that
   these values are constant) and determining the first one in that the
   condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */

tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree acnd;
  tree op[2], val[2], next[2], aval[2];
  gphi *phi;
  gimple *cond;
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || gimple_code (cond) != GIMPLE_COND)
    return chrec_dont_know;

  cmp = gimple_cond_code (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cmp = invert_tree_comparison (cmp, false);

  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      op[0] = gimple_cond_lhs (cond);
      op[1] = gimple_cond_rhs (cond);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (is_gimple_min_invariant (op[j]))
	{
	  val[j] = op[j];
	  next[j] = NULL_TREE;
	  op[j] = NULL_TREE;
	}
      else
	{
	  phi = get_base_for (loop, op[j]);
	  if (!phi)
	    return chrec_dont_know;
	  val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
	  next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
	}
    }

  /* Don't issue signed overflow warnings.  */
  fold_defer_overflow_warnings ();

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
	aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && integer_zerop (acnd))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "Proved that loop %d iterates %d times using brute force.\n",
		     loop->num, i);
	  return build_int_cst (unsigned_type_node, i);
	}

      for (j = 0; j < 2; j++)
	{
	  val[j] = get_val_for (next[j], val[j]);
	  if (!is_gimple_min_invariant (val[j]))
	    {
	      fold_undefer_and_ignore_overflow_warnings ();
	      return chrec_dont_know;
	    }
	}
    }

  fold_undefer_and_ignore_overflow_warnings ();

  return chrec_dont_know;
}

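/* A sketch of the kind of loop this handles, where the affine analysis
   gives up because the induction variable is not affine:

     for (i = 1; i < 100; i *= 2)
       ...

   Evaluating the exit condition iteration by iteration from the constant
   initial value proves that the loop iterates exactly 7 times: the values
   1, 2, 4, 8, 16, 32 and 64 satisfy i < 100, while 128 does not.  */
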
/* Finds the exit of the LOOP by that the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find the exit for that loop_niter_by_eval
   determines the number of iterations, chrec_dont_know is returned.  */

tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
  unsigned i;
  vec<edge> exits = get_loop_exit_edges (loop);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;

  /* Loops with multiple exits are expensive to handle and less important.  */
  if (!flag_expensive_optimizations
      && exits.length () > 1)
    {
      exits.release ();
      return chrec_dont_know;
    }

  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!just_once_each_iteration_p (loop, ex->src))
	continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
	continue;

      if (niter
	  && !tree_int_cst_lt (aniter, niter))
	continue;

      niter = aniter;
      *exit = ex;
    }
  exits.release ();

  return niter ? niter : chrec_dont_know;
}

/*

   Analysis of upper bounds on number of iterations of a loop.

*/

static widest_int derive_constant_upper_bound_ops (tree, tree,
						   enum tree_code, tree);

/* Returns a constant upper bound on the value of the right-hand side of
   an assignment statement STMT.  */

static widest_int
derive_constant_upper_bound_assign (gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree op0 = gimple_assign_rhs1 (stmt);
  tree op1 = gimple_assign_rhs2 (stmt);

  return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
					  op0, code, op1);
}

/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */

static widest_int
derive_constant_upper_bound (tree val)
{
  enum tree_code code;
  tree op0, op1, op2;

  extract_ops_from_tree (val, &code, &op0, &op1, &op2);
  return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
}

/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
   whose type is TYPE.  The expression is considered to be unsigned.  If
   its type is signed, its value must be nonnegative.  */

static widest_int
derive_constant_upper_bound_ops (tree type, tree op0,
				 enum tree_code code, tree op1)
{
  tree subtype, maxt;
  widest_int bnd, max, cst;
  gimple *stmt;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = wi::to_widest (maxt);

  switch (code)
    {
    case INTEGER_CST:
      return wi::to_widest (op0);

    CASE_CONVERT:
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
	  /* If TYPE is also signed, the fact that VAL is nonnegative implies
	     that OP0 is nonnegative.  */
	  && TYPE_UNSIGNED (type)
	  && !tree_expr_nonnegative_p (op0))
	{
	  /* If we cannot prove that the casted expression is nonnegative,
	     we cannot establish more useful upper bound than the precision
	     of the type gives us.  */
	  return max;
	}

      /* We now know that op0 is a nonnegative value.  Try deriving an upper
	 bound for it.  */
      bnd = derive_constant_upper_bound (op0);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
	 attained.  */
      if (wi::ltu_p (max, bnd))
	return max;

      return bnd;

    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || !tree_expr_nonnegative_p (op0))
	return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
	 choose the most logical way how to treat this constant regardless
	 of the signedness of the type.  */
      cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
      if (code != MINUS_EXPR)
	cst = -cst;

      bnd = derive_constant_upper_bound (op0);

      if (wi::neg_p (cst))
	{
	  cst = -cst;
	  /* Avoid CST == 0x80000...  */
	  if (wi::neg_p (cst))
	    return max;

	  /* OP0 + CST.  We need to check that
	     BND <= MAX (type) - CST.  */

	  widest_int mmax = max - cst;
	  if (wi::leu_p (bnd, mmax))
	    return bnd + cst;

	  return max;
	}
      else
	{
	  /* OP0 - CST, where CST >= 0.

	     If TYPE is signed, we have already verified that OP0 >= 0, and we
	     know that the result is nonnegative.  This implies that
	     VAL <= BND - CST.

	     If TYPE is unsigned, we must additionally know that OP0 >= CST,
	     otherwise the operation underflows.
	   */

	  /* This should only happen if the type is unsigned; however, for
	     buggy programs that use overflowing signed arithmetics even with
	     -fno-wrapv, this condition may also be true for signed values.  */
	  if (wi::ltu_p (bnd, cst))
	    return max;

	  if (TYPE_UNSIGNED (type))
	    {
	      tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
				      wide_int_to_tree (type, cst));
	      if (!tem || !integer_nonzerop (tem))
		return max;
	    }

	  bnd -= cst;
	}

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;

      bnd = derive_constant_upper_bound (op0);
      return wi::udiv_floor (bnd, wi::to_widest (op1));

    case BIT_AND_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
	  || tree_int_cst_sign_bit (op1))
	return max;
      return wi::to_widest (op1);

    case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (op0);
      if (gimple_code (stmt) != GIMPLE_ASSIGN
	  || gimple_assign_lhs (stmt) != op0)
	return max;
      return derive_constant_upper_bound_assign (stmt);

    default:
      return max;
    }
}

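/* Two small worked examples of the recursion above (the name n_1 is
   illustrative): for a 16-bit unsigned n_1, the FLOOR_DIV_EXPR expression
   n_1 / 4 gets the bound 65535 / 4 = 16383 via wi::udiv_floor, and the
   BIT_AND_EXPR expression n_1 & 3 gets the bound 3, i.e. the nonnegative
   constant operand itself.  */
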
/* Emit a -Waggressive-loop-optimizations warning if needed.  */

static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
				       widest_int i_bound, gimple *stmt)
{
  /* Don't warn if the loop doesn't have known constant bound.  */
  if (!loop->nb_iterations
      || TREE_CODE (loop->nb_iterations) != INTEGER_CST
      || !warn_aggressive_loop_optimizations
      /* To avoid warning multiple times for the same loop,
	 only start warning when we preserve loops.  */
      || (cfun->curr_properties & PROP_loops) == 0
      /* Only warn once per loop.  */
      || loop->warned_aggressive_loop_optimizations
      /* Only warn if undefined behavior gives us lower estimate than the
	 known constant bound.  */
      || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
      /* And undefined behavior happens unconditionally.  */
      || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
    return;

  edge e = single_exit (loop);
  if (e == NULL)
    return;

  gimple *estmt = last_stmt (e->src);
  char buf[WIDE_INT_PRINT_BUFFER_SIZE];
  print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
	     ? UNSIGNED : SIGNED);
  if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
		  "iteration %s invokes undefined behavior", buf))
    inform (gimple_location (estmt), "within this loop");
  loop->warned_aggressive_loop_optimizations = true;
}

/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP.  IS_EXIT
   is true if the loop is exited immediately after STMT, and this exit
   is taken at last when the STMT is executed BOUND + 1 times.
   REALISTIC is true if BOUND is expected to be close to the real number
   of iterations.  UPPER is true if we are sure the loop iterates at most
   BOUND times.  I_BOUND is a widest_int upper estimate on BOUND.  */

static void
record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
		 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
{
  widest_int delta;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
      print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
      fprintf (dump_file, " is %sexecuted at most ",
	       upper ? "" : "probably ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      print_decu (i_bound, dump_file);
      fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
    }

  /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
     real number of iterations.  */
  if (TREE_CODE (bound) != INTEGER_CST)
    realistic = false;
  else
    gcc_checking_assert (i_bound == wi::to_widest (bound));

  /* If we have a guaranteed upper bound, record it in the appropriate
     list, unless this is an !is_exit bound (i.e. undefined behavior in
     at_stmt) in a loop with known constant number of iterations.  */
  if (upper
      && (is_exit
	  || loop->nb_iterations == NULL_TREE
	  || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
    {
      struct nb_iter_bound *elt = ggc_alloc <nb_iter_bound> ();

      elt->bound = i_bound;
      elt->stmt = at_stmt;
      elt->is_exit = is_exit;
      elt->next = loop->bounds;
      loop->bounds = elt;
    }

  /* If statement is executed on every path to the loop latch, we can directly
     infer the upper bound on the # of iterations of the loop.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
    upper = false;

  /* Update the number of iteration estimates according to the bound.
     If at_stmt is an exit then the loop latch is executed at most BOUND times,
     otherwise it can be executed BOUND + 1 times.  We will lower the estimate
     later if such statement must be executed on last iteration.  */
  if (is_exit)
    delta = 0;
  else
    delta = 1;
  widest_int new_i_bound = i_bound + delta;

  /* If an overflow occurred, ignore the result.  */
  if (wi::ltu_p (new_i_bound, delta))
    return;

  if (upper && !is_exit)
    do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
  record_niter_bound (loop, new_i_bound, realistic, upper);
}

/* Records the control iv analyzed in NITER for LOOP if the iv is valid
   and doesn't overflow.  */

static void
record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
{
  struct control_iv *iv;

  if (!niter->control.base || !niter->control.step)
    return;

  if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
    return;

  iv = ggc_alloc <control_iv> ();
  iv->base = niter->control.base;
  iv->step = niter->control.step;
  iv->next = loop->control_ivs;
  loop->control_ivs = iv;
}

/* Record the estimate on number of iterations of LOOP based on the fact that
   the induction variable BASE + STEP * i evaluated in STMT does not wrap and
   its values belong to the range <LOW, HIGH>.  REALISTIC is true if the
   estimated number of iterations is expected to be close to the real one.
   UPPER is true if we are sure the induction variable does not wrap.  */

static void
record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
		       tree low, tree high, bool realistic, bool upper)
{
  tree niter_bound, extreme, delta;
  tree type = TREE_TYPE (base), unsigned_type;
  tree orig_base = base;

  if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Induction variable (");
      print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
      fprintf (dump_file, ") ");
      print_generic_expr (dump_file, base, TDF_SLIM);
      fprintf (dump_file, " + ");
      print_generic_expr (dump_file, step, TDF_SLIM);
      fprintf (dump_file, " * iteration does not wrap in statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
      fprintf (dump_file, " in loop %d.\n", loop->num);
    }

  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);
  step = fold_convert (unsigned_type, step);

  if (tree_int_cst_sign_bit (step))
    {
      wide_int min, max;
      extreme = fold_convert (unsigned_type, low);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (high) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && get_range_info (orig_base, &min, &max) == VR_RANGE
	  && wi::gts_p (high, max))
	base = wide_int_to_tree (unsigned_type, max);
      else if (TREE_CODE (base) != INTEGER_CST
	       && dominated_by_p (CDI_DOMINATORS,
				  loop->latch, gimple_bb (stmt)))
	base = fold_convert (unsigned_type, high);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
    }
  else
    {
      wide_int min, max;
      extreme = fold_convert (unsigned_type, high);
      if (TREE_CODE (orig_base) == SSA_NAME
	  && TREE_CODE (low) == INTEGER_CST
	  && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
	  && get_range_info (orig_base, &min, &max) == VR_RANGE
	  && wi::gts_p (min, low))
	base = wide_int_to_tree (unsigned_type, min);
      else if (TREE_CODE (base) != INTEGER_CST
	       && dominated_by_p (CDI_DOMINATORS,
				  loop->latch, gimple_bb (stmt)))
	base = fold_convert (unsigned_type, low);
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
    }

  /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
     would get out of the range.  */
  niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
  widest_int max = derive_constant_upper_bound (niter_bound);
  record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}

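/* For instance, if an IV with step 1 evaluated in STMT is known to stay
   within <0, 99> -- say it indexes int a[100] on every iteration -- and
   its base is 0, then delta = 99 and niter_bound = 99 / 1 = 99, so STMT
   can execute at most 99 + 1 = 100 times, which record_estimate turns
   into an upper bound on the loop's iteration count.  */
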
/* Determine information about number of iterations of a LOOP from the index
   IDX of a data reference accessed in STMT.  RELIABLE is true if STMT is
   guaranteed to be executed in every iteration of LOOP.  Callback for
   for_each_index.  */

struct ilb_data
{
  struct loop *loop;
  gimple *stmt;
};

static bool
idx_infer_loop_bounds (tree base, tree *idx, void *dta)
{
  struct ilb_data *data = (struct ilb_data *) dta;
  tree ev, init, step;
  tree low, high, type, next;
  bool sign, upper = true, at_end = false;
  struct loop *loop = data->loop;

  if (TREE_CODE (base) != ARRAY_REF)
    return true;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (base))
    {
      at_end = true;
      upper = false;
    }

  struct loop *dloop = loop_containing_stmt (data->stmt);
  if (!dloop)
    return true;

  ev = analyze_scalar_evolution (dloop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init
      || !step
      || TREE_CODE (step) != INTEGER_CST
      || integer_zerop (step)
      || tree_contains_chrecs (init, NULL)
      || chrec_contains_symbols_defined_in_loop (init, loop->num))
    return true;

  low = array_ref_low_bound (base);
  high = array_ref_up_bound (base);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high
      || TREE_CODE (high) != INTEGER_CST)
    return true;
  sign = tree_int_cst_sign_bit (step);
  type = TREE_TYPE (step);

  /* The array of length 1 at the end of a structure most likely extends
     beyond its bounds.  */
  if (at_end
      && operand_equal_p (low, high, 0))
    return true;

  /* In case the relevant bound of the array does not fit in type, or
     it does, but bound + step (in type) still belongs into the range of the
     array, the index may wrap and still stay within the range of the array
     (consider e.g. if the array is indexed by the full range of
     unsigned char).

     To make things simpler, we require both bounds to fit into type, although
     there are cases where this would not be strictly necessary.  */
  if (!int_fits_type_p (high, type)
      || !int_fits_type_p (low, type))
    return true;
  low = fold_convert (type, low);
  high = fold_convert (type, high);

  if (sign)
    next = fold_binary (PLUS_EXPR, type, low, step);
  else
    next = fold_binary (PLUS_EXPR, type, high, step);

  if (tree_int_cst_compare (low, next) <= 0
      && tree_int_cst_compare (next, high) <= 0)
    return true;

  /* If access is not executed on every iteration, we must ensure that overflow
     may not make the access valid later.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
      && scev_probably_wraps_p (NULL_TREE,
				initial_condition_in_loop_num (ev, loop->num),
				step, data->stmt, loop, true))
    upper = false;

  record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
  return true;
}

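/* A typical case the callback above catches, sketched in C:

     int a[10];
     for (i = 0; cond; i++)
       a[i] = 0;

   The access function of a[i] is {0, +, 1} in the loop with array bounds
   <0, 9>, so the access is valid for at most 10 iterations, and
   record_nonwrapping_iv bounds the loop accordingly, because touching
   a[10] would be undefined behavior.  */
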
/* Determine information about number of iterations of a LOOP from the bounds
   of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
   STMT is guaranteed to be executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
{
  struct ilb_data data;

  data.loop = loop;
  data.stmt = stmt;
  for_each_index (&ref, idx_infer_loop_bounds, &data);
}

/* Determine information about number of iterations of a LOOP from the way
   arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
   executed in every iteration of LOOP.  */

static void
infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
{
  if (is_gimple_assign (stmt))
    {
      tree op0 = gimple_assign_lhs (stmt);
      tree op1 = gimple_assign_rhs1 (stmt);

      /* For each memory access, analyze its access function
	 and record a bound on the loop iteration domain.  */
      if (REFERENCE_CLASS_P (op0))
	infer_loop_bounds_from_ref (loop, stmt, op0);

      if (REFERENCE_CLASS_P (op1))
	infer_loop_bounds_from_ref (loop, stmt, op1);
    }
  else if (is_gimple_call (stmt))
    {
      tree arg, lhs;
      unsigned i, n = gimple_call_num_args (stmt);

      lhs = gimple_call_lhs (stmt);
      if (lhs && REFERENCE_CLASS_P (lhs))
	infer_loop_bounds_from_ref (loop, stmt, lhs);

      for (i = 0; i < n; i++)
	{
	  arg = gimple_call_arg (stmt, i);
	  if (REFERENCE_CLASS_P (arg))
	    infer_loop_bounds_from_ref (loop, stmt, arg);
	}
    }
}

/* Determine information about number of iterations of a LOOP from the fact
   that pointer arithmetic in STMT does not overflow.  */

static void
infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
{
  tree def, base, step, scev, type, low, high;
  tree var, ptr;

  if (!is_gimple_assign (stmt)
      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
    return;

  def = gimple_assign_lhs (stmt);
  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!nowrap_type_p (type))
    return;

  ptr = gimple_assign_rhs1 (stmt);
  if (!expr_invariant_in_loop_p (loop, ptr))
    return;

  var = gimple_assign_rhs2 (stmt);
  if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
     produce a NULL pointer.  The contrary would mean NULL points to an object,
     while NULL is supposed to compare unequal with the address of all objects.
     Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
     NULL pointer since that would mean wrapping, which we assume here not to
     happen.  So, we can exclude NULL from the valid range of pointer
     arithmetic.  */
  if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
    low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}

/* Determine information about number of iterations of a LOOP from the fact
   that signed arithmetic in STMT does not overflow.  */

static void
infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
{
  tree def, base, step, scev, type, low, high;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return;

  def = gimple_assign_lhs (stmt);

  if (TREE_CODE (def) != SSA_NAME)
    return;

  type = TREE_TYPE (def);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_OVERFLOW_UNDEFINED (type))
    return;

  scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
  if (chrec_contains_undetermined (scev))
    return;

  base = initial_condition_in_loop_num (scev, loop->num);
  step = evolution_part_in_loop_num (scev, loop->num);

  if (!base || !step
      || TREE_CODE (step) != INTEGER_CST
      || tree_contains_chrecs (base, NULL)
      || chrec_contains_symbols_defined_in_loop (base, loop->num))
    return;

  low = lower_bound_in_type (type, type);
  high = upper_bound_in_type (type, type);

  record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
}

/* The following analyzers are extracting information on the bounds
   of LOOP from the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.  */

static void
infer_loop_bounds_from_undefined (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;
  gimple_stmt_iterator bsi;
  basic_block bb;
  bool reliable;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      /* If BB is not executed in each iteration of the loop, we cannot
	 use the operations in it to infer reliable upper bound on the
	 # of iterations of the loop.  However, we can use it as a guess.
	 Reliable guesses come only from array bounds.  */
      reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  gimple *stmt = gsi_stmt (bsi);

	  infer_loop_bounds_from_array (loop, stmt);

	  if (reliable)
	    {
	      infer_loop_bounds_from_signedness (loop, stmt);
	      infer_loop_bounds_from_pointer_arith (loop, stmt);
	    }
	}
    }

  free (bbs);
}

/* Compare wide ints, callback for qsort.  */

static int
wide_int_cmp (const void *p1, const void *p2)
{
  const widest_int *d1 = (const widest_int *) p1;
  const widest_int *d2 = (const widest_int *) p2;
  return wi::cmpu (*d1, *d2);
}

/* Return index of BOUND in BOUNDS array sorted in increasing order.
   Lookup by binary search.  */

static ptrdiff_t
bound_index (vec<widest_int> bounds, const widest_int &bound)
{
  unsigned int end = bounds.length ();
  unsigned int begin = 0;

  /* Find a matching index by means of a binary search.  */
  while (begin != end)
    {
      unsigned int middle = (begin + end) / 2;
      widest_int index = bounds[middle];

      if (index == bound)
	return middle;
      else if (wi::ltu_p (index, bound))
	begin = middle + 1;
      else
	end = middle;
    }
  gcc_unreachable ();
}

/* We recorded loop bounds only for statements dominating loop latch (and thus
   executed each loop iteration).  If there are any bounds on statements not
   dominating the loop latch we can improve the estimate by walking the loop
   body and seeing if every path from loop header to loop latch contains
   some bounded statement.  */

static void
discover_iteration_bound_by_body_walk (struct loop *loop)
{
  struct nb_iter_bound *elt;
  auto_vec<widest_int> bounds;
  vec<vec<basic_block> > queues = vNULL;
  vec<basic_block> queue = vNULL;
  ptrdiff_t queue_index;
  ptrdiff_t latch_index = 0;

  /* Discover what bounds may interest us.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;

      /* Exit terminates loop at given iteration, while non-exits produce
	 undefined effect on the next iteration.  */
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	bounds.safe_push (bound);
    }

  /* Exit early if there is nothing to do.  */
  if (!bounds.exists ())
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");

  /* Sort the bounds in decreasing order.  */
  bounds.qsort (wide_int_cmp);

  /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */

  hash_map<basic_block, ptrdiff_t> bb_bounds;
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      widest_int bound = elt->bound;
      if (!elt->is_exit)
	{
	  bound += 1;
	  /* If an overflow occurred, ignore the result.  */
	  if (bound == 0)
	    continue;
	}

      if (!loop->any_upper_bound
	  || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
	{
	  ptrdiff_t index = bound_index (bounds, bound);
	  ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
	  if (!entry)
	    bb_bounds.put (gimple_bb (elt->stmt), index);
	  else if ((ptrdiff_t)*entry > index)
	    *entry = index;
	}
    }

  hash_map<basic_block, ptrdiff_t> block_priority;

  /* Perform shortest path discovery loop->header ... loop->latch.

     The "distance" is given by the smallest loop bound of basic block
     present in the path and we look for path with largest smallest bound
     on it.

     To avoid the need for a Fibonacci heap on double ints we simply compress
     double ints into indexes to the BOUNDS array and then represent the queue
     as arrays of queues for every index.
     Index of BOUNDS.length() means that the execution of given BB has
     no bounds determined.

     VISITED is a pointer map translating basic block into smallest index
     it was inserted into the priority queue with.  */
  latch_index = -1;

  /* Start walk in loop header with index set to infinite bound.  */
  queue_index = bounds.length ();
  queues.safe_grow_cleared (queue_index + 1);
  queue.safe_push (loop->header);
  queues[queue_index] = queue;
  block_priority.put (loop->header, queue_index);

  for (; queue_index >= 0; queue_index--)
    {
      if (latch_index < queue_index)
	{
	  while (queues[queue_index].length ())
	    {
	      basic_block bb;
	      ptrdiff_t bound_index = queue_index;
	      edge e;
	      edge_iterator ei;

	      queue = queues[queue_index];
	      bb = queue.pop ();

	      /* OK, we later inserted the BB with lower priority, skip it.  */
	      if (*block_priority.get (bb) > queue_index)
		continue;

	      /* See if we can improve the bound.  */
	      ptrdiff_t *entry = bb_bounds.get (bb);
	      if (entry && *entry < bound_index)
		bound_index = *entry;

	      /* Insert successors into the queue, watch for latch edge
		 and record greatest index we saw.  */
	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  bool insert = false;

		  if (loop_exit_edge_p (loop, e))
		    continue;

		  if (e == loop_latch_edge (loop)
		      && latch_index < bound_index)
		    latch_index = bound_index;
		  else if (!(entry = block_priority.get (e->dest)))
		    {
		      insert = true;
		      block_priority.put (e->dest, bound_index);
		    }
		  else if (*entry < bound_index)
		    {
		      insert = true;
		      *entry = bound_index;
		    }

		  if (insert)
		    queues[bound_index].safe_push (e->dest);
		}
	    }
	}
      queues[queue_index].release ();
    }

  gcc_assert (latch_index >= 0);
  if ((unsigned)latch_index < bounds.length ())
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Found better loop bound ");
	  print_decu (bounds[latch_index], dump_file);
	  fprintf (dump_file, "\n");
	}
      record_niter_bound (loop, bounds[latch_index], false, true);
    }

  queues.release ();
}

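/* Example of what the walk above can prove.  Assume int a[10], b[20] and
   a loop body of the form

     if (flag)
       a[i] = 0;
     else
       b[i] = 0;

   Neither store dominates the latch, so record_estimate could not derive
   an upper bound from either of them alone; but every path from the header
   to the latch passes through one of the stores, and taking the weaker of
   the two bounds still proves the loop terminates within 20 iterations.  */
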
/* See if every path across the loop goes through a statement that is known
   to not execute at the last iteration.  In that case we can decrease the
   iteration count by 1.  */

static void
maybe_lower_iteration_bound (struct loop *loop)
{
  hash_set<gimple *> *not_executed_last_iteration = NULL;
  struct nb_iter_bound *elt;
  bool found_exit = false;
  auto_vec<basic_block> queue;
  bitmap visited;

  /* Collect all statements with interesting (i.e. lower than
     nb_iterations_upper_bound) bound on them.

     TODO: Due to the way record_estimate chooses estimates to store, the
     bounds will be always nb_iterations_upper_bound-1.  We can change this
     to record also statements not dominating the loop latch and update the
     walk below to the shortest path algorithm.  */
  for (elt = loop->bounds; elt; elt = elt->next)
    {
      if (!elt->is_exit
	  && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
	{
	  if (!not_executed_last_iteration)
	    not_executed_last_iteration = new hash_set<gimple *>;
	  not_executed_last_iteration->add (elt->stmt);
	}
    }
  if (!not_executed_last_iteration)
    return;

  /* Start DFS walk in the loop header and see if we can reach the
     loop latch or any of the exits (including statements with side
     effects that may terminate the loop otherwise) without visiting
     any of the statements known to have undefined effect on the last
     iteration.  */
  queue.safe_push (loop->header);
  visited = BITMAP_ALLOC (NULL);
  bitmap_set_bit (visited, loop->header->index);
  found_exit = false;

  do
    {
      basic_block bb = queue.pop ();
      gimple_stmt_iterator gsi;
      bool stmt_found = false;

      /* Loop for possible exits and statements bounding the execution.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  if (not_executed_last_iteration->contains (stmt))
	    {
	      stmt_found = true;
	      break;
	    }
	  if (gimple_has_side_effects (stmt))
	    {
	      found_exit = true;
	      break;
	    }
	}
      if (found_exit)
	break;

      /* If no bounding statement is found, continue the walk.  */
      if (!stmt_found)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if (loop_exit_edge_p (loop, e)
		  || e == loop_latch_edge (loop))
		{
		  found_exit = true;
		  break;
		}
	      if (bitmap_set_bit (visited, e->dest->index))
		queue.safe_push (e->dest);
	    }
	}
    }
  while (queue.length () && !found_exit);

  /* If every path through the loop reaches a bounding statement before exit,
     then we know the last iteration of the loop will have undefined effect
     and we can decrease number of iterations.  */

  if (!found_exit)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Reducing loop iteration estimate by 1; "
		 "undefined statement must be executed at the last iteration.\n");
      record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
			  false, true);
    }

  BITMAP_FREE (visited);
  delete not_executed_last_iteration;
}

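/* For example, if the recorded bounds allow the exit test to admit up to
   N latch executions, but every path through the loop body contains a
   statement that would invoke undefined behavior when executed more than
   N - 1 times (say an array store whose index exceeds the array size in
   the last iteration), the final iteration could never complete without
   undefined behavior, so the upper bound may safely be dropped to N - 1.  */
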
/* Records estimates on numbers of iterations of LOOP.  If
   flag_aggressive_loop_optimizations is set, also use estimates derived
   from undefined behavior.  */

static void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
  vec<edge> exits;
  tree niter, type;
  unsigned i;
  struct tree_niter_desc niter_desc;
  edge ex;
  widest_int bound;
  edge likely_exit;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimate_state != EST_NOT_COMPUTED)
    return;

  loop->estimate_state = EST_AVAILABLE;

  /* If we have a measured profile, use it to estimate the number of
     iterations.  Normally this is recorded by branch_prob right after
     reading the profile.  In case we however found a new loop, record the
     information here.

     Explicitly check for profile status so we do not report
     wrong prediction hitrates for guessed loop iterations heuristics.
     Do not recompute already recorded bounds - we ought to be better on
     updating iteration bounds than updating profile in general and thus
     recomputing iteration bounds later in the compilation process will just
     introduce random roundoff errors.  */
  if (!loop->any_estimate
      && loop->header->count != 0
      && profile_status_for_fn (cfun) >= PROFILE_READ)
    {
      gcov_type nit = expected_loop_iterations_unbounded (loop);
      bound = gcov_type_to_wide_int (nit);
      record_niter_bound (loop, bound, true, false);
    }

  /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
     to be constant, we avoid undefined behavior implied bounds and instead
     diagnose those loops with -Waggressive-loop-optimizations.  */
  number_of_latch_executions (loop);

  exits = get_loop_exit_edges (loop);
  likely_exit = single_likely_exit (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
	continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
	niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
			build_int_cst (type, 0),
			niter);
      record_estimate (loop, niter, niter_desc.max,
		       last_stmt (ex->src),
		       true, ex == likely_exit, true);
      record_control_iv (loop, &niter_desc);
    }
  exits.release ();

  if (flag_aggressive_loop_optimizations)
    infer_loop_bounds_from_undefined (loop);

  discover_iteration_bound_by_body_walk (loop);

  maybe_lower_iteration_bound (loop);

  /* If we know the exact number of iterations of this loop, try to
     not break code with undefined behavior by not recording smaller
     maximum number of iterations.  */
  if (loop->nb_iterations
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
      loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
    }
}

/* Sets NIT to the estimated number of executions of the latch of the
   LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return (get_estimated_loop_iterations (loop, nit));
}

/* Similar to estimated_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
estimated_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!estimated_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Sets NIT to an upper bound for the maximum number of executions of the
   latch of the LOOP.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return get_max_loop_iterations (loop, nit);
}

/* Similar to max_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
max_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Sets NIT to a likely upper bound for the maximum number of executions of
   the latch of the LOOP.  If we have no reliable estimate, the function
   returns false, otherwise returns true.  */

bool
likely_max_loop_iterations (struct loop *loop, widest_int *nit)
{
  /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
  if (scev_initialized_p ())
    estimate_numbers_of_iterations_loop (loop);

  return get_likely_max_loop_iterations (loop, nit);
}

/* Similar to likely_max_loop_iterations, but returns the estimate only
   if it fits to HOST_WIDE_INT.  If this is not the case, or the estimate
   on the number of iterations of LOOP could not be derived, returns -1.  */

HOST_WIDE_INT
likely_max_loop_iterations_int (struct loop *loop)
{
  widest_int nit;
  HOST_WIDE_INT hwi_nit;

  if (!likely_max_loop_iterations (loop, &nit))
    return -1;

  if (!wi::fits_shwi_p (nit))
    return -1;
  hwi_nit = nit.to_shwi ();

  return hwi_nit < 0 ? -1 : hwi_nit;
}

/* Returns an estimate for the number of executions of statements
   in the LOOP.  For statements before the loop exit, this exceeds
   the number of execution of the latch by one.  */

HOST_WIDE_INT
estimated_stmt_executions_int (struct loop *loop)
{
  HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
  HOST_WIDE_INT snit;

  if (nit == -1)
    return -1;

  snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);

  /* If the computation overflows, return -1.  */
  return snit < 0 ? -1 : snit;
}

/* Sets NIT to the maximum number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
max_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Sets NIT to the estimated maximum number of executions of the latch of the
   LOOP, plus one.  If we have no likely estimate, the function returns
   false, otherwise returns true.  */

bool
likely_max_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!likely_max_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Sets NIT to the estimated number of executions of the latch of the
   LOOP, plus one.  If we have no reliable estimate, the function returns
   false, otherwise returns true.  */

bool
estimated_stmt_executions (struct loop *loop, widest_int *nit)
{
  widest_int nit_minus_one;

  if (!estimated_loop_iterations (loop, nit))
    return false;

  nit_minus_one = *nit;

  *nit += 1;

  return wi::gtu_p (*nit, nit_minus_one);
}

/* Records estimates on numbers of iterations of loops.  */

void
estimate_numbers_of_iterations (void)
{
  struct loop *loop;

  /* We don't want to issue signed overflow warnings while getting
     loop iteration estimates.  */
  fold_defer_overflow_warnings ();

  FOR_EACH_LOOP (loop, 0)
    {
      estimate_numbers_of_iterations_loop (loop);
    }

  fold_undefer_and_ignore_overflow_warnings ();
}

/* Returns true if statement S1 dominates statement S2.  */

bool
stmt_dominates_stmt_p (gimple *s1, gimple *s2)
{
  basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      gimple_stmt_iterator bsi;

      if (gimple_code (s2) == GIMPLE_PHI)
	return false;

      if (gimple_code (s1) == GIMPLE_PHI)
	return true;

      for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
	if (gsi_stmt (bsi) == s1)
	  return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}

/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the bound on
   the number of executions of the statement NITER_BOUND->stmt recorded in
   NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.

   ??? This code can become quite a CPU hog - we can have many bounds,
   and large basic blocks forcing stmt_dominates_stmt_p to be queried
   many times, so the whole thing is O(n^2) for scev_probably_wraps_p
   invocation (that can be done n times).

   It would make more sense (and give better answers) to remember BB
   bounds computed by discover_iteration_bound_by_body_walk.  */

static bool
n_of_executions_at_most (gimple *stmt,
			 struct nb_iter_bound *niter_bound,
			 tree niter)
{
  widest_int bound = niter_bound->bound;
  tree nit_type = TREE_TYPE (niter), e;
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (nit_type));

  /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
     the number of iterations is small.  */
  if (!wi::fits_to_tree_p (bound, nit_type))
    return false;

  /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
     times.  This means that:

     -- if NITER_BOUND->is_exit is true, then everything after
	it is executed at most NITER_BOUND->bound times.

     -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
	is executed, then NITER_BOUND->stmt is executed as well in the same
	iteration then STMT is executed at most NITER_BOUND->bound + 1 times.

	If we can determine that NITER_BOUND->stmt is always executed
	after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
	We conclude that if both statements belong to the same
	basic block and STMT is before NITER_BOUND->stmt and there are no
	statements with side effects in between.  */

  if (niter_bound->is_exit)
    {
      if (stmt == niter_bound->stmt
	  || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	return false;
      cmp = GE_EXPR;
    }
  else
    {
      if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
	{
	  gimple_stmt_iterator bsi;
	  if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
	      || gimple_code (stmt) == GIMPLE_PHI
	      || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
	    return false;

	  /* By stmt_dominates_stmt_p we already know that STMT appears
	     before NITER_BOUND->STMT.  Still need to test that the loop
	     cannot be terminated by a side effect in between.  */
	  for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
	       gsi_next (&bsi))
	    if (gimple_has_side_effects (gsi_stmt (bsi)))
	      return false;
	  bound += 1;
	  if (bound == 0
	      || !wi::fits_to_tree_p (bound, nit_type))
	    return false;
	}
      cmp = GT_EXPR;
    }

  e = fold_binary (cmp, boolean_type_node,
		   niter, wide_int_to_tree (nit_type, bound));
  return e && integer_nonzerop (e);
}

/* Returns true if the arithmetics in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (ANY_INTEGRAL_TYPE_P (type)
      && TYPE_OVERFLOW_UNDEFINED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}

4157 /* Return true if we can prove LOOP is exited before evolution of induction
4158 variabled {BASE, STEP} overflows with respect to its type bound. */
4161 loop_exits_before_overflow (tree base
, tree step
,
4162 gimple
*at_stmt
, struct loop
*loop
)
4165 struct control_iv
*civ
;
4166 struct nb_iter_bound
*bound
;
4167 tree e
, delta
, step_abs
, unsigned_base
;
4168 tree type
= TREE_TYPE (step
);
4169 tree unsigned_type
, valid_niter
;
4171 /* Don't issue signed overflow warnings. */
4172 fold_defer_overflow_warnings ();
4174 /* Compute the number of iterations before we reach the bound of the
4175 type, and verify that the loop is exited before this occurs. */
4176 unsigned_type
= unsigned_type_for (type
);
4177 unsigned_base
= fold_convert (unsigned_type
, base
);
4179 if (tree_int_cst_sign_bit (step
))
4181 tree extreme
= fold_convert (unsigned_type
,
4182 lower_bound_in_type (type
, type
));
4183 delta
= fold_build2 (MINUS_EXPR
, unsigned_type
, unsigned_base
, extreme
);
4184 step_abs
= fold_build1 (NEGATE_EXPR
, unsigned_type
,
4185 fold_convert (unsigned_type
, step
));
4189 tree extreme
= fold_convert (unsigned_type
,
4190 upper_bound_in_type (type
, type
));
4191 delta
= fold_build2 (MINUS_EXPR
, unsigned_type
, extreme
, unsigned_base
);
4192 step_abs
= fold_convert (unsigned_type
, step
);
4195 valid_niter
= fold_build2 (FLOOR_DIV_EXPR
, unsigned_type
, delta
, step_abs
);
4197 estimate_numbers_of_iterations_loop (loop
);
4199 if (max_loop_iterations (loop
, &niter
)
4200 && wi::fits_to_tree_p (niter
, TREE_TYPE (valid_niter
))
4201 && (e
= fold_binary (GT_EXPR
, boolean_type_node
, valid_niter
,
4202 wide_int_to_tree (TREE_TYPE (valid_niter
),
4204 && integer_nonzerop (e
))
4206 fold_undefer_and_ignore_overflow_warnings ();
4210 for (bound
= loop
->bounds
; bound
; bound
= bound
->next
)
4212 if (n_of_executions_at_most (at_stmt
, bound
, valid_niter
))
4214 fold_undefer_and_ignore_overflow_warnings ();
4218 fold_undefer_and_ignore_overflow_warnings ();
  /* Try to prove loop is exited before {base, step} overflows with the
     help of analyzed loop control IV.  This is done only for IVs with
     constant step because otherwise we don't have the information.  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      for (civ = loop->control_ivs; civ; civ = civ->next)
        {
          enum tree_code code;
          tree civ_type = TREE_TYPE (civ->step);

          /* Have to consider type difference because operand_equal_p ignores
             that for constants.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
              || element_precision (type) != element_precision (civ_type))
            continue;

          /* Only consider control IV with same step.  */
          if (!operand_equal_p (step, civ->step, 0))
            continue;

          /* Done proving if this is a no-overflow control IV.  */
          if (operand_equal_p (base, civ->base, 0))
            return true;

          /* Control IV is recorded after expanding simple operations,
             so here we expand base and compare it too.  */
          tree expanded_base = expand_simple_operations (base);
          if (operand_equal_p (expanded_base, civ->base, 0))
            return true;

          /* If this is a before-stepping control IV, in other words, we have

               {civ_base, step} = {base + step, step}

             Because civ {base + step, step} doesn't overflow during loop
             iterations, {base, step} will not overflow if we can prove the
             operation "base + step" does not overflow.  Specifically, we try
             to prove the conditions below are satisfied:

               base <= UPPER_BOUND (type) - step  ;;step > 0
               base >= LOWER_BOUND (type) - step  ;;step < 0

             by proving the reverse conditions are false using the loop's
             initial conditions.  */
          if (POINTER_TYPE_P (TREE_TYPE (base)))
            code = POINTER_PLUS_EXPR;
          else
            code = PLUS_EXPR;

          tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
          tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
                                               expanded_base, step);
          if (operand_equal_p (stepped, civ->base, 0)
              || operand_equal_p (expanded_stepped, civ->base, 0))
            {
              tree extreme;

              if (tree_int_cst_sign_bit (step))
                {
                  code = LT_EXPR;
                  extreme = lower_bound_in_type (type, type);
                }
              else
                {
                  code = GT_EXPR;
                  extreme = upper_bound_in_type (type, type);
                }
              extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
              e = fold_build2 (code, boolean_type_node, base, extreme);
              e = simplify_using_initial_conditions (loop, e);
              if (integer_zerop (e))
                return true;
            }
        }
    }

  return false;
}
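/* A concrete instance of the proof above (hypothetical SSA names): with
   type == unsigned char, base == n_5, step == 1 and a recorded control
   IV {n_5 + 1, 1}, the reverse condition built is (n_5 > 254).  If the
   loop entry is dominated by e.g. "if (n_5 < 100)",
   simplify_using_initial_conditions folds that condition to zero and
   {n_5, 1} is proven not to overflow.  */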
/* VAR is a scev variable whose evolution part is constant STEP, this function
   proves that VAR can't overflow by using value range info.  If VAR's value
   range is [MIN, MAX], it can be proven by:
     MAX + step doesn't overflow    ; if step > 0
   or
     MIN + step doesn't underflow   ; if step < 0.

   We can only do this if VAR is computed in every loop iteration, i.e, VAR's
   definition has to dominate the loop latch.  Consider the example below:

     {
       unsigned int i;

       <bb 3>:

       <bb 4>:
       # RANGE [0, 4294967294] NONZERO 65535
       # i_21 = PHI <0(3), i_18(9)>
       if (i_21 != 0)
         goto <bb 6>;
       else
         goto <bb 8>;

       <bb 6>:
       # RANGE [0, 65533] NONZERO 65535
       _6 = i_21 + 4294967295;
       # RANGE [0, 65533] NONZERO 65535
       _7 = (long unsigned int) _6;
       # RANGE [0, 524264] NONZERO 524280
       _8 = _7 * 8;
       # PT = nonlocal escaped
       _9 = a_14 + _8;
       *_9 = 0;

       <bb 8>:
       # RANGE [1, 65535] NONZERO 65535
       i_18 = i_21 + 1;
       if (i_18 >= 65535)
         goto <bb 10>;
       else
         goto <bb 9>;

       <bb 9>:
       goto <bb 4>;

       <bb 10>:
       return;
     }

   VAR _6 doesn't overflow only with the pre-condition (i_21 != 0), so we
   can't use _6 to prove no-overflow for _7.  In fact, var _7 takes the value
   sequence (4294967295, 0, 1, ..., 65533) in the loop's lifetime, rather
   than (4294967295, 4294967296, ...).  */

static bool
scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
{
  tree type;
  wide_int minv, maxv, diff, step_wi;
  enum value_range_type rtype;

  if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
    return false;

  /* Check if VAR evaluates in every loop iteration.  It's not the case
     if VAR is a default definition or does not dominate the loop's latch.  */
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
  if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
    return false;

  rtype = get_range_info (var, &minv, &maxv);
  if (rtype != VR_RANGE)
    return false;

  /* VAR is a scev whose evolution part is STEP and value range info
     is [MIN, MAX], we can prove its no-overflowness by the conditions:

       type_MAX - MAX >= |step|   ; if step > 0
       MIN - type_MIN >= |step|   ; if step < 0.

     Otherwise VAR must take a value outside of its value range, which is
     not true.  */
  step_wi = step;
  type = TREE_TYPE (var);
  if (tree_int_cst_sign_bit (step))
    {
      diff = lower_bound_in_type (type, type);
      diff = minv - diff;
      step_wi = - step_wi;
    }
  else
    {
      diff = upper_bound_in_type (type, type);
      diff = diff - maxv;
    }

  return (wi::geu_p (diff, step_wi));
}
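/* A concrete instance (illustrative numbers only): for an unsigned short
   VAR with range info [0, 65533] and STEP == 1, diff == 65535 - 65533
   == 2 and wi::geu_p (2, 1) holds, so VAR + 1 provably stays inside the
   type and cannot wrap.  */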
/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPE's bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetics in C does not overflow).

   If VAR is an ssa variable, this function also returns false if VAR can
   be proven not to overflow with value range info.  */

bool
scev_probably_wraps_p (tree var, tree base, tree step,
                       gimple *at_stmt, struct loop *loop,
                       bool use_overflow_semantics)
{
  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetics:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step))
    return true;

  if (integer_zerop (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetics does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
    return false;

  /* To be able to use estimates on number of iterations of the loop,
     we must have an upper bound on the absolute value of the step.  */
  if (TREE_CODE (step) != INTEGER_CST)
    return true;

  /* Check if VAR can be proven not to overflow with value range info.  */
  if (var && TREE_CODE (var) == SSA_NAME
      && scev_var_range_cant_overflow (var, step, loop))
    return false;

  if (loop_exits_before_overflow (base, step, at_stmt, loop))
    return false;

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
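/* Typical use (an illustrative sketch, not a quote of any caller; give_up
   is a hypothetical helper): a client analyzing the affine evolution
   {base, +, step} of SSA name v_1 would ask

     if (scev_probably_wraps_p (v_1, base, step, at_stmt, loop,
                                use_overflow_semantics))
       give_up ();

   and rely on non-wrapping arithmetic only when the call returns
   false.  */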
/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates_loop (struct loop *loop)
{
  struct control_iv *civ;
  struct nb_iter_bound *bound;

  loop->nb_iterations = NULL;
  loop->estimate_state = EST_NOT_COMPUTED;
  for (bound = loop->bounds; bound;)
    {
      struct nb_iter_bound *next = bound->next;
      ggc_free (bound);
      bound = next;
    }
  loop->bounds = NULL;

  for (civ = loop->control_ivs; civ;)
    {
      struct control_iv *next = civ->next;
      ggc_free (civ);
      civ = next;
    }
  loop->control_ivs = NULL;
}
/* Frees the information on upper bounds on numbers of iterations of loops.  */

void
free_numbers_of_iterations_estimates (function *fn)
{
  struct loop *loop;

  FOR_EACH_LOOP_FN (fn, loop, 0)
    {
      free_numbers_of_iterations_estimates_loop (loop);
    }
}
/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}
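/* For instance (illustrative only; n_5 is a hypothetical SSA name): if
   LOOP->nb_iterations holds the expression n_5 + 7 and propagation learns
   n_5 == 3, calling substitute_in_loop_info (loop, n_5, 3) leaves the
   folded constant 10 in LOOP->nb_iterations.  */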