DWARF array bounds missing from C++ array definitions
[official-gcc.git] / gcc / tree-ssa-loop-niter.c
blobcd2ced369719c37afd4aac08ff360719d7702e42
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2019 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "diagnostic-core.h"
31 #include "stor-layout.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "intl.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-niter.h"
40 #include "tree-ssa-loop.h"
41 #include "cfgloop.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "params.h"
45 #include "tree-dfa.h"
48 /* The maximum number of dominator BBs we search for conditions
49 of loop header copies we use for simplifying a conditional
50 expression. */
51 #define MAX_DOMINATORS_TO_WALK 8
55 Analysis of number of iterations of an affine exit test.
59 /* Bounds on some value, BELOW <= X <= UP. */
61 struct bounds
63 mpz_t below, up;
66 static bool number_of_iterations_popcount (loop_p loop, edge exit,
67 enum tree_code code,
68 class tree_niter_desc *niter);
71 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
73 static void
74 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
76 tree type = TREE_TYPE (expr);
77 tree op0, op1;
78 bool negate = false;
80 *var = expr;
81 mpz_set_ui (offset, 0);
83 switch (TREE_CODE (expr))
85 case MINUS_EXPR:
86 negate = true;
87 /* Fallthru. */
89 case PLUS_EXPR:
90 case POINTER_PLUS_EXPR:
91 op0 = TREE_OPERAND (expr, 0);
92 op1 = TREE_OPERAND (expr, 1);
94 if (TREE_CODE (op1) != INTEGER_CST)
95 break;
97 *var = op0;
98 /* Always sign extend the offset. */
99 wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
100 if (negate)
101 mpz_neg (offset, offset);
102 break;
104 case INTEGER_CST:
105 *var = build_int_cst_type (type, 0);
106 wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
107 break;
109 default:
110 break;
114 /* From condition C0 CMP C1 derives information regarding the value range
115 of VAR, which is of TYPE. Results are stored in to BELOW and UP. */
117 static void
118 refine_value_range_using_guard (tree type, tree var,
119 tree c0, enum tree_code cmp, tree c1,
120 mpz_t below, mpz_t up)
122 tree varc0, varc1, ctype;
123 mpz_t offc0, offc1;
124 mpz_t mint, maxt, minc1, maxc1;
125 wide_int minv, maxv;
126 bool no_wrap = nowrap_type_p (type);
127 bool c0_ok, c1_ok;
128 signop sgn = TYPE_SIGN (type);
130 switch (cmp)
132 case LT_EXPR:
133 case LE_EXPR:
134 case GT_EXPR:
135 case GE_EXPR:
136 STRIP_SIGN_NOPS (c0);
137 STRIP_SIGN_NOPS (c1);
138 ctype = TREE_TYPE (c0);
139 if (!useless_type_conversion_p (ctype, type))
140 return;
142 break;
144 case EQ_EXPR:
145 /* We could derive quite precise information from EQ_EXPR, however,
146 such a guard is unlikely to appear, so we do not bother with
147 handling it. */
148 return;
150 case NE_EXPR:
151 /* NE_EXPR comparisons do not contain much of useful information,
152 except for cases of comparing with bounds. */
153 if (TREE_CODE (c1) != INTEGER_CST
154 || !INTEGRAL_TYPE_P (type))
155 return;
157 /* Ensure that the condition speaks about an expression in the same
158 type as X and Y. */
159 ctype = TREE_TYPE (c0);
160 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
161 return;
162 c0 = fold_convert (type, c0);
163 c1 = fold_convert (type, c1);
165 if (operand_equal_p (var, c0, 0))
167 mpz_t valc1;
169 /* Case of comparing VAR with its below/up bounds. */
170 mpz_init (valc1);
171 wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
172 if (mpz_cmp (valc1, below) == 0)
173 cmp = GT_EXPR;
174 if (mpz_cmp (valc1, up) == 0)
175 cmp = LT_EXPR;
177 mpz_clear (valc1);
179 else
181 /* Case of comparing with the bounds of the type. */
182 wide_int min = wi::min_value (type);
183 wide_int max = wi::max_value (type);
185 if (wi::to_wide (c1) == min)
186 cmp = GT_EXPR;
187 if (wi::to_wide (c1) == max)
188 cmp = LT_EXPR;
191 /* Quick return if no useful information. */
192 if (cmp == NE_EXPR)
193 return;
195 break;
197 default:
198 return;
201 mpz_init (offc0);
202 mpz_init (offc1);
203 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
204 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
206 /* We are only interested in comparisons of expressions based on VAR. */
207 if (operand_equal_p (var, varc1, 0))
209 std::swap (varc0, varc1);
210 mpz_swap (offc0, offc1);
211 cmp = swap_tree_comparison (cmp);
213 else if (!operand_equal_p (var, varc0, 0))
215 mpz_clear (offc0);
216 mpz_clear (offc1);
217 return;
220 mpz_init (mint);
221 mpz_init (maxt);
222 get_type_static_bounds (type, mint, maxt);
223 mpz_init (minc1);
224 mpz_init (maxc1);
225 /* Setup range information for varc1. */
226 if (integer_zerop (varc1))
228 wi::to_mpz (0, minc1, TYPE_SIGN (type));
229 wi::to_mpz (0, maxc1, TYPE_SIGN (type));
231 else if (TREE_CODE (varc1) == SSA_NAME
232 && INTEGRAL_TYPE_P (type)
233 && get_range_info (varc1, &minv, &maxv) == VR_RANGE)
235 gcc_assert (wi::le_p (minv, maxv, sgn));
236 wi::to_mpz (minv, minc1, sgn);
237 wi::to_mpz (maxv, maxc1, sgn);
239 else
241 mpz_set (minc1, mint);
242 mpz_set (maxc1, maxt);
245 /* Compute valid range information for varc1 + offc1. Note nothing
246 useful can be derived if it overflows or underflows. Overflow or
247 underflow could happen when:
249 offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
250 offc1 < 0 && varc1 + offc1 < MIN_VAL (type). */
251 mpz_add (minc1, minc1, offc1);
252 mpz_add (maxc1, maxc1, offc1);
253 c1_ok = (no_wrap
254 || mpz_sgn (offc1) == 0
255 || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
256 || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
257 if (!c1_ok)
258 goto end;
260 if (mpz_cmp (minc1, mint) < 0)
261 mpz_set (minc1, mint);
262 if (mpz_cmp (maxc1, maxt) > 0)
263 mpz_set (maxc1, maxt);
265 if (cmp == LT_EXPR)
267 cmp = LE_EXPR;
268 mpz_sub_ui (maxc1, maxc1, 1);
270 if (cmp == GT_EXPR)
272 cmp = GE_EXPR;
273 mpz_add_ui (minc1, minc1, 1);
276 /* Compute range information for varc0. If there is no overflow,
277 the condition implied that
279 (varc0) cmp (varc1 + offc1 - offc0)
281 We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
282 or the below bound if cmp is GE_EXPR.
284 To prove there is no overflow/underflow, we need to check below
285 four cases:
286 1) cmp == LE_EXPR && offc0 > 0
288 (varc0 + offc0) doesn't overflow
289 && (varc1 + offc1 - offc0) doesn't underflow
291 2) cmp == LE_EXPR && offc0 < 0
293 (varc0 + offc0) doesn't underflow
294 && (varc1 + offc1 - offc0) doesn't overfloe
296 In this case, (varc0 + offc0) will never underflow if we can
297 prove (varc1 + offc1 - offc0) doesn't overflow.
299 3) cmp == GE_EXPR && offc0 < 0
301 (varc0 + offc0) doesn't underflow
302 && (varc1 + offc1 - offc0) doesn't overflow
304 4) cmp == GE_EXPR && offc0 > 0
306 (varc0 + offc0) doesn't overflow
307 && (varc1 + offc1 - offc0) doesn't underflow
309 In this case, (varc0 + offc0) will never overflow if we can
310 prove (varc1 + offc1 - offc0) doesn't underflow.
312 Note we only handle case 2 and 4 in below code. */
314 mpz_sub (minc1, minc1, offc0);
315 mpz_sub (maxc1, maxc1, offc0);
316 c0_ok = (no_wrap
317 || mpz_sgn (offc0) == 0
318 || (cmp == LE_EXPR
319 && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
320 || (cmp == GE_EXPR
321 && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
322 if (!c0_ok)
323 goto end;
325 if (cmp == LE_EXPR)
327 if (mpz_cmp (up, maxc1) > 0)
328 mpz_set (up, maxc1);
330 else
332 if (mpz_cmp (below, minc1) < 0)
333 mpz_set (below, minc1);
336 end:
337 mpz_clear (mint);
338 mpz_clear (maxt);
339 mpz_clear (minc1);
340 mpz_clear (maxc1);
341 mpz_clear (offc0);
342 mpz_clear (offc1);
345 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
346 in TYPE to MIN and MAX. */
348 static void
349 determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
350 mpz_t min, mpz_t max)
352 int cnt = 0;
353 mpz_t minm, maxm;
354 basic_block bb;
355 wide_int minv, maxv;
356 enum value_range_kind rtype = VR_VARYING;
358 /* If the expression is a constant, we know its value exactly. */
359 if (integer_zerop (var))
361 mpz_set (min, off);
362 mpz_set (max, off);
363 return;
366 get_type_static_bounds (type, min, max);
368 /* See if we have some range info from VRP. */
369 if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
371 edge e = loop_preheader_edge (loop);
372 signop sgn = TYPE_SIGN (type);
373 gphi_iterator gsi;
375 /* Either for VAR itself... */
376 rtype = get_range_info (var, &minv, &maxv);
377 /* Or for PHI results in loop->header where VAR is used as
378 PHI argument from the loop preheader edge. */
379 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
381 gphi *phi = gsi.phi ();
382 wide_int minc, maxc;
383 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
384 && (get_range_info (gimple_phi_result (phi), &minc, &maxc)
385 == VR_RANGE))
387 if (rtype != VR_RANGE)
389 rtype = VR_RANGE;
390 minv = minc;
391 maxv = maxc;
393 else
395 minv = wi::max (minv, minc, sgn);
396 maxv = wi::min (maxv, maxc, sgn);
397 /* If the PHI result range are inconsistent with
398 the VAR range, give up on looking at the PHI
399 results. This can happen if VR_UNDEFINED is
400 involved. */
401 if (wi::gt_p (minv, maxv, sgn))
403 rtype = get_range_info (var, &minv, &maxv);
404 break;
409 mpz_init (minm);
410 mpz_init (maxm);
411 if (rtype != VR_RANGE)
413 mpz_set (minm, min);
414 mpz_set (maxm, max);
416 else
418 gcc_assert (wi::le_p (minv, maxv, sgn));
419 wi::to_mpz (minv, minm, sgn);
420 wi::to_mpz (maxv, maxm, sgn);
422 /* Now walk the dominators of the loop header and use the entry
423 guards to refine the estimates. */
424 for (bb = loop->header;
425 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
426 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
428 edge e;
429 tree c0, c1;
430 gimple *cond;
431 enum tree_code cmp;
433 if (!single_pred_p (bb))
434 continue;
435 e = single_pred_edge (bb);
437 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
438 continue;
440 cond = last_stmt (e->src);
441 c0 = gimple_cond_lhs (cond);
442 cmp = gimple_cond_code (cond);
443 c1 = gimple_cond_rhs (cond);
445 if (e->flags & EDGE_FALSE_VALUE)
446 cmp = invert_tree_comparison (cmp, false);
448 refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
449 ++cnt;
452 mpz_add (minm, minm, off);
453 mpz_add (maxm, maxm, off);
454 /* If the computation may not wrap or off is zero, then this
455 is always fine. If off is negative and minv + off isn't
456 smaller than type's minimum, or off is positive and
457 maxv + off isn't bigger than type's maximum, use the more
458 precise range too. */
459 if (nowrap_type_p (type)
460 || mpz_sgn (off) == 0
461 || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
462 || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
464 mpz_set (min, minm);
465 mpz_set (max, maxm);
466 mpz_clear (minm);
467 mpz_clear (maxm);
468 return;
470 mpz_clear (minm);
471 mpz_clear (maxm);
474 /* If the computation may wrap, we know nothing about the value, except for
475 the range of the type. */
476 if (!nowrap_type_p (type))
477 return;
479 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
480 add it to MIN, otherwise to MAX. */
481 if (mpz_sgn (off) < 0)
482 mpz_add (max, max, off);
483 else
484 mpz_add (min, min, off);
487 /* Stores the bounds on the difference of the values of the expressions
488 (var + X) and (var + Y), computed in TYPE, to BNDS. */
490 static void
491 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
492 bounds *bnds)
494 int rel = mpz_cmp (x, y);
495 bool may_wrap = !nowrap_type_p (type);
496 mpz_t m;
498 /* If X == Y, then the expressions are always equal.
499 If X > Y, there are the following possibilities:
500 a) neither of var + X and var + Y overflow or underflow, or both of
501 them do. Then their difference is X - Y.
502 b) var + X overflows, and var + Y does not. Then the values of the
503 expressions are var + X - M and var + Y, where M is the range of
504 the type, and their difference is X - Y - M.
505 c) var + Y underflows and var + X does not. Their difference again
506 is M - X + Y.
507 Therefore, if the arithmetics in type does not overflow, then the
508 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
509 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
510 (X - Y, X - Y + M). */
512 if (rel == 0)
514 mpz_set_ui (bnds->below, 0);
515 mpz_set_ui (bnds->up, 0);
516 return;
519 mpz_init (m);
520 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
521 mpz_add_ui (m, m, 1);
522 mpz_sub (bnds->up, x, y);
523 mpz_set (bnds->below, bnds->up);
525 if (may_wrap)
527 if (rel > 0)
528 mpz_sub (bnds->below, bnds->below, m);
529 else
530 mpz_add (bnds->up, bnds->up, m);
533 mpz_clear (m);
536 /* From condition C0 CMP C1 derives information regarding the
537 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
538 and stores it to BNDS. */
540 static void
541 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
542 tree vary, mpz_t offy,
543 tree c0, enum tree_code cmp, tree c1,
544 bounds *bnds)
546 tree varc0, varc1, ctype;
547 mpz_t offc0, offc1, loffx, loffy, bnd;
548 bool lbound = false;
549 bool no_wrap = nowrap_type_p (type);
550 bool x_ok, y_ok;
552 switch (cmp)
554 case LT_EXPR:
555 case LE_EXPR:
556 case GT_EXPR:
557 case GE_EXPR:
558 STRIP_SIGN_NOPS (c0);
559 STRIP_SIGN_NOPS (c1);
560 ctype = TREE_TYPE (c0);
561 if (!useless_type_conversion_p (ctype, type))
562 return;
564 break;
566 case EQ_EXPR:
567 /* We could derive quite precise information from EQ_EXPR, however, such
568 a guard is unlikely to appear, so we do not bother with handling
569 it. */
570 return;
572 case NE_EXPR:
573 /* NE_EXPR comparisons do not contain much of useful information, except for
574 special case of comparing with the bounds of the type. */
575 if (TREE_CODE (c1) != INTEGER_CST
576 || !INTEGRAL_TYPE_P (type))
577 return;
579 /* Ensure that the condition speaks about an expression in the same type
580 as X and Y. */
581 ctype = TREE_TYPE (c0);
582 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
583 return;
584 c0 = fold_convert (type, c0);
585 c1 = fold_convert (type, c1);
587 if (TYPE_MIN_VALUE (type)
588 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
590 cmp = GT_EXPR;
591 break;
593 if (TYPE_MAX_VALUE (type)
594 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
596 cmp = LT_EXPR;
597 break;
600 return;
601 default:
602 return;
605 mpz_init (offc0);
606 mpz_init (offc1);
607 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
608 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
610 /* We are only interested in comparisons of expressions based on VARX and
611 VARY. TODO -- we might also be able to derive some bounds from
612 expressions containing just one of the variables. */
614 if (operand_equal_p (varx, varc1, 0))
616 std::swap (varc0, varc1);
617 mpz_swap (offc0, offc1);
618 cmp = swap_tree_comparison (cmp);
621 if (!operand_equal_p (varx, varc0, 0)
622 || !operand_equal_p (vary, varc1, 0))
623 goto end;
625 mpz_init_set (loffx, offx);
626 mpz_init_set (loffy, offy);
628 if (cmp == GT_EXPR || cmp == GE_EXPR)
630 std::swap (varx, vary);
631 mpz_swap (offc0, offc1);
632 mpz_swap (loffx, loffy);
633 cmp = swap_tree_comparison (cmp);
634 lbound = true;
637 /* If there is no overflow, the condition implies that
639 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
641 The overflows and underflows may complicate things a bit; each
642 overflow decreases the appropriate offset by M, and underflow
643 increases it by M. The above inequality would not necessarily be
644 true if
646 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
647 VARX + OFFC0 overflows, but VARX + OFFX does not.
648 This may only happen if OFFX < OFFC0.
649 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
650 VARY + OFFC1 underflows and VARY + OFFY does not.
651 This may only happen if OFFY > OFFC1. */
653 if (no_wrap)
655 x_ok = true;
656 y_ok = true;
658 else
660 x_ok = (integer_zerop (varx)
661 || mpz_cmp (loffx, offc0) >= 0);
662 y_ok = (integer_zerop (vary)
663 || mpz_cmp (loffy, offc1) <= 0);
666 if (x_ok && y_ok)
668 mpz_init (bnd);
669 mpz_sub (bnd, loffx, loffy);
670 mpz_add (bnd, bnd, offc1);
671 mpz_sub (bnd, bnd, offc0);
673 if (cmp == LT_EXPR)
674 mpz_sub_ui (bnd, bnd, 1);
676 if (lbound)
678 mpz_neg (bnd, bnd);
679 if (mpz_cmp (bnds->below, bnd) < 0)
680 mpz_set (bnds->below, bnd);
682 else
684 if (mpz_cmp (bnd, bnds->up) < 0)
685 mpz_set (bnds->up, bnd);
687 mpz_clear (bnd);
690 mpz_clear (loffx);
691 mpz_clear (loffy);
692 end:
693 mpz_clear (offc0);
694 mpz_clear (offc1);
697 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
698 The subtraction is considered to be performed in arbitrary precision,
699 without overflows.
701 We do not attempt to be too clever regarding the value ranges of X and
702 Y; most of the time, they are just integers or ssa names offsetted by
703 integer. However, we try to use the information contained in the
704 comparisons before the loop (usually created by loop header copying). */
706 static void
707 bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
709 tree type = TREE_TYPE (x);
710 tree varx, vary;
711 mpz_t offx, offy;
712 mpz_t minx, maxx, miny, maxy;
713 int cnt = 0;
714 edge e;
715 basic_block bb;
716 tree c0, c1;
717 gimple *cond;
718 enum tree_code cmp;
720 /* Get rid of unnecessary casts, but preserve the value of
721 the expressions. */
722 STRIP_SIGN_NOPS (x);
723 STRIP_SIGN_NOPS (y);
725 mpz_init (bnds->below);
726 mpz_init (bnds->up);
727 mpz_init (offx);
728 mpz_init (offy);
729 split_to_var_and_offset (x, &varx, offx);
730 split_to_var_and_offset (y, &vary, offy);
732 if (!integer_zerop (varx)
733 && operand_equal_p (varx, vary, 0))
735 /* Special case VARX == VARY -- we just need to compare the
736 offsets. The matters are a bit more complicated in the
737 case addition of offsets may wrap. */
738 bound_difference_of_offsetted_base (type, offx, offy, bnds);
740 else
742 /* Otherwise, use the value ranges to determine the initial
743 estimates on below and up. */
744 mpz_init (minx);
745 mpz_init (maxx);
746 mpz_init (miny);
747 mpz_init (maxy);
748 determine_value_range (loop, type, varx, offx, minx, maxx);
749 determine_value_range (loop, type, vary, offy, miny, maxy);
751 mpz_sub (bnds->below, minx, maxy);
752 mpz_sub (bnds->up, maxx, miny);
753 mpz_clear (minx);
754 mpz_clear (maxx);
755 mpz_clear (miny);
756 mpz_clear (maxy);
759 /* If both X and Y are constants, we cannot get any more precise. */
760 if (integer_zerop (varx) && integer_zerop (vary))
761 goto end;
763 /* Now walk the dominators of the loop header and use the entry
764 guards to refine the estimates. */
765 for (bb = loop->header;
766 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
767 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
769 if (!single_pred_p (bb))
770 continue;
771 e = single_pred_edge (bb);
773 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
774 continue;
776 cond = last_stmt (e->src);
777 c0 = gimple_cond_lhs (cond);
778 cmp = gimple_cond_code (cond);
779 c1 = gimple_cond_rhs (cond);
781 if (e->flags & EDGE_FALSE_VALUE)
782 cmp = invert_tree_comparison (cmp, false);
784 refine_bounds_using_guard (type, varx, offx, vary, offy,
785 c0, cmp, c1, bnds);
786 ++cnt;
789 end:
790 mpz_clear (offx);
791 mpz_clear (offy);
794 /* Update the bounds in BNDS that restrict the value of X to the bounds
795 that restrict the value of X + DELTA. X can be obtained as a
796 difference of two values in TYPE. */
798 static void
799 bounds_add (bounds *bnds, const widest_int &delta, tree type)
801 mpz_t mdelta, max;
803 mpz_init (mdelta);
804 wi::to_mpz (delta, mdelta, SIGNED);
806 mpz_init (max);
807 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
809 mpz_add (bnds->up, bnds->up, mdelta);
810 mpz_add (bnds->below, bnds->below, mdelta);
812 if (mpz_cmp (bnds->up, max) > 0)
813 mpz_set (bnds->up, max);
815 mpz_neg (max, max);
816 if (mpz_cmp (bnds->below, max) < 0)
817 mpz_set (bnds->below, max);
819 mpz_clear (mdelta);
820 mpz_clear (max);
823 /* Update the bounds in BNDS that restrict the value of X to the bounds
824 that restrict the value of -X. */
826 static void
827 bounds_negate (bounds *bnds)
829 mpz_t tmp;
831 mpz_init_set (tmp, bnds->up);
832 mpz_neg (bnds->up, bnds->below);
833 mpz_neg (bnds->below, tmp);
834 mpz_clear (tmp);
837 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
839 static tree
840 inverse (tree x, tree mask)
842 tree type = TREE_TYPE (x);
843 tree rslt;
844 unsigned ctr = tree_floor_log2 (mask);
846 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
848 unsigned HOST_WIDE_INT ix;
849 unsigned HOST_WIDE_INT imask;
850 unsigned HOST_WIDE_INT irslt = 1;
852 gcc_assert (cst_and_fits_in_hwi (x));
853 gcc_assert (cst_and_fits_in_hwi (mask));
855 ix = int_cst_value (x);
856 imask = int_cst_value (mask);
858 for (; ctr; ctr--)
860 irslt *= ix;
861 ix *= ix;
863 irslt &= imask;
865 rslt = build_int_cst_type (type, irslt);
867 else
869 rslt = build_int_cst (type, 1);
870 for (; ctr; ctr--)
872 rslt = int_const_binop (MULT_EXPR, rslt, x);
873 x = int_const_binop (MULT_EXPR, x, x);
875 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
878 return rslt;
881 /* Derives the upper bound BND on the number of executions of loop with exit
882 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
883 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
884 that the loop ends through this exit, i.e., the induction variable ever
885 reaches the value of C.
887 The value C is equal to final - base, where final and base are the final and
888 initial value of the actual induction variable in the analysed loop. BNDS
889 bounds the value of this difference when computed in signed type with
890 unbounded range, while the computation of C is performed in an unsigned
891 type with the range matching the range of the type of the induction variable.
892 In particular, BNDS.up contains an upper bound on C in the following cases:
893 -- if the iv must reach its final value without overflow, i.e., if
894 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
895 -- if final >= base, which we know to hold when BNDS.below >= 0. */
897 static void
898 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
899 bounds *bnds, bool exit_must_be_taken)
901 widest_int max;
902 mpz_t d;
903 tree type = TREE_TYPE (c);
904 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
905 || mpz_sgn (bnds->below) >= 0);
907 if (integer_onep (s)
908 || (TREE_CODE (c) == INTEGER_CST
909 && TREE_CODE (s) == INTEGER_CST
910 && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
911 TYPE_SIGN (type)) == 0)
912 || (TYPE_OVERFLOW_UNDEFINED (type)
913 && multiple_of_p (type, c, s)))
915 /* If C is an exact multiple of S, then its value will be reached before
916 the induction variable overflows (unless the loop is exited in some
917 other way before). Note that the actual induction variable in the
918 loop (which ranges from base to final instead of from 0 to C) may
919 overflow, in which case BNDS.up will not be giving a correct upper
920 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
921 no_overflow = true;
922 exit_must_be_taken = true;
925 /* If the induction variable can overflow, the number of iterations is at
926 most the period of the control variable (or infinite, but in that case
927 the whole # of iterations analysis will fail). */
928 if (!no_overflow)
930 max = wi::mask <widest_int> (TYPE_PRECISION (type)
931 - wi::ctz (wi::to_wide (s)), false);
932 wi::to_mpz (max, bnd, UNSIGNED);
933 return;
936 /* Now we know that the induction variable does not overflow, so the loop
937 iterates at most (range of type / S) times. */
938 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
940 /* If the induction variable is guaranteed to reach the value of C before
941 overflow, ... */
942 if (exit_must_be_taken)
944 /* ... then we can strengthen this to C / S, and possibly we can use
945 the upper bound on C given by BNDS. */
946 if (TREE_CODE (c) == INTEGER_CST)
947 wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
948 else if (bnds_u_valid)
949 mpz_set (bnd, bnds->up);
952 mpz_init (d);
953 wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
954 mpz_fdiv_q (bnd, bnd, d);
955 mpz_clear (d);
958 /* Determines number of iterations of loop whose ending condition
959 is IV <> FINAL. TYPE is the type of the iv. The number of
960 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
961 we know that the exit must be taken eventually, i.e., that the IV
962 ever reaches the value FINAL (we derived this earlier, and possibly set
963 NITER->assumptions to make sure this is the case). BNDS contains the
964 bounds on the difference FINAL - IV->base. */
966 static bool
967 number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
968 tree final, class tree_niter_desc *niter,
969 bool exit_must_be_taken, bounds *bnds)
971 tree niter_type = unsigned_type_for (type);
972 tree s, c, d, bits, assumption, tmp, bound;
973 mpz_t max;
975 niter->control = *iv;
976 niter->bound = final;
977 niter->cmp = NE_EXPR;
979 /* Rearrange the terms so that we get inequality S * i <> C, with S
980 positive. Also cast everything to the unsigned type. If IV does
981 not overflow, BNDS bounds the value of C. Also, this is the
982 case if the computation |FINAL - IV->base| does not overflow, i.e.,
983 if BNDS->below in the result is nonnegative. */
984 if (tree_int_cst_sign_bit (iv->step))
986 s = fold_convert (niter_type,
987 fold_build1 (NEGATE_EXPR, type, iv->step));
988 c = fold_build2 (MINUS_EXPR, niter_type,
989 fold_convert (niter_type, iv->base),
990 fold_convert (niter_type, final));
991 bounds_negate (bnds);
993 else
995 s = fold_convert (niter_type, iv->step);
996 c = fold_build2 (MINUS_EXPR, niter_type,
997 fold_convert (niter_type, final),
998 fold_convert (niter_type, iv->base));
1001 mpz_init (max);
1002 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
1003 exit_must_be_taken);
1004 niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
1005 TYPE_SIGN (niter_type));
1006 mpz_clear (max);
1008 /* Compute no-overflow information for the control iv. This can be
1009 proven when below two conditions are satisfied:
1011 1) IV evaluates toward FINAL at beginning, i.e:
1012 base <= FINAL ; step > 0
1013 base >= FINAL ; step < 0
1015 2) |FINAL - base| is an exact multiple of step.
1017 Unfortunately, it's hard to prove above conditions after pass loop-ch
1018 because loop with exit condition (IV != FINAL) usually will be guarded
1019 by initial-condition (IV.base - IV.step != FINAL). In this case, we
1020 can alternatively try to prove below conditions:
1022 1') IV evaluates toward FINAL at beginning, i.e:
1023 new_base = base - step < FINAL ; step > 0
1024 && base - step doesn't underflow
1025 new_base = base - step > FINAL ; step < 0
1026 && base - step doesn't overflow
1028 2') |FINAL - new_base| is an exact multiple of step.
1030 Please refer to PR34114 as an example of loop-ch's impact, also refer
1031 to PR72817 as an example why condition 2') is necessary.
1033 Note, for NE_EXPR, base equals to FINAL is a special case, in
1034 which the loop exits immediately, and the iv does not overflow. */
1035 if (!niter->control.no_overflow
1036 && (integer_onep (s) || multiple_of_p (type, c, s)))
1038 tree t, cond, new_c, relaxed_cond = boolean_false_node;
1040 if (tree_int_cst_sign_bit (iv->step))
1042 cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1043 if (TREE_CODE (type) == INTEGER_TYPE)
1045 /* Only when base - step doesn't overflow. */
1046 t = TYPE_MAX_VALUE (type);
1047 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1048 t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1049 if (integer_nonzerop (t))
1051 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1052 new_c = fold_build2 (MINUS_EXPR, niter_type,
1053 fold_convert (niter_type, t),
1054 fold_convert (niter_type, final));
1055 if (multiple_of_p (type, new_c, s))
1056 relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
1057 t, final);
1061 else
1063 cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1064 if (TREE_CODE (type) == INTEGER_TYPE)
1066 /* Only when base - step doesn't underflow. */
1067 t = TYPE_MIN_VALUE (type);
1068 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1069 t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1070 if (integer_nonzerop (t))
1072 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1073 new_c = fold_build2 (MINUS_EXPR, niter_type,
1074 fold_convert (niter_type, final),
1075 fold_convert (niter_type, t));
1076 if (multiple_of_p (type, new_c, s))
1077 relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
1078 t, final);
1083 t = simplify_using_initial_conditions (loop, cond);
1084 if (!t || !integer_onep (t))
1085 t = simplify_using_initial_conditions (loop, relaxed_cond);
1087 if (t && integer_onep (t))
1088 niter->control.no_overflow = true;
1091 /* First the trivial cases -- when the step is 1. */
1092 if (integer_onep (s))
1094 niter->niter = c;
1095 return true;
1097 if (niter->control.no_overflow && multiple_of_p (type, c, s))
1099 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
1100 return true;
1103 /* Let nsd (step, size of mode) = d. If d does not divide c, the loop
1104 is infinite. Otherwise, the number of iterations is
1105 (inverse(s/d) * (c/d)) mod (size of mode/d). */
1106 bits = num_ending_zeros (s);
1107 bound = build_low_bits_mask (niter_type,
1108 (TYPE_PRECISION (niter_type)
1109 - tree_to_uhwi (bits)));
1111 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
1112 build_int_cst (niter_type, 1), bits);
1113 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
1115 if (!exit_must_be_taken)
1117 /* If we cannot assume that the exit is taken eventually, record the
1118 assumptions for divisibility of c. */
1119 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1120 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1121 assumption, build_int_cst (niter_type, 0));
1122 if (!integer_nonzerop (assumption))
1123 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1124 niter->assumptions, assumption);
1127 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1128 if (integer_onep (s))
1130 niter->niter = c;
1132 else
1134 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1135 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1137 return true;
1140 /* Checks whether we can determine the final value of the control variable
1141 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1142 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1143 of the step. The assumptions necessary to ensure that the computation
1144 of the final value does not overflow are recorded in NITER. If we
1145 find the final value, we adjust DELTA and return TRUE. Otherwise
1146 we return false. BNDS bounds the value of IV1->base - IV0->base,
1147 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1148 true if we know that the exit must be taken eventually. */
1150 static bool
1151 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1152 class tree_niter_desc *niter,
1153 tree *delta, tree step,
1154 bool exit_must_be_taken, bounds *bnds)
1156 tree niter_type = TREE_TYPE (step);
1157 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
1158 tree tmod;
1159 mpz_t mmod;
1160 tree assumption = boolean_true_node, bound, noloop;
1161 bool ret = false, fv_comp_no_overflow;
1162 tree type1 = type;
1163 if (POINTER_TYPE_P (type))
1164 type1 = sizetype;
1166 if (TREE_CODE (mod) != INTEGER_CST)
1167 return false;
1168 if (integer_nonzerop (mod))
1169 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
1170 tmod = fold_convert (type1, mod);
1172 mpz_init (mmod);
1173 wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
1174 mpz_neg (mmod, mmod);
1176 /* If the induction variable does not overflow and the exit is taken,
1177 then the computation of the final value does not overflow. This is
1178 also obviously the case if the new final value is equal to the
1179 current one. Finally, we postulate this for pointer type variables,
1180 as the code cannot rely on the object to that the pointer points being
1181 placed at the end of the address space (and more pragmatically,
1182 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
1183 if (integer_zerop (mod) || POINTER_TYPE_P (type))
1184 fv_comp_no_overflow = true;
1185 else if (!exit_must_be_taken)
1186 fv_comp_no_overflow = false;
1187 else
1188 fv_comp_no_overflow =
1189 (iv0->no_overflow && integer_nonzerop (iv0->step))
1190 || (iv1->no_overflow && integer_nonzerop (iv1->step));
1192 if (integer_nonzerop (iv0->step))
1194 /* The final value of the iv is iv1->base + MOD, assuming that this
1195 computation does not overflow, and that
1196 iv0->base <= iv1->base + MOD. */
1197 if (!fv_comp_no_overflow)
1199 bound = fold_build2 (MINUS_EXPR, type1,
1200 TYPE_MAX_VALUE (type1), tmod);
1201 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1202 iv1->base, bound);
1203 if (integer_zerop (assumption))
1204 goto end;
1206 if (mpz_cmp (mmod, bnds->below) < 0)
1207 noloop = boolean_false_node;
1208 else if (POINTER_TYPE_P (type))
1209 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1210 iv0->base,
1211 fold_build_pointer_plus (iv1->base, tmod));
1212 else
1213 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1214 iv0->base,
1215 fold_build2 (PLUS_EXPR, type1,
1216 iv1->base, tmod));
1218 else
1220 /* The final value of the iv is iv0->base - MOD, assuming that this
1221 computation does not overflow, and that
1222 iv0->base - MOD <= iv1->base. */
1223 if (!fv_comp_no_overflow)
1225 bound = fold_build2 (PLUS_EXPR, type1,
1226 TYPE_MIN_VALUE (type1), tmod);
1227 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1228 iv0->base, bound);
1229 if (integer_zerop (assumption))
1230 goto end;
1232 if (mpz_cmp (mmod, bnds->below) < 0)
1233 noloop = boolean_false_node;
1234 else if (POINTER_TYPE_P (type))
1235 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1236 fold_build_pointer_plus (iv0->base,
1237 fold_build1 (NEGATE_EXPR,
1238 type1, tmod)),
1239 iv1->base);
1240 else
1241 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1242 fold_build2 (MINUS_EXPR, type1,
1243 iv0->base, tmod),
1244 iv1->base);
1247 if (!integer_nonzerop (assumption))
1248 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1249 niter->assumptions,
1250 assumption);
1251 if (!integer_zerop (noloop))
1252 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1253 niter->may_be_zero,
1254 noloop);
1255 bounds_add (bnds, wi::to_widest (mod), type);
1256 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
1258 ret = true;
1259 end:
1260 mpz_clear (mmod);
1261 return ret;
1264 /* Add assertions to NITER that ensure that the control variable of the loop
1265 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1266 are TYPE. Returns false if we can prove that there is an overflow, true
1267 otherwise. STEP is the absolute value of the step. */
1269 static bool
1270 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1271 class tree_niter_desc *niter, tree step)
1273 tree bound, d, assumption, diff;
1274 tree niter_type = TREE_TYPE (step);
1276 if (integer_nonzerop (iv0->step))
1278 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1279 if (iv0->no_overflow)
1280 return true;
1282 /* If iv0->base is a constant, we can determine the last value before
1283 overflow precisely; otherwise we conservatively assume
1284 MAX - STEP + 1. */
1286 if (TREE_CODE (iv0->base) == INTEGER_CST)
1288 d = fold_build2 (MINUS_EXPR, niter_type,
1289 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1290 fold_convert (niter_type, iv0->base));
1291 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1293 else
1294 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1295 build_int_cst (niter_type, 1));
1296 bound = fold_build2 (MINUS_EXPR, type,
1297 TYPE_MAX_VALUE (type), fold_convert (type, diff));
1298 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1299 iv1->base, bound);
1301 else
1303 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1304 if (iv1->no_overflow)
1305 return true;
1307 if (TREE_CODE (iv1->base) == INTEGER_CST)
1309 d = fold_build2 (MINUS_EXPR, niter_type,
1310 fold_convert (niter_type, iv1->base),
1311 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1312 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1314 else
1315 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1316 build_int_cst (niter_type, 1));
1317 bound = fold_build2 (PLUS_EXPR, type,
1318 TYPE_MIN_VALUE (type), fold_convert (type, diff));
1319 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1320 iv0->base, bound);
1323 if (integer_zerop (assumption))
1324 return false;
1325 if (!integer_nonzerop (assumption))
1326 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1327 niter->assumptions, assumption);
1329 iv0->no_overflow = true;
1330 iv1->no_overflow = true;
1331 return true;
1334 /* Add an assumption to NITER that a loop whose ending condition
1335 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1336 bounds the value of IV1->base - IV0->base. */
1338 static void
1339 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1340 class tree_niter_desc *niter, bounds *bnds)
1342 tree assumption = boolean_true_node, bound, diff;
1343 tree mbz, mbzl, mbzr, type1;
1344 bool rolls_p, no_overflow_p;
1345 widest_int dstep;
1346 mpz_t mstep, max;
1348 /* We are going to compute the number of iterations as
1349 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
1350 variant of TYPE. This formula only works if
1352 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1354 (where MAX is the maximum value of the unsigned variant of TYPE, and
1355 the computations in this formula are performed in full precision,
1356 i.e., without overflows).
1358 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1359 we have a condition of the form iv0->base - step < iv1->base before the loop,
1360 and for loops iv0->base < iv1->base - step * i the condition
1361 iv0->base < iv1->base + step, due to loop header copying, which enable us
1362 to prove the lower bound.
1364 The upper bound is more complicated. Unless the expressions for initial
1365 and final value themselves contain enough information, we usually cannot
1366 derive it from the context. */
1368 /* First check whether the answer does not follow from the bounds we gathered
1369 before. */
1370 if (integer_nonzerop (iv0->step))
1371 dstep = wi::to_widest (iv0->step);
1372 else
1374 dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
1375 dstep = -dstep;
1378 mpz_init (mstep);
1379 wi::to_mpz (dstep, mstep, UNSIGNED);
1380 mpz_neg (mstep, mstep);
1381 mpz_add_ui (mstep, mstep, 1);
1383 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1385 mpz_init (max);
1386 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
1387 mpz_add (max, max, mstep);
1388 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1389 /* For pointers, only values lying inside a single object
1390 can be compared or manipulated by pointer arithmetics.
1391 Gcc in general does not allow or handle objects larger
1392 than half of the address space, hence the upper bound
1393 is satisfied for pointers. */
1394 || POINTER_TYPE_P (type));
1395 mpz_clear (mstep);
1396 mpz_clear (max);
1398 if (rolls_p && no_overflow_p)
1399 return;
1401 type1 = type;
1402 if (POINTER_TYPE_P (type))
1403 type1 = sizetype;
1405 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1406 we must be careful not to introduce overflow. */
1408 if (integer_nonzerop (iv0->step))
1410 diff = fold_build2 (MINUS_EXPR, type1,
1411 iv0->step, build_int_cst (type1, 1));
1413 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1414 0 address never belongs to any object, we can assume this for
1415 pointers. */
1416 if (!POINTER_TYPE_P (type))
1418 bound = fold_build2 (PLUS_EXPR, type1,
1419 TYPE_MIN_VALUE (type), diff);
1420 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1421 iv0->base, bound);
1424 /* And then we can compute iv0->base - diff, and compare it with
1425 iv1->base. */
1426 mbzl = fold_build2 (MINUS_EXPR, type1,
1427 fold_convert (type1, iv0->base), diff);
1428 mbzr = fold_convert (type1, iv1->base);
1430 else
1432 diff = fold_build2 (PLUS_EXPR, type1,
1433 iv1->step, build_int_cst (type1, 1));
1435 if (!POINTER_TYPE_P (type))
1437 bound = fold_build2 (PLUS_EXPR, type1,
1438 TYPE_MAX_VALUE (type), diff);
1439 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1440 iv1->base, bound);
1443 mbzl = fold_convert (type1, iv0->base);
1444 mbzr = fold_build2 (MINUS_EXPR, type1,
1445 fold_convert (type1, iv1->base), diff);
1448 if (!integer_nonzerop (assumption))
1449 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1450 niter->assumptions, assumption);
1451 if (!rolls_p)
1453 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1454 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1455 niter->may_be_zero, mbz);
1459 /* Determines number of iterations of loop whose ending condition
1460 is IV0 < IV1. TYPE is the type of the iv. The number of
1461 iterations is stored to NITER. BNDS bounds the difference
1462 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1463 that the exit must be taken eventually. */
1465 static bool
1466 number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
1467 affine_iv *iv1, class tree_niter_desc *niter,
1468 bool exit_must_be_taken, bounds *bnds)
1470 tree niter_type = unsigned_type_for (type);
1471 tree delta, step, s;
1472 mpz_t mstep, tmp;
1474 if (integer_nonzerop (iv0->step))
1476 niter->control = *iv0;
1477 niter->cmp = LT_EXPR;
1478 niter->bound = iv1->base;
1480 else
1482 niter->control = *iv1;
1483 niter->cmp = GT_EXPR;
1484 niter->bound = iv0->base;
1487 delta = fold_build2 (MINUS_EXPR, niter_type,
1488 fold_convert (niter_type, iv1->base),
1489 fold_convert (niter_type, iv0->base));
1491 /* First handle the special case that the step is +-1. */
1492 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1493 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1495 /* for (i = iv0->base; i < iv1->base; i++)
1499 for (i = iv1->base; i > iv0->base; i--).
1501 In both cases # of iterations is iv1->base - iv0->base, assuming that
1502 iv1->base >= iv0->base.
1504 First try to derive a lower bound on the value of
1505 iv1->base - iv0->base, computed in full precision. If the difference
1506 is nonnegative, we are done, otherwise we must record the
1507 condition. */
1509 if (mpz_sgn (bnds->below) < 0)
1510 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1511 iv1->base, iv0->base);
1512 niter->niter = delta;
1513 niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1514 TYPE_SIGN (niter_type));
1515 niter->control.no_overflow = true;
1516 return true;
1519 if (integer_nonzerop (iv0->step))
1520 step = fold_convert (niter_type, iv0->step);
1521 else
1522 step = fold_convert (niter_type,
1523 fold_build1 (NEGATE_EXPR, type, iv1->step));
1525 /* If we can determine the final value of the control iv exactly, we can
1526 transform the condition to != comparison. In particular, this will be
1527 the case if DELTA is constant. */
1528 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1529 exit_must_be_taken, bnds))
1531 affine_iv zps;
1533 zps.base = build_int_cst (niter_type, 0);
1534 zps.step = step;
1535 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1536 zps does not overflow. */
1537 zps.no_overflow = true;
1539 return number_of_iterations_ne (loop, type, &zps,
1540 delta, niter, true, bnds);
1543 /* Make sure that the control iv does not overflow. */
1544 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1545 return false;
1547 /* We determine the number of iterations as (delta + step - 1) / step. For
1548 this to work, we must know that iv1->base >= iv0->base - step + 1,
1549 otherwise the loop does not roll. */
1550 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1552 s = fold_build2 (MINUS_EXPR, niter_type,
1553 step, build_int_cst (niter_type, 1));
1554 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1555 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1557 mpz_init (mstep);
1558 mpz_init (tmp);
1559 wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
1560 mpz_add (tmp, bnds->up, mstep);
1561 mpz_sub_ui (tmp, tmp, 1);
1562 mpz_fdiv_q (tmp, tmp, mstep);
1563 niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1564 TYPE_SIGN (niter_type));
1565 mpz_clear (mstep);
1566 mpz_clear (tmp);
1568 return true;
1571 /* Determines number of iterations of loop whose ending condition
1572 is IV0 <= IV1. TYPE is the type of the iv. The number of
1573 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1574 we know that this condition must eventually become false (we derived this
1575 earlier, and possibly set NITER->assumptions to make sure this
1576 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1578 static bool
1579 number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
1580 affine_iv *iv1, class tree_niter_desc *niter,
1581 bool exit_must_be_taken, bounds *bnds)
1583 tree assumption;
1584 tree type1 = type;
1585 if (POINTER_TYPE_P (type))
1586 type1 = sizetype;
1588 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1589 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1590 value of the type. This we must know anyway, since if it is
1591 equal to this value, the loop rolls forever. We do not check
1592 this condition for pointer type ivs, as the code cannot rely on
1593 the object to that the pointer points being placed at the end of
1594 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1595 not defined for pointers). */
1597 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1599 if (integer_nonzerop (iv0->step))
1600 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1601 iv1->base, TYPE_MAX_VALUE (type));
1602 else
1603 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1604 iv0->base, TYPE_MIN_VALUE (type));
1606 if (integer_zerop (assumption))
1607 return false;
1608 if (!integer_nonzerop (assumption))
1609 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1610 niter->assumptions, assumption);
1613 if (integer_nonzerop (iv0->step))
1615 if (POINTER_TYPE_P (type))
1616 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1617 else
1618 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1619 build_int_cst (type1, 1));
1621 else if (POINTER_TYPE_P (type))
1622 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1623 else
1624 iv0->base = fold_build2 (MINUS_EXPR, type1,
1625 iv0->base, build_int_cst (type1, 1));
1627 bounds_add (bnds, 1, type1);
1629 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1630 bnds);
1633 /* Dumps description of affine induction variable IV to FILE. */
1635 static void
1636 dump_affine_iv (FILE *file, affine_iv *iv)
1638 if (!integer_zerop (iv->step))
1639 fprintf (file, "[");
1641 print_generic_expr (dump_file, iv->base, TDF_SLIM);
1643 if (!integer_zerop (iv->step))
1645 fprintf (file, ", + , ");
1646 print_generic_expr (dump_file, iv->step, TDF_SLIM);
1647 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1651 /* Given exit condition IV0 CODE IV1 in TYPE, this function adjusts
1652 the condition for loop-until-wrap cases. For example:
1653 (unsigned){8, -1}_loop < 10 => {0, 1} != 9
1654 10 < (unsigned){0, max - 7}_loop => {0, 1} != 8
1655 Return true if condition is successfully adjusted. */
1657 static bool
1658 adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
1659 affine_iv *iv1)
1661 /* Only support simple cases for the moment. */
1662 if (TREE_CODE (iv0->base) != INTEGER_CST
1663 || TREE_CODE (iv1->base) != INTEGER_CST)
1664 return false;
1666 tree niter_type = unsigned_type_for (type), high, low;
1667 /* Case: i-- < 10. */
1668 if (integer_zerop (iv1->step))
1670 /* TODO: Should handle case in which abs(step) != 1. */
1671 if (!integer_minus_onep (iv0->step))
1672 return false;
1673 /* Give up on infinite loop. */
1674 if (*code == LE_EXPR
1675 && tree_int_cst_equal (iv1->base, TYPE_MAX_VALUE (type)))
1676 return false;
1677 high = fold_build2 (PLUS_EXPR, niter_type,
1678 fold_convert (niter_type, iv0->base),
1679 build_int_cst (niter_type, 1));
1680 low = fold_convert (niter_type, TYPE_MIN_VALUE (type));
1682 else if (integer_zerop (iv0->step))
1684 /* TODO: Should handle case in which abs(step) != 1. */
1685 if (!integer_onep (iv1->step))
1686 return false;
1687 /* Give up on infinite loop. */
1688 if (*code == LE_EXPR
1689 && tree_int_cst_equal (iv0->base, TYPE_MIN_VALUE (type)))
1690 return false;
1691 high = fold_convert (niter_type, TYPE_MAX_VALUE (type));
1692 low = fold_build2 (MINUS_EXPR, niter_type,
1693 fold_convert (niter_type, iv1->base),
1694 build_int_cst (niter_type, 1));
1696 else
1697 gcc_unreachable ();
1699 iv0->base = low;
1700 iv0->step = fold_convert (niter_type, integer_one_node);
1701 iv1->base = high;
1702 iv1->step = build_int_cst (niter_type, 0);
1703 *code = NE_EXPR;
1704 return true;
1707 /* Determine the number of iterations according to condition (for staying
1708 inside loop) which compares two induction variables using comparison
1709 operator CODE. The induction variable on left side of the comparison
1710 is IV0, the right-hand side is IV1. Both induction variables must have
1711 type TYPE, which must be an integer or pointer type. The steps of the
1712 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1714 LOOP is the loop whose number of iterations we are determining.
1716 ONLY_EXIT is true if we are sure this is the only way the loop could be
1717 exited (including possibly non-returning function calls, exceptions, etc.)
1718 -- in this case we can use the information whether the control induction
1719 variables can overflow or not in a more efficient way.
1721 if EVERY_ITERATION is true, we know the test is executed on every iteration.
1723 The results (number of iterations and assumptions as described in
1724 comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1725 Returns false if it fails to determine number of iterations, true if it
1726 was determined (possibly with some assumptions). */
1728 static bool
1729 number_of_iterations_cond (class loop *loop,
1730 tree type, affine_iv *iv0, enum tree_code code,
1731 affine_iv *iv1, class tree_niter_desc *niter,
1732 bool only_exit, bool every_iteration)
1734 bool exit_must_be_taken = false, ret;
1735 bounds bnds;
1737 /* If the test is not executed every iteration, wrapping may make the test
1738 to pass again.
1739 TODO: the overflow case can be still used as unreliable estimate of upper
1740 bound. But we have no API to pass it down to number of iterations code
1741 and, at present, it will not use it anyway. */
1742 if (!every_iteration
1743 && (!iv0->no_overflow || !iv1->no_overflow
1744 || code == NE_EXPR || code == EQ_EXPR))
1745 return false;
1747 /* The meaning of these assumptions is this:
1748 if !assumptions
1749 then the rest of information does not have to be valid
1750 if may_be_zero then the loop does not roll, even if
1751 niter != 0. */
1752 niter->assumptions = boolean_true_node;
1753 niter->may_be_zero = boolean_false_node;
1754 niter->niter = NULL_TREE;
1755 niter->max = 0;
1756 niter->bound = NULL_TREE;
1757 niter->cmp = ERROR_MARK;
1759 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1760 the control variable is on lhs. */
1761 if (code == GE_EXPR || code == GT_EXPR
1762 || (code == NE_EXPR && integer_zerop (iv0->step)))
1764 std::swap (iv0, iv1);
1765 code = swap_tree_comparison (code);
1768 if (POINTER_TYPE_P (type))
1770 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1771 to the same object. If they do, the control variable cannot wrap
1772 (as wrap around the bounds of memory will never return a pointer
1773 that would be guaranteed to point to the same object, even if we
1774 avoid undefined behavior by casting to size_t and back). */
1775 iv0->no_overflow = true;
1776 iv1->no_overflow = true;
1779 /* If the control induction variable does not overflow and the only exit
1780 from the loop is the one that we analyze, we know it must be taken
1781 eventually. */
1782 if (only_exit)
1784 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1785 exit_must_be_taken = true;
1786 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1787 exit_must_be_taken = true;
1790 /* We can handle cases which neither of the sides of the comparison is
1791 invariant:
1793 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1794 as if:
1795 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1797 provided that either below condition is satisfied:
1799 a) the test is NE_EXPR;
1800 b) iv0.step - iv1.step is integer and iv0/iv1 don't overflow.
1802 This rarely occurs in practice, but it is simple enough to manage. */
1803 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1805 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1806 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1807 iv0->step, iv1->step);
1809 /* No need to check sign of the new step since below code takes care
1810 of this well. */
1811 if (code != NE_EXPR
1812 && (TREE_CODE (step) != INTEGER_CST
1813 || !iv0->no_overflow || !iv1->no_overflow))
1814 return false;
1816 iv0->step = step;
1817 if (!POINTER_TYPE_P (type))
1818 iv0->no_overflow = false;
1820 iv1->step = build_int_cst (step_type, 0);
1821 iv1->no_overflow = true;
1824 /* If the result of the comparison is a constant, the loop is weird. More
1825 precise handling would be possible, but the situation is not common enough
1826 to waste time on it. */
1827 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1828 return false;
1830 /* If the loop exits immediately, there is nothing to do. */
1831 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1832 if (tem && integer_zerop (tem))
1834 if (!every_iteration)
1835 return false;
1836 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1837 niter->max = 0;
1838 return true;
1841 /* Handle special case loops: while (i-- < 10) and while (10 < i++) by
1842 adjusting iv0, iv1 and code. */
1843 if (code != NE_EXPR
1844 && (tree_int_cst_sign_bit (iv0->step)
1845 || (!integer_zerop (iv1->step)
1846 && !tree_int_cst_sign_bit (iv1->step)))
1847 && !adjust_cond_for_loop_until_wrap (type, iv0, &code, iv1))
1848 return false;
1850 /* OK, now we know we have a senseful loop. Handle several cases, depending
1851 on what comparison operator is used. */
1852 bound_difference (loop, iv1->base, iv0->base, &bnds);
1854 if (dump_file && (dump_flags & TDF_DETAILS))
1856 fprintf (dump_file,
1857 "Analyzing # of iterations of loop %d\n", loop->num);
1859 fprintf (dump_file, " exit condition ");
1860 dump_affine_iv (dump_file, iv0);
1861 fprintf (dump_file, " %s ",
1862 code == NE_EXPR ? "!="
1863 : code == LT_EXPR ? "<"
1864 : "<=");
1865 dump_affine_iv (dump_file, iv1);
1866 fprintf (dump_file, "\n");
1868 fprintf (dump_file, " bounds on difference of bases: ");
1869 mpz_out_str (dump_file, 10, bnds.below);
1870 fprintf (dump_file, " ... ");
1871 mpz_out_str (dump_file, 10, bnds.up);
1872 fprintf (dump_file, "\n");
1875 switch (code)
1877 case NE_EXPR:
1878 gcc_assert (integer_zerop (iv1->step));
1879 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1880 exit_must_be_taken, &bnds);
1881 break;
1883 case LT_EXPR:
1884 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1885 exit_must_be_taken, &bnds);
1886 break;
1888 case LE_EXPR:
1889 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1890 exit_must_be_taken, &bnds);
1891 break;
1893 default:
1894 gcc_unreachable ();
1897 mpz_clear (bnds.up);
1898 mpz_clear (bnds.below);
1900 if (dump_file && (dump_flags & TDF_DETAILS))
1902 if (ret)
1904 fprintf (dump_file, " result:\n");
1905 if (!integer_nonzerop (niter->assumptions))
1907 fprintf (dump_file, " under assumptions ");
1908 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1909 fprintf (dump_file, "\n");
1912 if (!integer_zerop (niter->may_be_zero))
1914 fprintf (dump_file, " zero if ");
1915 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1916 fprintf (dump_file, "\n");
1919 fprintf (dump_file, " # of iterations ");
1920 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1921 fprintf (dump_file, ", bounded by ");
1922 print_decu (niter->max, dump_file);
1923 fprintf (dump_file, "\n");
1925 else
1926 fprintf (dump_file, " failed\n\n");
1928 return ret;
1931 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
1932 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
1933 all SSA names are replaced with the result of calling the VALUEIZE
1934 function with the SSA name as argument. */
1936 tree
1937 simplify_replace_tree (tree expr, tree old, tree new_tree,
1938 tree (*valueize) (tree))
1940 unsigned i, n;
1941 tree ret = NULL_TREE, e, se;
1943 if (!expr)
1944 return NULL_TREE;
1946 /* Do not bother to replace constants. */
1947 if (CONSTANT_CLASS_P (expr))
1948 return expr;
1950 if (valueize)
1952 if (TREE_CODE (expr) == SSA_NAME)
1954 new_tree = valueize (expr);
1955 if (new_tree != expr)
1956 return new_tree;
1959 else if (expr == old
1960 || operand_equal_p (expr, old, 0))
1961 return unshare_expr (new_tree);
1963 if (!EXPR_P (expr))
1964 return expr;
1966 n = TREE_OPERAND_LENGTH (expr);
1967 for (i = 0; i < n; i++)
1969 e = TREE_OPERAND (expr, i);
1970 se = simplify_replace_tree (e, old, new_tree, valueize);
1971 if (e == se)
1972 continue;
1974 if (!ret)
1975 ret = copy_node (expr);
1977 TREE_OPERAND (ret, i) = se;
1980 return (ret ? fold (ret) : expr);
1983 /* Expand definitions of ssa names in EXPR as long as they are simple
1984 enough, and return the new expression. If STOP is specified, stop
1985 expanding if EXPR equals to it. */
1987 static tree
1988 expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
1990 unsigned i, n;
1991 tree ret = NULL_TREE, e, ee, e1;
1992 enum tree_code code;
1993 gimple *stmt;
1995 if (expr == NULL_TREE)
1996 return expr;
1998 if (is_gimple_min_invariant (expr))
1999 return expr;
2001 code = TREE_CODE (expr);
2002 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2004 n = TREE_OPERAND_LENGTH (expr);
2005 for (i = 0; i < n; i++)
2007 e = TREE_OPERAND (expr, i);
2008 /* SCEV analysis feeds us with a proper expression
2009 graph matching the SSA graph. Avoid turning it
2010 into a tree here, thus handle tree sharing
2011 properly.
2012 ??? The SSA walk below still turns the SSA graph
2013 into a tree but until we find a testcase do not
2014 introduce additional tree sharing here. */
2015 bool existed_p;
2016 tree &cee = cache.get_or_insert (e, &existed_p);
2017 if (existed_p)
2018 ee = cee;
2019 else
2021 cee = e;
2022 ee = expand_simple_operations (e, stop, cache);
2023 if (ee != e)
2024 *cache.get (e) = ee;
2026 if (e == ee)
2027 continue;
2029 if (!ret)
2030 ret = copy_node (expr);
2032 TREE_OPERAND (ret, i) = ee;
2035 if (!ret)
2036 return expr;
2038 fold_defer_overflow_warnings ();
2039 ret = fold (ret);
2040 fold_undefer_and_ignore_overflow_warnings ();
2041 return ret;
2044 /* Stop if it's not ssa name or the one we don't want to expand. */
2045 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2046 return expr;
2048 stmt = SSA_NAME_DEF_STMT (expr);
2049 if (gimple_code (stmt) == GIMPLE_PHI)
2051 basic_block src, dest;
2053 if (gimple_phi_num_args (stmt) != 1)
2054 return expr;
2055 e = PHI_ARG_DEF (stmt, 0);
2057 /* Avoid propagating through loop exit phi nodes, which
2058 could break loop-closed SSA form restrictions. */
2059 dest = gimple_bb (stmt);
2060 src = single_pred (dest);
2061 if (TREE_CODE (e) == SSA_NAME
2062 && src->loop_father != dest->loop_father)
2063 return expr;
2065 return expand_simple_operations (e, stop, cache);
2067 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2068 return expr;
2070 /* Avoid expanding to expressions that contain SSA names that need
2071 to take part in abnormal coalescing. */
2072 ssa_op_iter iter;
2073 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2074 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2075 return expr;
2077 e = gimple_assign_rhs1 (stmt);
2078 code = gimple_assign_rhs_code (stmt);
2079 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2081 if (is_gimple_min_invariant (e))
2082 return e;
2084 if (code == SSA_NAME)
2085 return expand_simple_operations (e, stop, cache);
2086 else if (code == ADDR_EXPR)
2088 poly_int64 offset;
2089 tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2090 &offset);
2091 if (base
2092 && TREE_CODE (base) == MEM_REF)
2094 ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2095 cache);
2096 return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2097 wide_int_to_tree (sizetype,
2098 mem_ref_offset (base)
2099 + offset));
2103 return expr;
2106 switch (code)
2108 CASE_CONVERT:
2109 /* Casts are simple. */
2110 ee = expand_simple_operations (e, stop, cache);
2111 return fold_build1 (code, TREE_TYPE (expr), ee);
2113 case PLUS_EXPR:
2114 case MINUS_EXPR:
2115 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2116 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2117 return expr;
2118 /* Fallthru. */
2119 case POINTER_PLUS_EXPR:
2120 /* And increments and decrements by a constant are simple. */
2121 e1 = gimple_assign_rhs2 (stmt);
2122 if (!is_gimple_min_invariant (e1))
2123 return expr;
2125 ee = expand_simple_operations (e, stop, cache);
2126 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2128 default:
2129 return expr;
2133 tree
2134 expand_simple_operations (tree expr, tree stop)
2136 hash_map<tree, tree> cache;
2137 return expand_simple_operations (expr, stop, cache);
2140 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2141 expression (or EXPR unchanged, if no simplification was possible). */
2143 static tree
2144 tree_simplify_using_condition_1 (tree cond, tree expr)
2146 bool changed;
2147 tree e, e0, e1, e2, notcond;
2148 enum tree_code code = TREE_CODE (expr);
2150 if (code == INTEGER_CST)
2151 return expr;
2153 if (code == TRUTH_OR_EXPR
2154 || code == TRUTH_AND_EXPR
2155 || code == COND_EXPR)
2157 changed = false;
2159 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2160 if (TREE_OPERAND (expr, 0) != e0)
2161 changed = true;
2163 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2164 if (TREE_OPERAND (expr, 1) != e1)
2165 changed = true;
2167 if (code == COND_EXPR)
2169 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2170 if (TREE_OPERAND (expr, 2) != e2)
2171 changed = true;
2173 else
2174 e2 = NULL_TREE;
2176 if (changed)
2178 if (code == COND_EXPR)
2179 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2180 else
2181 expr = fold_build2 (code, boolean_type_node, e0, e1);
2184 return expr;
2187 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
2188 propagation, and vice versa. Fold does not handle this, since it is
2189 considered too expensive. */
2190 if (TREE_CODE (cond) == EQ_EXPR)
2192 e0 = TREE_OPERAND (cond, 0);
2193 e1 = TREE_OPERAND (cond, 1);
2195 /* We know that e0 == e1. Check whether we cannot simplify expr
2196 using this fact. */
2197 e = simplify_replace_tree (expr, e0, e1);
2198 if (integer_zerop (e) || integer_nonzerop (e))
2199 return e;
2201 e = simplify_replace_tree (expr, e1, e0);
2202 if (integer_zerop (e) || integer_nonzerop (e))
2203 return e;
2205 if (TREE_CODE (expr) == EQ_EXPR)
2207 e0 = TREE_OPERAND (expr, 0);
2208 e1 = TREE_OPERAND (expr, 1);
2210 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2211 e = simplify_replace_tree (cond, e0, e1);
2212 if (integer_zerop (e))
2213 return e;
2214 e = simplify_replace_tree (cond, e1, e0);
2215 if (integer_zerop (e))
2216 return e;
2218 if (TREE_CODE (expr) == NE_EXPR)
2220 e0 = TREE_OPERAND (expr, 0);
2221 e1 = TREE_OPERAND (expr, 1);
2223 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2224 e = simplify_replace_tree (cond, e0, e1);
2225 if (integer_zerop (e))
2226 return boolean_true_node;
2227 e = simplify_replace_tree (cond, e1, e0);
2228 if (integer_zerop (e))
2229 return boolean_true_node;
2232 /* Check whether COND ==> EXPR. */
2233 notcond = invert_truthvalue (cond);
2234 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2235 if (e && integer_nonzerop (e))
2236 return e;
2238 /* Check whether COND ==> not EXPR. */
2239 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2240 if (e && integer_zerop (e))
2241 return e;
2243 return expr;
2246 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2247 expression (or EXPR unchanged, if no simplification was possible).
2248 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2249 of simple operations in definitions of ssa names in COND are expanded,
2250 so that things like casts or incrementing the value of the bound before
2251 the loop do not cause us to fail. */
2253 static tree
2254 tree_simplify_using_condition (tree cond, tree expr)
2256 cond = expand_simple_operations (cond);
2258 return tree_simplify_using_condition_1 (cond, expr);
2261 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2262 Returns the simplified expression (or EXPR unchanged, if no
2263 simplification was possible). */
2265 tree
2266 simplify_using_initial_conditions (class loop *loop, tree expr)
2268 edge e;
2269 basic_block bb;
2270 gimple *stmt;
2271 tree cond, expanded, backup;
2272 int cnt = 0;
2274 if (TREE_CODE (expr) == INTEGER_CST)
2275 return expr;
2277 backup = expanded = expand_simple_operations (expr);
2279 /* Limit walking the dominators to avoid quadraticness in
2280 the number of BBs times the number of loops in degenerate
2281 cases. */
2282 for (bb = loop->header;
2283 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2284 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2286 if (!single_pred_p (bb))
2287 continue;
2288 e = single_pred_edge (bb);
2290 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2291 continue;
2293 stmt = last_stmt (e->src);
2294 cond = fold_build2 (gimple_cond_code (stmt),
2295 boolean_type_node,
2296 gimple_cond_lhs (stmt),
2297 gimple_cond_rhs (stmt));
2298 if (e->flags & EDGE_FALSE_VALUE)
2299 cond = invert_truthvalue (cond);
2300 expanded = tree_simplify_using_condition (cond, expanded);
2301 /* Break if EXPR is simplified to const values. */
2302 if (expanded
2303 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2304 return expanded;
2306 ++cnt;
2309 /* Return the original expression if no simplification is done. */
2310 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2313 /* Tries to simplify EXPR using the evolutions of the loop invariants
2314 in the superloops of LOOP. Returns the simplified expression
2315 (or EXPR unchanged, if no simplification was possible). */
2317 static tree
2318 simplify_using_outer_evolutions (class loop *loop, tree expr)
2320 enum tree_code code = TREE_CODE (expr);
2321 bool changed;
2322 tree e, e0, e1, e2;
2324 if (is_gimple_min_invariant (expr))
2325 return expr;
2327 if (code == TRUTH_OR_EXPR
2328 || code == TRUTH_AND_EXPR
2329 || code == COND_EXPR)
2331 changed = false;
2333 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2334 if (TREE_OPERAND (expr, 0) != e0)
2335 changed = true;
2337 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2338 if (TREE_OPERAND (expr, 1) != e1)
2339 changed = true;
2341 if (code == COND_EXPR)
2343 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2344 if (TREE_OPERAND (expr, 2) != e2)
2345 changed = true;
2347 else
2348 e2 = NULL_TREE;
2350 if (changed)
2352 if (code == COND_EXPR)
2353 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2354 else
2355 expr = fold_build2 (code, boolean_type_node, e0, e1);
2358 return expr;
2361 e = instantiate_parameters (loop, expr);
2362 if (is_gimple_min_invariant (e))
2363 return e;
2365 return expr;
2368 /* Returns true if EXIT is the only possible exit from LOOP. */
2370 bool
2371 loop_only_exit_p (const class loop *loop, const_edge exit)
2373 basic_block *body;
2374 gimple_stmt_iterator bsi;
2375 unsigned i;
2377 if (exit != single_exit (loop))
2378 return false;
2380 body = get_loop_body (loop);
2381 for (i = 0; i < loop->num_nodes; i++)
2383 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2384 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2386 free (body);
2387 return true;
2391 free (body);
2392 return true;
2395 /* Stores description of number of iterations of LOOP derived from
2396 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2397 information could be derived (and fields of NITER have meaning described
2398 in comments at class tree_niter_desc declaration), false otherwise.
2399 When EVERY_ITERATION is true, only tests that are known to be executed
2400 every iteration are considered (i.e. only test that alone bounds the loop).
2401 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2402 it when returning true. */
2404 bool
2405 number_of_iterations_exit_assumptions (class loop *loop, edge exit,
2406 class tree_niter_desc *niter,
2407 gcond **at_stmt, bool every_iteration)
2409 gimple *last;
2410 gcond *stmt;
2411 tree type;
2412 tree op0, op1;
2413 enum tree_code code;
2414 affine_iv iv0, iv1;
2415 bool safe;
2417 /* Nothing to analyze if the loop is known to be infinite. */
2418 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2419 return false;
2421 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2423 if (every_iteration && !safe)
2424 return false;
2426 niter->assumptions = boolean_false_node;
2427 niter->control.base = NULL_TREE;
2428 niter->control.step = NULL_TREE;
2429 niter->control.no_overflow = false;
2430 last = last_stmt (exit->src);
2431 if (!last)
2432 return false;
2433 stmt = dyn_cast <gcond *> (last);
2434 if (!stmt)
2435 return false;
2437 /* We want the condition for staying inside loop. */
2438 code = gimple_cond_code (stmt);
2439 if (exit->flags & EDGE_TRUE_VALUE)
2440 code = invert_tree_comparison (code, false);
2442 switch (code)
2444 case GT_EXPR:
2445 case GE_EXPR:
2446 case LT_EXPR:
2447 case LE_EXPR:
2448 case NE_EXPR:
2449 break;
2451 default:
2452 return false;
2455 op0 = gimple_cond_lhs (stmt);
2456 op1 = gimple_cond_rhs (stmt);
2457 type = TREE_TYPE (op0);
2459 if (TREE_CODE (type) != INTEGER_TYPE
2460 && !POINTER_TYPE_P (type))
2461 return false;
2463 tree iv0_niters = NULL_TREE;
2464 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2465 op0, &iv0, safe ? &iv0_niters : NULL, false))
2466 return number_of_iterations_popcount (loop, exit, code, niter);
2467 tree iv1_niters = NULL_TREE;
2468 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2469 op1, &iv1, safe ? &iv1_niters : NULL, false))
2470 return false;
2471 /* Give up on complicated case. */
2472 if (iv0_niters && iv1_niters)
2473 return false;
2475 /* We don't want to see undefined signed overflow warnings while
2476 computing the number of iterations. */
2477 fold_defer_overflow_warnings ();
2479 iv0.base = expand_simple_operations (iv0.base);
2480 iv1.base = expand_simple_operations (iv1.base);
2481 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2482 loop_only_exit_p (loop, exit), safe))
2484 fold_undefer_and_ignore_overflow_warnings ();
2485 return false;
2488 /* Incorporate additional assumption implied by control iv. */
2489 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2490 if (iv_niters)
2492 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2493 fold_convert (TREE_TYPE (niter->niter),
2494 iv_niters));
2496 if (!integer_nonzerop (assumption))
2497 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2498 niter->assumptions, assumption);
2500 /* Refine upper bound if possible. */
2501 if (TREE_CODE (iv_niters) == INTEGER_CST
2502 && niter->max > wi::to_widest (iv_niters))
2503 niter->max = wi::to_widest (iv_niters);
2506 /* There is no assumptions if the loop is known to be finite. */
2507 if (!integer_zerop (niter->assumptions)
2508 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2509 niter->assumptions = boolean_true_node;
2511 if (optimize >= 3)
2513 niter->assumptions = simplify_using_outer_evolutions (loop,
2514 niter->assumptions);
2515 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2516 niter->may_be_zero);
2517 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2520 niter->assumptions
2521 = simplify_using_initial_conditions (loop,
2522 niter->assumptions);
2523 niter->may_be_zero
2524 = simplify_using_initial_conditions (loop,
2525 niter->may_be_zero);
2527 fold_undefer_and_ignore_overflow_warnings ();
2529 /* If NITER has simplified into a constant, update MAX. */
2530 if (TREE_CODE (niter->niter) == INTEGER_CST)
2531 niter->max = wi::to_widest (niter->niter);
2533 if (at_stmt)
2534 *at_stmt = stmt;
2536 return (!integer_zerop (niter->assumptions));
2540 /* Utility function to check if OP is defined by a stmt
2541 that is a val - 1. */
2543 static bool
2544 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2546 gimple *stmt;
2547 return (TREE_CODE (op) == SSA_NAME
2548 && (stmt = SSA_NAME_DEF_STMT (op))
2549 && is_gimple_assign (stmt)
2550 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2551 && val == gimple_assign_rhs1 (stmt)
2552 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2556 /* See if LOOP is a popcout implementation, determine NITER for the loop
2558 We match:
2559 <bb 2>
2560 goto <bb 4>
2562 <bb 3>
2563 _1 = b_11 + -1
2564 b_6 = _1 & b_11
2566 <bb 4>
2567 b_11 = PHI <b_5(D)(2), b_6(3)>
2569 exit block
2570 if (b_11 != 0)
2571 goto <bb 3>
2572 else
2573 goto <bb 5>
2575 OR we match copy-header version:
2576 if (b_5 != 0)
2577 goto <bb 3>
2578 else
2579 goto <bb 4>
2581 <bb 3>
2582 b_11 = PHI <b_5(2), b_6(3)>
2583 _1 = b_11 + -1
2584 b_6 = _1 & b_11
2586 exit block
2587 if (b_6 != 0)
2588 goto <bb 3>
2589 else
2590 goto <bb 4>
2592 If popcount pattern, update NITER accordingly.
2593 i.e., set NITER to __builtin_popcount (b)
2594 return true if we did, false otherwise.
2598 static bool
2599 number_of_iterations_popcount (loop_p loop, edge exit,
2600 enum tree_code code,
2601 class tree_niter_desc *niter)
2603 bool adjust = true;
2604 tree iter;
2605 HOST_WIDE_INT max;
2606 adjust = true;
2607 tree fn = NULL_TREE;
2609 /* Check loop terminating branch is like
2610 if (b != 0). */
2611 gimple *stmt = last_stmt (exit->src);
2612 if (!stmt
2613 || gimple_code (stmt) != GIMPLE_COND
2614 || code != NE_EXPR
2615 || !integer_zerop (gimple_cond_rhs (stmt))
2616 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2617 return false;
2619 gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2621 /* Depending on copy-header is performed, feeding PHI stmts might be in
2622 the loop header or loop latch, handle this. */
2623 if (gimple_code (and_stmt) == GIMPLE_PHI
2624 && gimple_bb (and_stmt) == loop->header
2625 && gimple_phi_num_args (and_stmt) == 2
2626 && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2627 loop_latch_edge (loop)->dest_idx))
2628 == SSA_NAME))
2630 /* SSA used in exit condition is defined by PHI stmt
2631 b_11 = PHI <b_5(D)(2), b_6(3)>
2632 from the PHI stmt, get the and_stmt
2633 b_6 = _1 & b_11. */
2634 tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2635 and_stmt = SSA_NAME_DEF_STMT (t);
2636 adjust = false;
2639 /* Make sure it is indeed an and stmt (b_6 = _1 & b_11). */
2640 if (!is_gimple_assign (and_stmt)
2641 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2642 return false;
2644 tree b_11 = gimple_assign_rhs1 (and_stmt);
2645 tree _1 = gimple_assign_rhs2 (and_stmt);
2647 /* Check that _1 is defined by _b11 + -1 (_1 = b_11 + -1).
2648 Also make sure that b_11 is the same in and_stmt and _1 defining stmt.
2649 Also canonicalize if _1 and _b11 are revrsed. */
2650 if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2651 std::swap (b_11, _1);
2652 else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2654 else
2655 return false;
2656 /* Check the recurrence:
2657 ... = PHI <b_5(2), b_6(3)>. */
2658 gimple *phi = SSA_NAME_DEF_STMT (b_11);
2659 if (gimple_code (phi) != GIMPLE_PHI
2660 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2661 || (gimple_assign_lhs (and_stmt)
2662 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2663 return false;
2665 /* We found a match. Get the corresponding popcount builtin. */
2666 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2667 if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION (integer_type_node))
2668 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2669 else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2670 (long_integer_type_node))
2671 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2672 else if (TYPE_PRECISION (TREE_TYPE (src)) == TYPE_PRECISION
2673 (long_long_integer_type_node))
2674 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2676 /* ??? Support promoting char/short to int. */
2677 if (!fn)
2678 return false;
2680 /* Update NITER params accordingly */
2681 tree utype = unsigned_type_for (TREE_TYPE (src));
2682 src = fold_convert (utype, src);
2683 tree call = fold_convert (utype, build_call_expr (fn, 1, src));
2684 if (adjust)
2685 iter = fold_build2 (MINUS_EXPR, utype,
2686 call,
2687 build_int_cst (utype, 1));
2688 else
2689 iter = call;
2691 if (TREE_CODE (call) == INTEGER_CST)
2692 max = tree_to_uhwi (call);
2693 else
2694 max = TYPE_PRECISION (TREE_TYPE (src));
2695 if (adjust)
2696 max = max - 1;
2698 niter->niter = iter;
2699 niter->assumptions = boolean_true_node;
2701 if (adjust)
2703 tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2704 build_zero_cst
2705 (TREE_TYPE (src)));
2706 niter->may_be_zero =
2707 simplify_using_initial_conditions (loop, may_be_zero);
2709 else
2710 niter->may_be_zero = boolean_false_node;
2712 niter->max = max;
2713 niter->bound = NULL_TREE;
2714 niter->cmp = ERROR_MARK;
2715 return true;
2719 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2720 the niter information holds unconditionally. */
2722 bool
2723 number_of_iterations_exit (class loop *loop, edge exit,
2724 class tree_niter_desc *niter,
2725 bool warn, bool every_iteration)
2727 gcond *stmt;
2728 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2729 &stmt, every_iteration))
2730 return false;
2732 if (integer_nonzerop (niter->assumptions))
2733 return true;
2735 if (warn && dump_enabled_p ())
2736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2737 "missed loop optimization: niters analysis ends up "
2738 "with assumptions.\n");
2740 return false;
2743 /* Try to determine the number of iterations of LOOP. If we succeed,
2744 expression giving number of iterations is returned and *EXIT is
2745 set to the edge from that the information is obtained. Otherwise
2746 chrec_dont_know is returned. */
2748 tree
2749 find_loop_niter (class loop *loop, edge *exit)
2751 unsigned i;
2752 vec<edge> exits = get_loop_exit_edges (loop);
2753 edge ex;
2754 tree niter = NULL_TREE, aniter;
2755 class tree_niter_desc desc;
2757 *exit = NULL;
2758 FOR_EACH_VEC_ELT (exits, i, ex)
2760 if (!number_of_iterations_exit (loop, ex, &desc, false))
2761 continue;
2763 if (integer_nonzerop (desc.may_be_zero))
2765 /* We exit in the first iteration through this exit.
2766 We won't find anything better. */
2767 niter = build_int_cst (unsigned_type_node, 0);
2768 *exit = ex;
2769 break;
2772 if (!integer_zerop (desc.may_be_zero))
2773 continue;
2775 aniter = desc.niter;
2777 if (!niter)
2779 /* Nothing recorded yet. */
2780 niter = aniter;
2781 *exit = ex;
2782 continue;
2785 /* Prefer constants, the lower the better. */
2786 if (TREE_CODE (aniter) != INTEGER_CST)
2787 continue;
2789 if (TREE_CODE (niter) != INTEGER_CST)
2791 niter = aniter;
2792 *exit = ex;
2793 continue;
2796 if (tree_int_cst_lt (aniter, niter))
2798 niter = aniter;
2799 *exit = ex;
2800 continue;
2803 exits.release ();
2805 return niter ? niter : chrec_dont_know;
2808 /* Return true if loop is known to have bounded number of iterations. */
2810 bool
2811 finite_loop_p (class loop *loop)
2813 widest_int nit;
2814 int flags;
2816 flags = flags_from_decl_or_type (current_function_decl);
2817 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2819 if (dump_file && (dump_flags & TDF_DETAILS))
2820 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2821 loop->num);
2822 return true;
2825 if (loop->any_upper_bound
2826 || max_loop_iterations (loop, &nit))
2828 if (dump_file && (dump_flags & TDF_DETAILS))
2829 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2830 loop->num);
2831 return true;
2834 if (flag_finite_loops)
2836 unsigned i;
2837 vec<edge> exits = get_loop_exit_edges (loop);
2838 edge ex;
2840 /* If the loop has a normal exit, we can assume it will terminate. */
2841 FOR_EACH_VEC_ELT (exits, i, ex)
2842 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
2844 exits.release ();
2845 if (dump_file)
2846 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
2847 "and -ffinite-loops is on.\n", loop->num);
2848 return true;
2851 exits.release ();
2854 return false;
2859 Analysis of a number of iterations of a loop by a brute-force evaluation.
2863 /* Bound on the number of iterations we try to evaluate. */
2865 #define MAX_ITERATIONS_TO_TRACK \
2866 ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
2868 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2869 result by a chain of operations such that all but exactly one of their
2870 operands are constants. */
2872 static gphi *
2873 chain_of_csts_start (class loop *loop, tree x)
2875 gimple *stmt = SSA_NAME_DEF_STMT (x);
2876 tree use;
2877 basic_block bb = gimple_bb (stmt);
2878 enum tree_code code;
2880 if (!bb
2881 || !flow_bb_inside_loop_p (loop, bb))
2882 return NULL;
2884 if (gimple_code (stmt) == GIMPLE_PHI)
2886 if (bb == loop->header)
2887 return as_a <gphi *> (stmt);
2889 return NULL;
2892 if (gimple_code (stmt) != GIMPLE_ASSIGN
2893 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2894 return NULL;
2896 code = gimple_assign_rhs_code (stmt);
2897 if (gimple_references_memory_p (stmt)
2898 || TREE_CODE_CLASS (code) == tcc_reference
2899 || (code == ADDR_EXPR
2900 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2901 return NULL;
2903 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2904 if (use == NULL_TREE)
2905 return NULL;
2907 return chain_of_csts_start (loop, use);
2910 /* Determines whether the expression X is derived from a result of a phi node
2911 in header of LOOP such that
2913 * the derivation of X consists only from operations with constants
2914 * the initial value of the phi node is constant
2915 * the value of the phi node in the next iteration can be derived from the
2916 value in the current iteration by a chain of operations with constants,
2917 or is also a constant
2919 If such phi node exists, it is returned, otherwise NULL is returned. */
2921 static gphi *
2922 get_base_for (class loop *loop, tree x)
2924 gphi *phi;
2925 tree init, next;
2927 if (is_gimple_min_invariant (x))
2928 return NULL;
2930 phi = chain_of_csts_start (loop, x);
2931 if (!phi)
2932 return NULL;
2934 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2935 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2937 if (!is_gimple_min_invariant (init))
2938 return NULL;
2940 if (TREE_CODE (next) == SSA_NAME
2941 && chain_of_csts_start (loop, next) != phi)
2942 return NULL;
2944 return phi;
2947 /* Given an expression X, then
2949 * if X is NULL_TREE, we return the constant BASE.
2950 * if X is a constant, we return the constant X.
2951 * otherwise X is a SSA name, whose value in the considered loop is derived
2952 by a chain of operations with constant from a result of a phi node in
2953 the header of the loop. Then we return value of X when the value of the
2954 result of this phi node is given by the constant BASE. */
2956 static tree
2957 get_val_for (tree x, tree base)
2959 gimple *stmt;
2961 gcc_checking_assert (is_gimple_min_invariant (base));
2963 if (!x)
2964 return base;
2965 else if (is_gimple_min_invariant (x))
2966 return x;
2968 stmt = SSA_NAME_DEF_STMT (x);
2969 if (gimple_code (stmt) == GIMPLE_PHI)
2970 return base;
2972 gcc_checking_assert (is_gimple_assign (stmt));
2974 /* STMT must be either an assignment of a single SSA name or an
2975 expression involving an SSA name and a constant. Try to fold that
2976 expression using the value for the SSA name. */
2977 if (gimple_assign_ssa_name_copy_p (stmt))
2978 return get_val_for (gimple_assign_rhs1 (stmt), base);
2979 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
2980 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
2981 return fold_build1 (gimple_assign_rhs_code (stmt),
2982 gimple_expr_type (stmt),
2983 get_val_for (gimple_assign_rhs1 (stmt), base));
2984 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
2986 tree rhs1 = gimple_assign_rhs1 (stmt);
2987 tree rhs2 = gimple_assign_rhs2 (stmt);
2988 if (TREE_CODE (rhs1) == SSA_NAME)
2989 rhs1 = get_val_for (rhs1, base);
2990 else if (TREE_CODE (rhs2) == SSA_NAME)
2991 rhs2 = get_val_for (rhs2, base);
2992 else
2993 gcc_unreachable ();
2994 return fold_build2 (gimple_assign_rhs_code (stmt),
2995 gimple_expr_type (stmt), rhs1, rhs2);
2997 else
2998 gcc_unreachable ();
3002 /* Tries to count the number of iterations of LOOP till it exits by EXIT
3003 by brute force -- i.e. by determining the value of the operands of the
3004 condition at EXIT in first few iterations of the loop (assuming that
3005 these values are constant) and determining the first one in that the
3006 condition is not satisfied. Returns the constant giving the number
3007 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
3009 tree
3010 loop_niter_by_eval (class loop *loop, edge exit)
3012 tree acnd;
3013 tree op[2], val[2], next[2], aval[2];
3014 gphi *phi;
3015 gimple *cond;
3016 unsigned i, j;
3017 enum tree_code cmp;
3019 cond = last_stmt (exit->src);
3020 if (!cond || gimple_code (cond) != GIMPLE_COND)
3021 return chrec_dont_know;
3023 cmp = gimple_cond_code (cond);
3024 if (exit->flags & EDGE_TRUE_VALUE)
3025 cmp = invert_tree_comparison (cmp, false);
3027 switch (cmp)
3029 case EQ_EXPR:
3030 case NE_EXPR:
3031 case GT_EXPR:
3032 case GE_EXPR:
3033 case LT_EXPR:
3034 case LE_EXPR:
3035 op[0] = gimple_cond_lhs (cond);
3036 op[1] = gimple_cond_rhs (cond);
3037 break;
3039 default:
3040 return chrec_dont_know;
3043 for (j = 0; j < 2; j++)
3045 if (is_gimple_min_invariant (op[j]))
3047 val[j] = op[j];
3048 next[j] = NULL_TREE;
3049 op[j] = NULL_TREE;
3051 else
3053 phi = get_base_for (loop, op[j]);
3054 if (!phi)
3055 return chrec_dont_know;
3056 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3057 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3061 /* Don't issue signed overflow warnings. */
3062 fold_defer_overflow_warnings ();
3064 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3066 for (j = 0; j < 2; j++)
3067 aval[j] = get_val_for (op[j], val[j]);
3069 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3070 if (acnd && integer_zerop (acnd))
3072 fold_undefer_and_ignore_overflow_warnings ();
3073 if (dump_file && (dump_flags & TDF_DETAILS))
3074 fprintf (dump_file,
3075 "Proved that loop %d iterates %d times using brute force.\n",
3076 loop->num, i);
3077 return build_int_cst (unsigned_type_node, i);
3080 for (j = 0; j < 2; j++)
3082 aval[j] = val[j];
3083 val[j] = get_val_for (next[j], val[j]);
3084 if (!is_gimple_min_invariant (val[j]))
3086 fold_undefer_and_ignore_overflow_warnings ();
3087 return chrec_dont_know;
3091 /* If the next iteration would use the same base values
3092 as the current one, there is no point looping further,
3093 all following iterations will be the same as this one. */
3094 if (val[0] == aval[0] && val[1] == aval[1])
3095 break;
3098 fold_undefer_and_ignore_overflow_warnings ();
3100 return chrec_dont_know;
3103 /* Finds the exit of the LOOP by that the loop exits after a constant
3104 number of iterations and stores the exit edge to *EXIT. The constant
3105 giving the number of iterations of LOOP is returned. The number of
3106 iterations is determined using loop_niter_by_eval (i.e. by brute force
3107 evaluation). If we are unable to find the exit for that loop_niter_by_eval
3108 determines the number of iterations, chrec_dont_know is returned. */
3110 tree
3111 find_loop_niter_by_eval (class loop *loop, edge *exit)
3113 unsigned i;
3114 vec<edge> exits = get_loop_exit_edges (loop);
3115 edge ex;
3116 tree niter = NULL_TREE, aniter;
3118 *exit = NULL;
3120 /* Loops with multiple exits are expensive to handle and less important. */
3121 if (!flag_expensive_optimizations
3122 && exits.length () > 1)
3124 exits.release ();
3125 return chrec_dont_know;
3128 FOR_EACH_VEC_ELT (exits, i, ex)
3130 if (!just_once_each_iteration_p (loop, ex->src))
3131 continue;
3133 aniter = loop_niter_by_eval (loop, ex);
3134 if (chrec_contains_undetermined (aniter))
3135 continue;
3137 if (niter
3138 && !tree_int_cst_lt (aniter, niter))
3139 continue;
3141 niter = aniter;
3142 *exit = ex;
3144 exits.release ();
3146 return niter ? niter : chrec_dont_know;
3151 Analysis of upper bounds on number of iterations of a loop.
3155 static widest_int derive_constant_upper_bound_ops (tree, tree,
3156 enum tree_code, tree);
3158 /* Returns a constant upper bound on the value of the right-hand side of
3159 an assignment statement STMT. */
3161 static widest_int
3162 derive_constant_upper_bound_assign (gimple *stmt)
3164 enum tree_code code = gimple_assign_rhs_code (stmt);
3165 tree op0 = gimple_assign_rhs1 (stmt);
3166 tree op1 = gimple_assign_rhs2 (stmt);
3168 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3169 op0, code, op1);
3172 /* Returns a constant upper bound on the value of expression VAL. VAL
3173 is considered to be unsigned. If its type is signed, its value must
3174 be nonnegative. */
3176 static widest_int
3177 derive_constant_upper_bound (tree val)
3179 enum tree_code code;
3180 tree op0, op1, op2;
3182 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3183 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3186 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3187 whose type is TYPE. The expression is considered to be unsigned. If
3188 its type is signed, its value must be nonnegative. */
3190 static widest_int
3191 derive_constant_upper_bound_ops (tree type, tree op0,
3192 enum tree_code code, tree op1)
3194 tree subtype, maxt;
3195 widest_int bnd, max, cst;
3196 gimple *stmt;
3198 if (INTEGRAL_TYPE_P (type))
3199 maxt = TYPE_MAX_VALUE (type);
3200 else
3201 maxt = upper_bound_in_type (type, type);
3203 max = wi::to_widest (maxt);
3205 switch (code)
3207 case INTEGER_CST:
3208 return wi::to_widest (op0);
3210 CASE_CONVERT:
3211 subtype = TREE_TYPE (op0);
3212 if (!TYPE_UNSIGNED (subtype)
3213 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3214 that OP0 is nonnegative. */
3215 && TYPE_UNSIGNED (type)
3216 && !tree_expr_nonnegative_p (op0))
3218 /* If we cannot prove that the casted expression is nonnegative,
3219 we cannot establish more useful upper bound than the precision
3220 of the type gives us. */
3221 return max;
3224 /* We now know that op0 is an nonnegative value. Try deriving an upper
3225 bound for it. */
3226 bnd = derive_constant_upper_bound (op0);
3228 /* If the bound does not fit in TYPE, max. value of TYPE could be
3229 attained. */
3230 if (wi::ltu_p (max, bnd))
3231 return max;
3233 return bnd;
3235 case PLUS_EXPR:
3236 case POINTER_PLUS_EXPR:
3237 case MINUS_EXPR:
3238 if (TREE_CODE (op1) != INTEGER_CST
3239 || !tree_expr_nonnegative_p (op0))
3240 return max;
3242 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3243 choose the most logical way how to treat this constant regardless
3244 of the signedness of the type. */
3245 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3246 if (code != MINUS_EXPR)
3247 cst = -cst;
3249 bnd = derive_constant_upper_bound (op0);
3251 if (wi::neg_p (cst))
3253 cst = -cst;
3254 /* Avoid CST == 0x80000... */
3255 if (wi::neg_p (cst))
3256 return max;
3258 /* OP0 + CST. We need to check that
3259 BND <= MAX (type) - CST. */
3261 widest_int mmax = max - cst;
3262 if (wi::leu_p (bnd, mmax))
3263 return max;
3265 return bnd + cst;
3267 else
3269 /* OP0 - CST, where CST >= 0.
3271 If TYPE is signed, we have already verified that OP0 >= 0, and we
3272 know that the result is nonnegative. This implies that
3273 VAL <= BND - CST.
3275 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3276 otherwise the operation underflows.
3279 /* This should only happen if the type is unsigned; however, for
3280 buggy programs that use overflowing signed arithmetics even with
3281 -fno-wrapv, this condition may also be true for signed values. */
3282 if (wi::ltu_p (bnd, cst))
3283 return max;
3285 if (TYPE_UNSIGNED (type))
3287 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3288 wide_int_to_tree (type, cst));
3289 if (!tem || integer_nonzerop (tem))
3290 return max;
3293 bnd -= cst;
3296 return bnd;
3298 case FLOOR_DIV_EXPR:
3299 case EXACT_DIV_EXPR:
3300 if (TREE_CODE (op1) != INTEGER_CST
3301 || tree_int_cst_sign_bit (op1))
3302 return max;
3304 bnd = derive_constant_upper_bound (op0);
3305 return wi::udiv_floor (bnd, wi::to_widest (op1));
3307 case BIT_AND_EXPR:
3308 if (TREE_CODE (op1) != INTEGER_CST
3309 || tree_int_cst_sign_bit (op1))
3310 return max;
3311 return wi::to_widest (op1);
3313 case SSA_NAME:
3314 stmt = SSA_NAME_DEF_STMT (op0);
3315 if (gimple_code (stmt) != GIMPLE_ASSIGN
3316 || gimple_assign_lhs (stmt) != op0)
3317 return max;
3318 return derive_constant_upper_bound_assign (stmt);
3320 default:
3321 return max;
3325 /* Emit a -Waggressive-loop-optimizations warning if needed. */
3327 static void
3328 do_warn_aggressive_loop_optimizations (class loop *loop,
3329 widest_int i_bound, gimple *stmt)
3331 /* Don't warn if the loop doesn't have known constant bound. */
3332 if (!loop->nb_iterations
3333 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3334 || !warn_aggressive_loop_optimizations
3335 /* To avoid warning multiple times for the same loop,
3336 only start warning when we preserve loops. */
3337 || (cfun->curr_properties & PROP_loops) == 0
3338 /* Only warn once per loop. */
3339 || loop->warned_aggressive_loop_optimizations
3340 /* Only warn if undefined behavior gives us lower estimate than the
3341 known constant bound. */
3342 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3343 /* And undefined behavior happens unconditionally. */
3344 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3345 return;
3347 edge e = single_exit (loop);
3348 if (e == NULL)
3349 return;
3351 gimple *estmt = last_stmt (e->src);
3352 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3353 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3354 ? UNSIGNED : SIGNED);
3355 auto_diagnostic_group d;
3356 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3357 "iteration %s invokes undefined behavior", buf))
3358 inform (gimple_location (estmt), "within this loop");
3359 loop->warned_aggressive_loop_optimizations = true;
3362 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3363 is true if the loop is exited immediately after STMT, and this exit
3364 is taken at last when the STMT is executed BOUND + 1 times.
3365 REALISTIC is true if BOUND is expected to be close to the real number
3366 of iterations. UPPER is true if we are sure the loop iterates at most
3367 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3369 static void
3370 record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
3371 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3373 widest_int delta;
3375 if (dump_file && (dump_flags & TDF_DETAILS))
3377 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3378 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3379 fprintf (dump_file, " is %sexecuted at most ",
3380 upper ? "" : "probably ");
3381 print_generic_expr (dump_file, bound, TDF_SLIM);
3382 fprintf (dump_file, " (bounded by ");
3383 print_decu (i_bound, dump_file);
3384 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3387 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
3388 real number of iterations. */
3389 if (TREE_CODE (bound) != INTEGER_CST)
3390 realistic = false;
3391 else
3392 gcc_checking_assert (i_bound == wi::to_widest (bound));
3394 /* If we have a guaranteed upper bound, record it in the appropriate
3395 list, unless this is an !is_exit bound (i.e. undefined behavior in
3396 at_stmt) in a loop with known constant number of iterations. */
3397 if (upper
3398 && (is_exit
3399 || loop->nb_iterations == NULL_TREE
3400 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3402 class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3404 elt->bound = i_bound;
3405 elt->stmt = at_stmt;
3406 elt->is_exit = is_exit;
3407 elt->next = loop->bounds;
3408 loop->bounds = elt;
3411 /* If statement is executed on every path to the loop latch, we can directly
3412 infer the upper bound on the # of iterations of the loop. */
3413 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3414 upper = false;
3416 /* Update the number of iteration estimates according to the bound.
3417 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3418 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3419 later if such statement must be executed on last iteration */
3420 if (is_exit)
3421 delta = 0;
3422 else
3423 delta = 1;
3424 widest_int new_i_bound = i_bound + delta;
3426 /* If an overflow occurred, ignore the result. */
3427 if (wi::ltu_p (new_i_bound, delta))
3428 return;
3430 if (upper && !is_exit)
3431 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3432 record_niter_bound (loop, new_i_bound, realistic, upper);
3435 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3436 and doesn't overflow. */
3438 static void
3439 record_control_iv (class loop *loop, class tree_niter_desc *niter)
3441 struct control_iv *iv;
3443 if (!niter->control.base || !niter->control.step)
3444 return;
3446 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3447 return;
3449 iv = ggc_alloc<control_iv> ();
3450 iv->base = niter->control.base;
3451 iv->step = niter->control.step;
3452 iv->next = loop->control_ivs;
3453 loop->control_ivs = iv;
3455 return;
3458 /* This function returns TRUE if below conditions are satisfied:
3459 1) VAR is SSA variable.
3460 2) VAR is an IV:{base, step} in its defining loop.
3461 3) IV doesn't overflow.
3462 4) Both base and step are integer constants.
3463 5) Base is the MIN/MAX value depends on IS_MIN.
3464 Store value of base to INIT correspondingly. */
3466 static bool
3467 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3469 if (TREE_CODE (var) != SSA_NAME)
3470 return false;
3472 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3473 class loop *loop = loop_containing_stmt (def_stmt);
3475 if (loop == NULL)
3476 return false;
3478 affine_iv iv;
3479 if (!simple_iv (loop, loop, var, &iv, false))
3480 return false;
3482 if (!iv.no_overflow)
3483 return false;
3485 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3486 return false;
3488 if (is_min == tree_int_cst_sign_bit (iv.step))
3489 return false;
3491 *init = wi::to_wide (iv.base);
3492 return true;
3495 /* Record the estimate on number of iterations of LOOP based on the fact that
3496 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3497 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3498 estimated number of iterations is expected to be close to the real one.
3499 UPPER is true if we are sure the induction variable does not wrap. */
3501 static void
3502 record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
3503 tree low, tree high, bool realistic, bool upper)
3505 tree niter_bound, extreme, delta;
3506 tree type = TREE_TYPE (base), unsigned_type;
3507 tree orig_base = base;
3509 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3510 return;
3512 if (dump_file && (dump_flags & TDF_DETAILS))
3514 fprintf (dump_file, "Induction variable (");
3515 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3516 fprintf (dump_file, ") ");
3517 print_generic_expr (dump_file, base, TDF_SLIM);
3518 fprintf (dump_file, " + ");
3519 print_generic_expr (dump_file, step, TDF_SLIM);
3520 fprintf (dump_file, " * iteration does not wrap in statement ");
3521 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3522 fprintf (dump_file, " in loop %d.\n", loop->num);
3525 unsigned_type = unsigned_type_for (type);
3526 base = fold_convert (unsigned_type, base);
3527 step = fold_convert (unsigned_type, step);
3529 if (tree_int_cst_sign_bit (step))
3531 wide_int min, max;
3532 extreme = fold_convert (unsigned_type, low);
3533 if (TREE_CODE (orig_base) == SSA_NAME
3534 && TREE_CODE (high) == INTEGER_CST
3535 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3536 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3537 || get_cst_init_from_scev (orig_base, &max, false))
3538 && wi::gts_p (wi::to_wide (high), max))
3539 base = wide_int_to_tree (unsigned_type, max);
3540 else if (TREE_CODE (base) != INTEGER_CST
3541 && dominated_by_p (CDI_DOMINATORS,
3542 loop->latch, gimple_bb (stmt)))
3543 base = fold_convert (unsigned_type, high);
3544 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3545 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3547 else
3549 wide_int min, max;
3550 extreme = fold_convert (unsigned_type, high);
3551 if (TREE_CODE (orig_base) == SSA_NAME
3552 && TREE_CODE (low) == INTEGER_CST
3553 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3554 && (get_range_info (orig_base, &min, &max) == VR_RANGE
3555 || get_cst_init_from_scev (orig_base, &min, true))
3556 && wi::gts_p (min, wi::to_wide (low)))
3557 base = wide_int_to_tree (unsigned_type, min);
3558 else if (TREE_CODE (base) != INTEGER_CST
3559 && dominated_by_p (CDI_DOMINATORS,
3560 loop->latch, gimple_bb (stmt)))
3561 base = fold_convert (unsigned_type, low);
3562 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3565 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3566 would get out of the range. */
3567 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3568 widest_int max = derive_constant_upper_bound (niter_bound);
3569 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3572 /* Determine information about number of iterations a LOOP from the index
3573 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
3574 guaranteed to be executed in every iteration of LOOP. Callback for
3575 for_each_index. */
3577 struct ilb_data
3579 class loop *loop;
3580 gimple *stmt;
3583 static bool
3584 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3586 struct ilb_data *data = (struct ilb_data *) dta;
3587 tree ev, init, step;
3588 tree low, high, type, next;
3589 bool sign, upper = true, at_end = false;
3590 class loop *loop = data->loop;
3592 if (TREE_CODE (base) != ARRAY_REF)
3593 return true;
3595 /* For arrays at the end of the structure, we are not guaranteed that they
3596 do not really extend over their declared size. However, for arrays of
3597 size greater than one, this is unlikely to be intended. */
3598 if (array_at_struct_end_p (base))
3600 at_end = true;
3601 upper = false;
3604 class loop *dloop = loop_containing_stmt (data->stmt);
3605 if (!dloop)
3606 return true;
3608 ev = analyze_scalar_evolution (dloop, *idx);
3609 ev = instantiate_parameters (loop, ev);
3610 init = initial_condition (ev);
3611 step = evolution_part_in_loop_num (ev, loop->num);
3613 if (!init
3614 || !step
3615 || TREE_CODE (step) != INTEGER_CST
3616 || integer_zerop (step)
3617 || tree_contains_chrecs (init, NULL)
3618 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3619 return true;
3621 low = array_ref_low_bound (base);
3622 high = array_ref_up_bound (base);
3624 /* The case of nonconstant bounds could be handled, but it would be
3625 complicated. */
3626 if (TREE_CODE (low) != INTEGER_CST
3627 || !high
3628 || TREE_CODE (high) != INTEGER_CST)
3629 return true;
3630 sign = tree_int_cst_sign_bit (step);
3631 type = TREE_TYPE (step);
3633 /* The array of length 1 at the end of a structure most likely extends
3634 beyond its bounds. */
3635 if (at_end
3636 && operand_equal_p (low, high, 0))
3637 return true;
3639 /* In case the relevant bound of the array does not fit in type, or
3640 it does, but bound + step (in type) still belongs into the range of the
3641 array, the index may wrap and still stay within the range of the array
3642 (consider e.g. if the array is indexed by the full range of
3643 unsigned char).
3645 To make things simpler, we require both bounds to fit into type, although
3646 there are cases where this would not be strictly necessary. */
3647 if (!int_fits_type_p (high, type)
3648 || !int_fits_type_p (low, type))
3649 return true;
3650 low = fold_convert (type, low);
3651 high = fold_convert (type, high);
3653 if (sign)
3654 next = fold_binary (PLUS_EXPR, type, low, step);
3655 else
3656 next = fold_binary (PLUS_EXPR, type, high, step);
3658 if (tree_int_cst_compare (low, next) <= 0
3659 && tree_int_cst_compare (next, high) <= 0)
3660 return true;
3662 /* If access is not executed on every iteration, we must ensure that overlow
3663 may not make the access valid later. */
3664 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3665 && scev_probably_wraps_p (NULL_TREE,
3666 initial_condition_in_loop_num (ev, loop->num),
3667 step, data->stmt, loop, true))
3668 upper = false;
3670 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3671 return true;
3674 /* Determine information about number of iterations a LOOP from the bounds
3675 of arrays in the data reference REF accessed in STMT. RELIABLE is true if
3676 STMT is guaranteed to be executed in every iteration of LOOP.*/
3678 static void
3679 infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
3681 struct ilb_data data;
3683 data.loop = loop;
3684 data.stmt = stmt;
3685 for_each_index (&ref, idx_infer_loop_bounds, &data);
3688 /* Determine information about number of iterations of a LOOP from the way
3689 arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
3690 executed in every iteration of LOOP. */
3692 static void
3693 infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
3695 if (is_gimple_assign (stmt))
3697 tree op0 = gimple_assign_lhs (stmt);
3698 tree op1 = gimple_assign_rhs1 (stmt);
3700 /* For each memory access, analyze its access function
3701 and record a bound on the loop iteration domain. */
3702 if (REFERENCE_CLASS_P (op0))
3703 infer_loop_bounds_from_ref (loop, stmt, op0);
3705 if (REFERENCE_CLASS_P (op1))
3706 infer_loop_bounds_from_ref (loop, stmt, op1);
3708 else if (is_gimple_call (stmt))
3710 tree arg, lhs;
3711 unsigned i, n = gimple_call_num_args (stmt);
3713 lhs = gimple_call_lhs (stmt);
3714 if (lhs && REFERENCE_CLASS_P (lhs))
3715 infer_loop_bounds_from_ref (loop, stmt, lhs);
3717 for (i = 0; i < n; i++)
3719 arg = gimple_call_arg (stmt, i);
3720 if (REFERENCE_CLASS_P (arg))
3721 infer_loop_bounds_from_ref (loop, stmt, arg);
3726 /* Determine information about number of iterations of a LOOP from the fact
3727 that pointer arithmetics in STMT does not overflow. */
3729 static void
3730 infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
3732 tree def, base, step, scev, type, low, high;
3733 tree var, ptr;
3735 if (!is_gimple_assign (stmt)
3736 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3737 return;
3739 def = gimple_assign_lhs (stmt);
3740 if (TREE_CODE (def) != SSA_NAME)
3741 return;
3743 type = TREE_TYPE (def);
3744 if (!nowrap_type_p (type))
3745 return;
3747 ptr = gimple_assign_rhs1 (stmt);
3748 if (!expr_invariant_in_loop_p (loop, ptr))
3749 return;
3751 var = gimple_assign_rhs2 (stmt);
3752 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3753 return;
3755 class loop *uloop = loop_containing_stmt (stmt);
3756 scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3757 if (chrec_contains_undetermined (scev))
3758 return;
3760 base = initial_condition_in_loop_num (scev, loop->num);
3761 step = evolution_part_in_loop_num (scev, loop->num);
3763 if (!base || !step
3764 || TREE_CODE (step) != INTEGER_CST
3765 || tree_contains_chrecs (base, NULL)
3766 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3767 return;
3769 low = lower_bound_in_type (type, type);
3770 high = upper_bound_in_type (type, type);
3772 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3773 produce a NULL pointer. The contrary would mean NULL points to an object,
3774 while NULL is supposed to compare unequal with the address of all objects.
3775 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3776 NULL pointer since that would mean wrapping, which we assume here not to
3777 happen. So, we can exclude NULL from the valid range of pointer
3778 arithmetic. */
3779 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3780 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3782 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3785 /* Determine information about number of iterations of a LOOP from the fact
3786 that signed arithmetics in STMT does not overflow. */
3788 static void
3789 infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
3791 tree def, base, step, scev, type, low, high;
3793 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3794 return;
3796 def = gimple_assign_lhs (stmt);
3798 if (TREE_CODE (def) != SSA_NAME)
3799 return;
3801 type = TREE_TYPE (def);
3802 if (!INTEGRAL_TYPE_P (type)
3803 || !TYPE_OVERFLOW_UNDEFINED (type))
3804 return;
3806 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3807 if (chrec_contains_undetermined (scev))
3808 return;
3810 base = initial_condition_in_loop_num (scev, loop->num);
3811 step = evolution_part_in_loop_num (scev, loop->num);
3813 if (!base || !step
3814 || TREE_CODE (step) != INTEGER_CST
3815 || tree_contains_chrecs (base, NULL)
3816 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3817 return;
3819 low = lower_bound_in_type (type, type);
3820 high = upper_bound_in_type (type, type);
3821 wide_int minv, maxv;
3822 if (get_range_info (def, &minv, &maxv) == VR_RANGE)
3824 low = wide_int_to_tree (type, minv);
3825 high = wide_int_to_tree (type, maxv);
3828 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3831 /* The following analyzers are extracting informations on the bounds
3832 of LOOP from the following undefined behaviors:
3834 - data references should not access elements over the statically
3835 allocated size,
3837 - signed variables should not overflow when flag_wrapv is not set.
3840 static void
3841 infer_loop_bounds_from_undefined (class loop *loop)
3843 unsigned i;
3844 basic_block *bbs;
3845 gimple_stmt_iterator bsi;
3846 basic_block bb;
3847 bool reliable;
3849 bbs = get_loop_body (loop);
3851 for (i = 0; i < loop->num_nodes; i++)
3853 bb = bbs[i];
3855 /* If BB is not executed in each iteration of the loop, we cannot
3856 use the operations in it to infer reliable upper bound on the
3857 # of iterations of the loop. However, we can use it as a guess.
3858 Reliable guesses come only from array bounds. */
3859 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3861 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3863 gimple *stmt = gsi_stmt (bsi);
3865 infer_loop_bounds_from_array (loop, stmt);
3867 if (reliable)
3869 infer_loop_bounds_from_signedness (loop, stmt);
3870 infer_loop_bounds_from_pointer_arith (loop, stmt);
3876 free (bbs);
3879 /* Compare wide ints, callback for qsort. */
3881 static int
3882 wide_int_cmp (const void *p1, const void *p2)
3884 const widest_int *d1 = (const widest_int *) p1;
3885 const widest_int *d2 = (const widest_int *) p2;
3886 return wi::cmpu (*d1, *d2);
3889 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3890 Lookup by binary search. */
3892 static int
3893 bound_index (vec<widest_int> bounds, const widest_int &bound)
3895 unsigned int end = bounds.length ();
3896 unsigned int begin = 0;
3898 /* Find a matching index by means of a binary search. */
3899 while (begin != end)
3901 unsigned int middle = (begin + end) / 2;
3902 widest_int index = bounds[middle];
3904 if (index == bound)
3905 return middle;
3906 else if (wi::ltu_p (index, bound))
3907 begin = middle + 1;
3908 else
3909 end = middle;
3911 gcc_unreachable ();
3914 /* We recorded loop bounds only for statements dominating loop latch (and thus
3915 executed each loop iteration). If there are any bounds on statements not
3916 dominating the loop latch we can improve the estimate by walking the loop
3917 body and seeing if every path from loop header to loop latch contains
3918 some bounded statement. */
3920 static void
3921 discover_iteration_bound_by_body_walk (class loop *loop)
3923 class nb_iter_bound *elt;
3924 auto_vec<widest_int> bounds;
3925 vec<vec<basic_block> > queues = vNULL;
3926 vec<basic_block> queue = vNULL;
3927 ptrdiff_t queue_index;
3928 ptrdiff_t latch_index = 0;
3930 /* Discover what bounds may interest us. */
3931 for (elt = loop->bounds; elt; elt = elt->next)
3933 widest_int bound = elt->bound;
3935 /* Exit terminates loop at given iteration, while non-exits produce undefined
3936 effect on the next iteration. */
3937 if (!elt->is_exit)
3939 bound += 1;
3940 /* If an overflow occurred, ignore the result. */
3941 if (bound == 0)
3942 continue;
3945 if (!loop->any_upper_bound
3946 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3947 bounds.safe_push (bound);
3950 /* Exit early if there is nothing to do. */
3951 if (!bounds.exists ())
3952 return;
3954 if (dump_file && (dump_flags & TDF_DETAILS))
3955 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3957 /* Sort the bounds in decreasing order. */
3958 bounds.qsort (wide_int_cmp);
3960 /* For every basic block record the lowest bound that is guaranteed to
3961 terminate the loop. */
3963 hash_map<basic_block, ptrdiff_t> bb_bounds;
3964 for (elt = loop->bounds; elt; elt = elt->next)
3966 widest_int bound = elt->bound;
3967 if (!elt->is_exit)
3969 bound += 1;
3970 /* If an overflow occurred, ignore the result. */
3971 if (bound == 0)
3972 continue;
3975 if (!loop->any_upper_bound
3976 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3978 ptrdiff_t index = bound_index (bounds, bound);
3979 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
3980 if (!entry)
3981 bb_bounds.put (gimple_bb (elt->stmt), index);
3982 else if ((ptrdiff_t)*entry > index)
3983 *entry = index;
3987 hash_map<basic_block, ptrdiff_t> block_priority;
3989 /* Perform shortest path discovery loop->header ... loop->latch.
3991 The "distance" is given by the smallest loop bound of basic block
3992 present in the path and we look for path with largest smallest bound
3993 on it.
3995 To avoid the need for fibonacci heap on double ints we simply compress
3996 double ints into indexes to BOUNDS array and then represent the queue
3997 as arrays of queues for every index.
3998 Index of BOUNDS.length() means that the execution of given BB has
3999 no bounds determined.
4001 VISITED is a pointer map translating basic block into smallest index
4002 it was inserted into the priority queue with. */
4003 latch_index = -1;
4005 /* Start walk in loop header with index set to infinite bound. */
4006 queue_index = bounds.length ();
4007 queues.safe_grow_cleared (queue_index + 1);
4008 queue.safe_push (loop->header);
4009 queues[queue_index] = queue;
4010 block_priority.put (loop->header, queue_index);
4012 for (; queue_index >= 0; queue_index--)
4014 if (latch_index < queue_index)
4016 while (queues[queue_index].length ())
4018 basic_block bb;
4019 ptrdiff_t bound_index = queue_index;
4020 edge e;
4021 edge_iterator ei;
4023 queue = queues[queue_index];
4024 bb = queue.pop ();
4026 /* OK, we later inserted the BB with lower priority, skip it. */
4027 if (*block_priority.get (bb) > queue_index)
4028 continue;
4030 /* See if we can improve the bound. */
4031 ptrdiff_t *entry = bb_bounds.get (bb);
4032 if (entry && *entry < bound_index)
4033 bound_index = *entry;
4035 /* Insert succesors into the queue, watch for latch edge
4036 and record greatest index we saw. */
4037 FOR_EACH_EDGE (e, ei, bb->succs)
4039 bool insert = false;
4041 if (loop_exit_edge_p (loop, e))
4042 continue;
4044 if (e == loop_latch_edge (loop)
4045 && latch_index < bound_index)
4046 latch_index = bound_index;
4047 else if (!(entry = block_priority.get (e->dest)))
4049 insert = true;
4050 block_priority.put (e->dest, bound_index);
4052 else if (*entry < bound_index)
4054 insert = true;
4055 *entry = bound_index;
4058 if (insert)
4059 queues[bound_index].safe_push (e->dest);
4063 queues[queue_index].release ();
4066 gcc_assert (latch_index >= 0);
4067 if ((unsigned)latch_index < bounds.length ())
4069 if (dump_file && (dump_flags & TDF_DETAILS))
4071 fprintf (dump_file, "Found better loop bound ");
4072 print_decu (bounds[latch_index], dump_file);
4073 fprintf (dump_file, "\n");
4075 record_niter_bound (loop, bounds[latch_index], false, true);
4078 queues.release ();
4081 /* See if every path cross the loop goes through a statement that is known
4082 to not execute at the last iteration. In that case we can decrese iteration
4083 count by 1. */
4085 static void
4086 maybe_lower_iteration_bound (class loop *loop)
4088 hash_set<gimple *> *not_executed_last_iteration = NULL;
4089 class nb_iter_bound *elt;
4090 bool found_exit = false;
4091 auto_vec<basic_block> queue;
4092 bitmap visited;
4094 /* Collect all statements with interesting (i.e. lower than
4095 nb_iterations_upper_bound) bound on them.
4097 TODO: Due to the way record_estimate choose estimates to store, the bounds
4098 will be always nb_iterations_upper_bound-1. We can change this to record
4099 also statements not dominating the loop latch and update the walk bellow
4100 to the shortest path algorithm. */
4101 for (elt = loop->bounds; elt; elt = elt->next)
4103 if (!elt->is_exit
4104 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4106 if (!not_executed_last_iteration)
4107 not_executed_last_iteration = new hash_set<gimple *>;
4108 not_executed_last_iteration->add (elt->stmt);
4111 if (!not_executed_last_iteration)
4112 return;
4114 /* Start DFS walk in the loop header and see if we can reach the
4115 loop latch or any of the exits (including statements with side
4116 effects that may terminate the loop otherwise) without visiting
4117 any of the statements known to have undefined effect on the last
4118 iteration. */
4119 queue.safe_push (loop->header);
4120 visited = BITMAP_ALLOC (NULL);
4121 bitmap_set_bit (visited, loop->header->index);
4122 found_exit = false;
4126 basic_block bb = queue.pop ();
4127 gimple_stmt_iterator gsi;
4128 bool stmt_found = false;
4130 /* Loop for possible exits and statements bounding the execution. */
4131 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4133 gimple *stmt = gsi_stmt (gsi);
4134 if (not_executed_last_iteration->contains (stmt))
4136 stmt_found = true;
4137 break;
4139 if (gimple_has_side_effects (stmt))
4141 found_exit = true;
4142 break;
4145 if (found_exit)
4146 break;
4148 /* If no bounding statement is found, continue the walk. */
4149 if (!stmt_found)
4151 edge e;
4152 edge_iterator ei;
4154 FOR_EACH_EDGE (e, ei, bb->succs)
4156 if (loop_exit_edge_p (loop, e)
4157 || e == loop_latch_edge (loop))
4159 found_exit = true;
4160 break;
4162 if (bitmap_set_bit (visited, e->dest->index))
4163 queue.safe_push (e->dest);
4167 while (queue.length () && !found_exit);
4169 /* If every path through the loop reach bounding statement before exit,
4170 then we know the last iteration of the loop will have undefined effect
4171 and we can decrease number of iterations. */
4173 if (!found_exit)
4175 if (dump_file && (dump_flags & TDF_DETAILS))
4176 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4177 "undefined statement must be executed at the last iteration.\n");
4178 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4179 false, true);
4182 BITMAP_FREE (visited);
4183 delete not_executed_last_iteration;
4186 /* Get expected upper bound for number of loop iterations for
4187 BUILT_IN_EXPECT_WITH_PROBABILITY for a condition COND. */
4189 static tree
4190 get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
4192 if (cond == NULL)
4193 return NULL_TREE;
4195 tree lhs = gimple_cond_lhs (cond);
4196 if (TREE_CODE (lhs) != SSA_NAME)
4197 return NULL_TREE;
4199 gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
4200 gcall *def = dyn_cast<gcall *> (stmt);
4201 if (def == NULL)
4202 return NULL_TREE;
4204 tree decl = gimple_call_fndecl (def);
4205 if (!decl
4206 || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
4207 || gimple_call_num_args (stmt) != 3)
4208 return NULL_TREE;
4210 tree c = gimple_call_arg (def, 1);
4211 tree condt = TREE_TYPE (lhs);
4212 tree res = fold_build2 (gimple_cond_code (cond),
4213 condt, c,
4214 gimple_cond_rhs (cond));
4215 if (TREE_CODE (res) != INTEGER_CST)
4216 return NULL_TREE;
4219 tree prob = gimple_call_arg (def, 2);
4220 tree t = TREE_TYPE (prob);
4221 tree one
4222 = build_real_from_int_cst (t,
4223 integer_one_node);
4224 if (integer_zerop (res))
4225 prob = fold_build2 (MINUS_EXPR, t, one, prob);
4226 tree r = fold_build2 (RDIV_EXPR, t, one, prob);
4227 if (TREE_CODE (r) != REAL_CST)
4228 return NULL_TREE;
4230 HOST_WIDE_INT probi
4231 = real_to_integer (TREE_REAL_CST_PTR (r));
4232 return build_int_cst (condt, probi);
4235 /* Records estimates on numbers of iterations of LOOP. If USE_UNDEFINED_P
4236 is true also use estimates derived from undefined behavior. */
4238 void
4239 estimate_numbers_of_iterations (class loop *loop)
4241 vec<edge> exits;
4242 tree niter, type;
4243 unsigned i;
4244 class tree_niter_desc niter_desc;
4245 edge ex;
4246 widest_int bound;
4247 edge likely_exit;
4249 /* Give up if we already have tried to compute an estimation. */
4250 if (loop->estimate_state != EST_NOT_COMPUTED)
4251 return;
4253 loop->estimate_state = EST_AVAILABLE;
4255 /* If we have a measured profile, use it to estimate the number of
4256 iterations. Normally this is recorded by branch_prob right after
4257 reading the profile. In case we however found a new loop, record the
4258 information here.
4260 Explicitly check for profile status so we do not report
4261 wrong prediction hitrates for guessed loop iterations heuristics.
4262 Do not recompute already recorded bounds - we ought to be better on
4263 updating iteration bounds than updating profile in general and thus
4264 recomputing iteration bounds later in the compilation process will just
4265 introduce random roundoff errors. */
4266 if (!loop->any_estimate
4267 && loop->header->count.reliable_p ())
4269 gcov_type nit = expected_loop_iterations_unbounded (loop);
4270 bound = gcov_type_to_wide_int (nit);
4271 record_niter_bound (loop, bound, true, false);
4274 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
4275 to be constant, we avoid undefined behavior implied bounds and instead
4276 diagnose those loops with -Waggressive-loop-optimizations. */
4277 number_of_latch_executions (loop);
4279 exits = get_loop_exit_edges (loop);
4280 likely_exit = single_likely_exit (loop);
4281 FOR_EACH_VEC_ELT (exits, i, ex)
4283 if (ex == likely_exit)
4285 gimple *stmt = last_stmt (ex->src);
4286 if (stmt != NULL)
4288 gcond *cond = dyn_cast<gcond *> (stmt);
4289 tree niter_bound
4290 = get_upper_bound_based_on_builtin_expr_with_prob (cond);
4291 if (niter_bound != NULL_TREE)
4293 widest_int max = derive_constant_upper_bound (niter_bound);
4294 record_estimate (loop, niter_bound, max, cond,
4295 true, true, false);
4300 if (!number_of_iterations_exit (loop, ex, &niter_desc, false, false))
4301 continue;
4303 niter = niter_desc.niter;
4304 type = TREE_TYPE (niter);
4305 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4306 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4307 build_int_cst (type, 0),
4308 niter);
4309 record_estimate (loop, niter, niter_desc.max,
4310 last_stmt (ex->src),
4311 true, ex == likely_exit, true);
4312 record_control_iv (loop, &niter_desc);
4314 exits.release ();
4316 if (flag_aggressive_loop_optimizations)
4317 infer_loop_bounds_from_undefined (loop);
4319 discover_iteration_bound_by_body_walk (loop);
4321 maybe_lower_iteration_bound (loop);
4323 /* If we know the exact number of iterations of this loop, try to
4324 not break code with undefined behavior by not recording smaller
4325 maximum number of iterations. */
4326 if (loop->nb_iterations
4327 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4329 loop->any_upper_bound = true;
4330 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4334 /* Sets NIT to the estimated number of executions of the latch of the
4335 LOOP. If CONSERVATIVE is true, we must be sure that NIT is at least as
4336 large as the number of iterations. If we have no reliable estimate,
4337 the function returns false, otherwise returns true. */
4339 bool
4340 estimated_loop_iterations (class loop *loop, widest_int *nit)
4342 /* When SCEV information is available, try to update loop iterations
4343 estimate. Otherwise just return whatever we recorded earlier. */
4344 if (scev_initialized_p ())
4345 estimate_numbers_of_iterations (loop);
4347 return (get_estimated_loop_iterations (loop, nit));
4350 /* Similar to estimated_loop_iterations, but returns the estimate only
4351 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
4352 on the number of iterations of LOOP could not be derived, returns -1. */
4354 HOST_WIDE_INT
4355 estimated_loop_iterations_int (class loop *loop)
4357 widest_int nit;
4358 HOST_WIDE_INT hwi_nit;
4360 if (!estimated_loop_iterations (loop, &nit))
4361 return -1;
4363 if (!wi::fits_shwi_p (nit))
4364 return -1;
4365 hwi_nit = nit.to_shwi ();
4367 return hwi_nit < 0 ? -1 : hwi_nit;
4371 /* Sets NIT to an upper bound for the maximum number of executions of the
4372 latch of the LOOP. If we have no reliable estimate, the function returns
4373 false, otherwise returns true. */
4375 bool
4376 max_loop_iterations (class loop *loop, widest_int *nit)
4378 /* When SCEV information is available, try to update loop iterations
4379 estimate. Otherwise just return whatever we recorded earlier. */
4380 if (scev_initialized_p ())
4381 estimate_numbers_of_iterations (loop);
4383 return get_max_loop_iterations (loop, nit);
4386 /* Similar to max_loop_iterations, but returns the estimate only
4387 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
4388 on the number of iterations of LOOP could not be derived, returns -1. */
4390 HOST_WIDE_INT
4391 max_loop_iterations_int (class loop *loop)
4393 widest_int nit;
4394 HOST_WIDE_INT hwi_nit;
4396 if (!max_loop_iterations (loop, &nit))
4397 return -1;
4399 if (!wi::fits_shwi_p (nit))
4400 return -1;
4401 hwi_nit = nit.to_shwi ();
4403 return hwi_nit < 0 ? -1 : hwi_nit;
4406 /* Sets NIT to an likely upper bound for the maximum number of executions of the
4407 latch of the LOOP. If we have no reliable estimate, the function returns
4408 false, otherwise returns true. */
4410 bool
4411 likely_max_loop_iterations (class loop *loop, widest_int *nit)
4413 /* When SCEV information is available, try to update loop iterations
4414 estimate. Otherwise just return whatever we recorded earlier. */
4415 if (scev_initialized_p ())
4416 estimate_numbers_of_iterations (loop);
4418 return get_likely_max_loop_iterations (loop, nit);
4421 /* Similar to max_loop_iterations, but returns the estimate only
4422 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
4423 on the number of iterations of LOOP could not be derived, returns -1. */
4425 HOST_WIDE_INT
4426 likely_max_loop_iterations_int (class loop *loop)
4428 widest_int nit;
4429 HOST_WIDE_INT hwi_nit;
4431 if (!likely_max_loop_iterations (loop, &nit))
4432 return -1;
4434 if (!wi::fits_shwi_p (nit))
4435 return -1;
4436 hwi_nit = nit.to_shwi ();
4438 return hwi_nit < 0 ? -1 : hwi_nit;
4441 /* Returns an estimate for the number of executions of statements
4442 in the LOOP. For statements before the loop exit, this exceeds
4443 the number of execution of the latch by one. */
4445 HOST_WIDE_INT
4446 estimated_stmt_executions_int (class loop *loop)
4448 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4449 HOST_WIDE_INT snit;
4451 if (nit == -1)
4452 return -1;
4454 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4456 /* If the computation overflows, return -1. */
4457 return snit < 0 ? -1 : snit;
4460 /* Sets NIT to the maximum number of executions of the latch of the
4461 LOOP, plus one. If we have no reliable estimate, the function returns
4462 false, otherwise returns true. */
4464 bool
4465 max_stmt_executions (class loop *loop, widest_int *nit)
4467 widest_int nit_minus_one;
4469 if (!max_loop_iterations (loop, nit))
4470 return false;
4472 nit_minus_one = *nit;
4474 *nit += 1;
4476 return wi::gtu_p (*nit, nit_minus_one);
4479 /* Sets NIT to the estimated maximum number of executions of the latch of the
4480 LOOP, plus one. If we have no likely estimate, the function returns
4481 false, otherwise returns true. */
4483 bool
4484 likely_max_stmt_executions (class loop *loop, widest_int *nit)
4486 widest_int nit_minus_one;
4488 if (!likely_max_loop_iterations (loop, nit))
4489 return false;
4491 nit_minus_one = *nit;
4493 *nit += 1;
4495 return wi::gtu_p (*nit, nit_minus_one);
4498 /* Sets NIT to the estimated number of executions of the latch of the
4499 LOOP, plus one. If we have no reliable estimate, the function returns
4500 false, otherwise returns true. */
4502 bool
4503 estimated_stmt_executions (class loop *loop, widest_int *nit)
4505 widest_int nit_minus_one;
4507 if (!estimated_loop_iterations (loop, nit))
4508 return false;
4510 nit_minus_one = *nit;
4512 *nit += 1;
4514 return wi::gtu_p (*nit, nit_minus_one);
4517 /* Records estimates on numbers of iterations of loops. */
4519 void
4520 estimate_numbers_of_iterations (function *fn)
4522 class loop *loop;
4524 /* We don't want to issue signed overflow warnings while getting
4525 loop iteration estimates. */
4526 fold_defer_overflow_warnings ();
4528 FOR_EACH_LOOP_FN (fn, loop, 0)
4529 estimate_numbers_of_iterations (loop);
4531 fold_undefer_and_ignore_overflow_warnings ();
4534 /* Returns true if statement S1 dominates statement S2. */
4536 bool
4537 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4539 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4541 if (!bb1
4542 || s1 == s2)
4543 return true;
4545 if (bb1 == bb2)
4547 gimple_stmt_iterator bsi;
4549 if (gimple_code (s2) == GIMPLE_PHI)
4550 return false;
4552 if (gimple_code (s1) == GIMPLE_PHI)
4553 return true;
4555 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4556 if (gsi_stmt (bsi) == s1)
4557 return true;
4559 return false;
4562 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4565 /* Returns true when we can prove that the number of executions of
4566 STMT in the loop is at most NITER, according to the bound on
4567 the number of executions of the statement NITER_BOUND->stmt recorded in
4568 NITER_BOUND and fact that NITER_BOUND->stmt dominate STMT.
4570 ??? This code can become quite a CPU hog - we can have many bounds,
4571 and large basic block forcing stmt_dominates_stmt_p to be queried
4572 many times on a large basic blocks, so the whole thing is O(n^2)
4573 for scev_probably_wraps_p invocation (that can be done n times).
4575 It would make more sense (and give better answers) to remember BB
4576 bounds computed by discover_iteration_bound_by_body_walk. */
4578 static bool
4579 n_of_executions_at_most (gimple *stmt,
4580 class nb_iter_bound *niter_bound,
4581 tree niter)
4583 widest_int bound = niter_bound->bound;
4584 tree nit_type = TREE_TYPE (niter), e;
4585 enum tree_code cmp;
4587 gcc_assert (TYPE_UNSIGNED (nit_type));
4589 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4590 the number of iterations is small. */
4591 if (!wi::fits_to_tree_p (bound, nit_type))
4592 return false;
4594 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4595 times. This means that:
4597 -- if NITER_BOUND->is_exit is true, then everything after
4598 it at most NITER_BOUND->bound times.
4600 -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
4601 is executed, then NITER_BOUND->stmt is executed as well in the same
4602 iteration then STMT is executed at most NITER_BOUND->bound + 1 times.
4604 If we can determine that NITER_BOUND->stmt is always executed
4605 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4606 We conclude that if both statements belong to the same
4607 basic block and STMT is before NITER_BOUND->stmt and there are no
4608 statements with side effects in between. */
4610 if (niter_bound->is_exit)
4612 if (stmt == niter_bound->stmt
4613 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4614 return false;
4615 cmp = GE_EXPR;
4617 else
4619 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4621 gimple_stmt_iterator bsi;
4622 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4623 || gimple_code (stmt) == GIMPLE_PHI
4624 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4625 return false;
4627 /* By stmt_dominates_stmt_p we already know that STMT appears
4628 before NITER_BOUND->STMT. Still need to test that the loop
4629 cannot be terinated by a side effect in between. */
4630 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4631 gsi_next (&bsi))
4632 if (gimple_has_side_effects (gsi_stmt (bsi)))
4633 return false;
4634 bound += 1;
4635 if (bound == 0
4636 || !wi::fits_to_tree_p (bound, nit_type))
4637 return false;
4639 cmp = GT_EXPR;
4642 e = fold_binary (cmp, boolean_type_node,
4643 niter, wide_int_to_tree (nit_type, bound));
4644 return e && integer_nonzerop (e);
4647 /* Returns true if the arithmetics in TYPE can be assumed not to wrap. */
4649 bool
4650 nowrap_type_p (tree type)
4652 if (ANY_INTEGRAL_TYPE_P (type)
4653 && TYPE_OVERFLOW_UNDEFINED (type))
4654 return true;
4656 if (POINTER_TYPE_P (type))
4657 return true;
4659 return false;
4662 /* Return true if we can prove LOOP is exited before evolution of induction
4663 variable {BASE, STEP} overflows with respect to its type bound. */
4665 static bool
4666 loop_exits_before_overflow (tree base, tree step,
4667 gimple *at_stmt, class loop *loop)
4669 widest_int niter;
4670 struct control_iv *civ;
4671 class nb_iter_bound *bound;
4672 tree e, delta, step_abs, unsigned_base;
4673 tree type = TREE_TYPE (step);
4674 tree unsigned_type, valid_niter;
4676 /* Don't issue signed overflow warnings. */
4677 fold_defer_overflow_warnings ();
4679 /* Compute the number of iterations before we reach the bound of the
4680 type, and verify that the loop is exited before this occurs. */
4681 unsigned_type = unsigned_type_for (type);
4682 unsigned_base = fold_convert (unsigned_type, base);
4684 if (tree_int_cst_sign_bit (step))
4686 tree extreme = fold_convert (unsigned_type,
4687 lower_bound_in_type (type, type));
4688 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4689 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4690 fold_convert (unsigned_type, step));
4692 else
4694 tree extreme = fold_convert (unsigned_type,
4695 upper_bound_in_type (type, type));
4696 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4697 step_abs = fold_convert (unsigned_type, step);
4700 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4702 estimate_numbers_of_iterations (loop);
4704 if (max_loop_iterations (loop, &niter)
4705 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4706 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4707 wide_int_to_tree (TREE_TYPE (valid_niter),
4708 niter))) != NULL
4709 && integer_nonzerop (e))
4711 fold_undefer_and_ignore_overflow_warnings ();
4712 return true;
4714 if (at_stmt)
4715 for (bound = loop->bounds; bound; bound = bound->next)
4717 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4719 fold_undefer_and_ignore_overflow_warnings ();
4720 return true;
4723 fold_undefer_and_ignore_overflow_warnings ();
4725 /* Try to prove loop is exited before {base, step} overflows with the
4726 help of analyzed loop control IV. This is done only for IVs with
4727 constant step because otherwise we don't have the information. */
4728 if (TREE_CODE (step) == INTEGER_CST)
4730 for (civ = loop->control_ivs; civ; civ = civ->next)
4732 enum tree_code code;
4733 tree civ_type = TREE_TYPE (civ->step);
4735 /* Have to consider type difference because operand_equal_p ignores
4736 that for constants. */
4737 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4738 || element_precision (type) != element_precision (civ_type))
4739 continue;
4741 /* Only consider control IV with same step. */
4742 if (!operand_equal_p (step, civ->step, 0))
4743 continue;
4745 /* Done proving if this is a no-overflow control IV. */
4746 if (operand_equal_p (base, civ->base, 0))
4747 return true;
4749 /* Control IV is recorded after expanding simple operations,
4750 Here we expand base and compare it too. */
4751 tree expanded_base = expand_simple_operations (base);
4752 if (operand_equal_p (expanded_base, civ->base, 0))
4753 return true;
4755 /* If this is a before stepping control IV, in other words, we have
4757 {civ_base, step} = {base + step, step}
4759 Because civ {base + step, step} doesn't overflow during loop
4760 iterations, {base, step} will not overflow if we can prove the
4761 operation "base + step" does not overflow. Specifically, we try
4762 to prove below conditions are satisfied:
4764 base <= UPPER_BOUND (type) - step ;;step > 0
4765 base >= LOWER_BOUND (type) - step ;;step < 0
4767 by proving the reverse conditions are false using loop's initial
4768 condition. */
4769 if (POINTER_TYPE_P (TREE_TYPE (base)))
4770 code = POINTER_PLUS_EXPR;
4771 else
4772 code = PLUS_EXPR;
4774 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4775 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4776 expanded_base, step);
4777 if (operand_equal_p (stepped, civ->base, 0)
4778 || operand_equal_p (expanded_stepped, civ->base, 0))
4780 tree extreme;
4782 if (tree_int_cst_sign_bit (step))
4784 code = LT_EXPR;
4785 extreme = lower_bound_in_type (type, type);
4787 else
4789 code = GT_EXPR;
4790 extreme = upper_bound_in_type (type, type);
4792 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4793 e = fold_build2 (code, boolean_type_node, base, extreme);
4794 e = simplify_using_initial_conditions (loop, e);
4795 if (integer_zerop (e))
4796 return true;
4801 return false;
4804 /* VAR is scev variable whose evolution part is constant STEP, this function
4805 proves that VAR can't overflow by using value range info. If VAR's value
4806 range is [MIN, MAX], it can be proven by:
4807 MAX + step doesn't overflow ; if step > 0
4809 MIN + step doesn't underflow ; if step < 0.
4811 We can only do this if var is computed in every loop iteration, i.e, var's
4812 definition has to dominate loop latch. Consider below example:
4815 unsigned int i;
4817 <bb 3>:
4819 <bb 4>:
4820 # RANGE [0, 4294967294] NONZERO 65535
4821 # i_21 = PHI <0(3), i_18(9)>
4822 if (i_21 != 0)
4823 goto <bb 6>;
4824 else
4825 goto <bb 8>;
4827 <bb 6>:
4828 # RANGE [0, 65533] NONZERO 65535
4829 _6 = i_21 + 4294967295;
4830 # RANGE [0, 65533] NONZERO 65535
4831 _7 = (long unsigned int) _6;
4832 # RANGE [0, 524264] NONZERO 524280
4833 _8 = _7 * 8;
4834 # PT = nonlocal escaped
4835 _9 = a_14 + _8;
4836 *_9 = 0;
4838 <bb 8>:
4839 # RANGE [1, 65535] NONZERO 65535
4840 i_18 = i_21 + 1;
4841 if (i_18 >= 65535)
4842 goto <bb 10>;
4843 else
4844 goto <bb 9>;
4846 <bb 9>:
4847 goto <bb 4>;
4849 <bb 10>:
4850 return;
4853 VAR _6 doesn't overflow only with pre-condition (i_21 != 0), here we
4854 can't use _6 to prove no-overlfow for _7. In fact, var _7 takes value
4855 sequence (4294967295, 0, 1, ..., 65533) in loop life time, rather than
4856 (4294967295, 4294967296, ...). */
4858 static bool
4859 scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
4861 tree type;
4862 wide_int minv, maxv, diff, step_wi;
4863 enum value_range_kind rtype;
4865 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4866 return false;
4868 /* Check if VAR evaluates in every loop iteration. It's not the case
4869 if VAR is default definition or does not dominate loop's latch. */
4870 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4871 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4872 return false;
4874 rtype = get_range_info (var, &minv, &maxv);
4875 if (rtype != VR_RANGE)
4876 return false;
4878 /* VAR is a scev whose evolution part is STEP and value range info
4879 is [MIN, MAX], we can prove its no-overflowness by conditions:
4881 type_MAX - MAX >= step ; if step > 0
4882 MIN - type_MIN >= |step| ; if step < 0.
4884 Or VAR must take value outside of value range, which is not true. */
4885 step_wi = wi::to_wide (step);
4886 type = TREE_TYPE (var);
4887 if (tree_int_cst_sign_bit (step))
4889 diff = minv - wi::to_wide (lower_bound_in_type (type, type));
4890 step_wi = - step_wi;
4892 else
4893 diff = wi::to_wide (upper_bound_in_type (type, type)) - maxv;
4895 return (wi::geu_p (diff, step_wi));
4898 /* Return false only when the induction variable BASE + STEP * I is
4899 known to not overflow: i.e. when the number of iterations is small
4900 enough with respect to the step and initial condition in order to
4901 keep the evolution confined in TYPEs bounds. Return true when the
4902 iv is known to overflow or when the property is not computable.
4904 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4905 the rules for overflow of the given language apply (e.g., that signed
4906 arithmetics in C does not overflow).
4908 If VAR is a ssa variable, this function also returns false if VAR can
4909 be proven not overflow with value range info. */
4911 bool
4912 scev_probably_wraps_p (tree var, tree base, tree step,
4913 gimple *at_stmt, class loop *loop,
4914 bool use_overflow_semantics)
4916 /* FIXME: We really need something like
4917 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4919 We used to test for the following situation that frequently appears
4920 during address arithmetics:
4922 D.1621_13 = (long unsigned intD.4) D.1620_12;
4923 D.1622_14 = D.1621_13 * 8;
4924 D.1623_15 = (doubleD.29 *) D.1622_14;
4926 And derived that the sequence corresponding to D_14
4927 can be proved to not wrap because it is used for computing a
4928 memory access; however, this is not really the case -- for example,
4929 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4930 2032, 2040, 0, 8, ..., but the code is still legal. */
4932 if (chrec_contains_undetermined (base)
4933 || chrec_contains_undetermined (step))
4934 return true;
4936 if (integer_zerop (step))
4937 return false;
4939 /* If we can use the fact that signed and pointer arithmetics does not
4940 wrap, we are done. */
4941 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4942 return false;
4944 /* To be able to use estimates on number of iterations of the loop,
4945 we must have an upper bound on the absolute value of the step. */
4946 if (TREE_CODE (step) != INTEGER_CST)
4947 return true;
4949 /* Check if var can be proven not overflow with value range info. */
4950 if (var && TREE_CODE (var) == SSA_NAME
4951 && scev_var_range_cant_overflow (var, step, loop))
4952 return false;
4954 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4955 return false;
4957 /* At this point we still don't have a proof that the iv does not
4958 overflow: give up. */
4959 return true;
4962 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
4964 void
4965 free_numbers_of_iterations_estimates (class loop *loop)
4967 struct control_iv *civ;
4968 class nb_iter_bound *bound;
4970 loop->nb_iterations = NULL;
4971 loop->estimate_state = EST_NOT_COMPUTED;
4972 for (bound = loop->bounds; bound;)
4974 class nb_iter_bound *next = bound->next;
4975 ggc_free (bound);
4976 bound = next;
4978 loop->bounds = NULL;
4980 for (civ = loop->control_ivs; civ;)
4982 struct control_iv *next = civ->next;
4983 ggc_free (civ);
4984 civ = next;
4986 loop->control_ivs = NULL;
4989 /* Frees the information on upper bounds on numbers of iterations of loops. */
4991 void
4992 free_numbers_of_iterations_estimates (function *fn)
4994 class loop *loop;
4996 FOR_EACH_LOOP_FN (fn, loop, 0)
4997 free_numbers_of_iterations_estimates (loop);
5000 /* Substitute value VAL for ssa name NAME inside expressions held
5001 at LOOP. */
5003 void
5004 substitute_in_loop_info (class loop *loop, tree name, tree val)
5006 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);