Don't warn when alignment of global common data exceeds maximum alignment.
[official-gcc.git] / gcc / tree-ssa-loop-niter.c
blob466158a5eb16e1432a28853f5992c97ce076172f
1 /* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "tree-pass.h"
28 #include "ssa.h"
29 #include "gimple-pretty-print.h"
30 #include "diagnostic-core.h"
31 #include "stor-layout.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "intl.h"
35 #include "gimplify.h"
36 #include "gimple-iterator.h"
37 #include "tree-cfg.h"
38 #include "tree-ssa-loop-ivopts.h"
39 #include "tree-ssa-loop-niter.h"
40 #include "tree-ssa-loop.h"
41 #include "cfgloop.h"
42 #include "tree-chrec.h"
43 #include "tree-scalar-evolution.h"
44 #include "tree-dfa.h"
45 #include "gimple-range.h"
48 /* The maximum number of dominator BBs we search for conditions
49 of loop header copies we use for simplifying a conditional
50 expression. */
51 #define MAX_DOMINATORS_TO_WALK 8
55 Analysis of number of iterations of an affine exit test.
59 /* Bounds on some value, BELOW <= X <= UP. */
61 struct bounds
63 mpz_t below, up;
66 static bool number_of_iterations_popcount (loop_p loop, edge exit,
67 enum tree_code code,
68 class tree_niter_desc *niter);
71 /* Splits expression EXPR to a variable part VAR and constant OFFSET. */
73 static void
74 split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
76 tree type = TREE_TYPE (expr);
77 tree op0, op1;
78 bool negate = false;
80 *var = expr;
81 mpz_set_ui (offset, 0);
83 switch (TREE_CODE (expr))
85 case MINUS_EXPR:
86 negate = true;
87 /* Fallthru. */
89 case PLUS_EXPR:
90 case POINTER_PLUS_EXPR:
91 op0 = TREE_OPERAND (expr, 0);
92 op1 = TREE_OPERAND (expr, 1);
94 if (TREE_CODE (op1) != INTEGER_CST)
95 break;
97 *var = op0;
98 /* Always sign extend the offset. */
99 wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
100 if (negate)
101 mpz_neg (offset, offset);
102 break;
104 case INTEGER_CST:
105 *var = build_int_cst_type (type, 0);
106 wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
107 break;
109 default:
110 break;
114 /* From condition C0 CMP C1 derives information regarding the value range
115 of VAR, which is of TYPE. Results are stored in to BELOW and UP. */
117 static void
118 refine_value_range_using_guard (tree type, tree var,
119 tree c0, enum tree_code cmp, tree c1,
120 mpz_t below, mpz_t up)
122 tree varc0, varc1, ctype;
123 mpz_t offc0, offc1;
124 mpz_t mint, maxt, minc1, maxc1;
125 bool no_wrap = nowrap_type_p (type);
126 bool c0_ok, c1_ok;
127 signop sgn = TYPE_SIGN (type);
129 switch (cmp)
131 case LT_EXPR:
132 case LE_EXPR:
133 case GT_EXPR:
134 case GE_EXPR:
135 STRIP_SIGN_NOPS (c0);
136 STRIP_SIGN_NOPS (c1);
137 ctype = TREE_TYPE (c0);
138 if (!useless_type_conversion_p (ctype, type))
139 return;
141 break;
143 case EQ_EXPR:
144 /* We could derive quite precise information from EQ_EXPR, however,
145 such a guard is unlikely to appear, so we do not bother with
146 handling it. */
147 return;
149 case NE_EXPR:
150 /* NE_EXPR comparisons do not contain much of useful information,
151 except for cases of comparing with bounds. */
152 if (TREE_CODE (c1) != INTEGER_CST
153 || !INTEGRAL_TYPE_P (type))
154 return;
156 /* Ensure that the condition speaks about an expression in the same
157 type as X and Y. */
158 ctype = TREE_TYPE (c0);
159 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
160 return;
161 c0 = fold_convert (type, c0);
162 c1 = fold_convert (type, c1);
164 if (operand_equal_p (var, c0, 0))
166 mpz_t valc1;
168 /* Case of comparing VAR with its below/up bounds. */
169 mpz_init (valc1);
170 wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
171 if (mpz_cmp (valc1, below) == 0)
172 cmp = GT_EXPR;
173 if (mpz_cmp (valc1, up) == 0)
174 cmp = LT_EXPR;
176 mpz_clear (valc1);
178 else
180 /* Case of comparing with the bounds of the type. */
181 wide_int min = wi::min_value (type);
182 wide_int max = wi::max_value (type);
184 if (wi::to_wide (c1) == min)
185 cmp = GT_EXPR;
186 if (wi::to_wide (c1) == max)
187 cmp = LT_EXPR;
190 /* Quick return if no useful information. */
191 if (cmp == NE_EXPR)
192 return;
194 break;
196 default:
197 return;
200 mpz_init (offc0);
201 mpz_init (offc1);
202 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
203 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
205 /* We are only interested in comparisons of expressions based on VAR. */
206 if (operand_equal_p (var, varc1, 0))
208 std::swap (varc0, varc1);
209 mpz_swap (offc0, offc1);
210 cmp = swap_tree_comparison (cmp);
212 else if (!operand_equal_p (var, varc0, 0))
214 mpz_clear (offc0);
215 mpz_clear (offc1);
216 return;
219 mpz_init (mint);
220 mpz_init (maxt);
221 get_type_static_bounds (type, mint, maxt);
222 mpz_init (minc1);
223 mpz_init (maxc1);
224 value_range r;
225 /* Setup range information for varc1. */
226 if (integer_zerop (varc1))
228 wi::to_mpz (0, minc1, TYPE_SIGN (type));
229 wi::to_mpz (0, maxc1, TYPE_SIGN (type));
231 else if (TREE_CODE (varc1) == SSA_NAME
232 && INTEGRAL_TYPE_P (type)
233 && get_range_query (cfun)->range_of_expr (r, varc1)
234 && r.kind () == VR_RANGE)
236 gcc_assert (wi::le_p (r.lower_bound (), r.upper_bound (), sgn));
237 wi::to_mpz (r.lower_bound (), minc1, sgn);
238 wi::to_mpz (r.upper_bound (), maxc1, sgn);
240 else
242 mpz_set (minc1, mint);
243 mpz_set (maxc1, maxt);
246 /* Compute valid range information for varc1 + offc1. Note nothing
247 useful can be derived if it overflows or underflows. Overflow or
248 underflow could happen when:
250 offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
251 offc1 < 0 && varc1 + offc1 < MIN_VAL (type). */
252 mpz_add (minc1, minc1, offc1);
253 mpz_add (maxc1, maxc1, offc1);
254 c1_ok = (no_wrap
255 || mpz_sgn (offc1) == 0
256 || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
257 || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
258 if (!c1_ok)
259 goto end;
261 if (mpz_cmp (minc1, mint) < 0)
262 mpz_set (minc1, mint);
263 if (mpz_cmp (maxc1, maxt) > 0)
264 mpz_set (maxc1, maxt);
266 if (cmp == LT_EXPR)
268 cmp = LE_EXPR;
269 mpz_sub_ui (maxc1, maxc1, 1);
271 if (cmp == GT_EXPR)
273 cmp = GE_EXPR;
274 mpz_add_ui (minc1, minc1, 1);
277 /* Compute range information for varc0. If there is no overflow,
278 the condition implied that
280 (varc0) cmp (varc1 + offc1 - offc0)
282 We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
283 or the below bound if cmp is GE_EXPR.
285 To prove there is no overflow/underflow, we need to check below
286 four cases:
287 1) cmp == LE_EXPR && offc0 > 0
289 (varc0 + offc0) doesn't overflow
290 && (varc1 + offc1 - offc0) doesn't underflow
292 2) cmp == LE_EXPR && offc0 < 0
294 (varc0 + offc0) doesn't underflow
295 && (varc1 + offc1 - offc0) doesn't overfloe
297 In this case, (varc0 + offc0) will never underflow if we can
298 prove (varc1 + offc1 - offc0) doesn't overflow.
300 3) cmp == GE_EXPR && offc0 < 0
302 (varc0 + offc0) doesn't underflow
303 && (varc1 + offc1 - offc0) doesn't overflow
305 4) cmp == GE_EXPR && offc0 > 0
307 (varc0 + offc0) doesn't overflow
308 && (varc1 + offc1 - offc0) doesn't underflow
310 In this case, (varc0 + offc0) will never overflow if we can
311 prove (varc1 + offc1 - offc0) doesn't underflow.
313 Note we only handle case 2 and 4 in below code. */
315 mpz_sub (minc1, minc1, offc0);
316 mpz_sub (maxc1, maxc1, offc0);
317 c0_ok = (no_wrap
318 || mpz_sgn (offc0) == 0
319 || (cmp == LE_EXPR
320 && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
321 || (cmp == GE_EXPR
322 && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
323 if (!c0_ok)
324 goto end;
326 if (cmp == LE_EXPR)
328 if (mpz_cmp (up, maxc1) > 0)
329 mpz_set (up, maxc1);
331 else
333 if (mpz_cmp (below, minc1) < 0)
334 mpz_set (below, minc1);
337 end:
338 mpz_clear (mint);
339 mpz_clear (maxt);
340 mpz_clear (minc1);
341 mpz_clear (maxc1);
342 mpz_clear (offc0);
343 mpz_clear (offc1);
346 /* Stores estimate on the minimum/maximum value of the expression VAR + OFF
347 in TYPE to MIN and MAX. */
349 static void
350 determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
351 mpz_t min, mpz_t max)
353 int cnt = 0;
354 mpz_t minm, maxm;
355 basic_block bb;
356 wide_int minv, maxv;
357 enum value_range_kind rtype = VR_VARYING;
359 /* If the expression is a constant, we know its value exactly. */
360 if (integer_zerop (var))
362 mpz_set (min, off);
363 mpz_set (max, off);
364 return;
367 get_type_static_bounds (type, min, max);
369 /* See if we have some range info from VRP. */
370 if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
372 edge e = loop_preheader_edge (loop);
373 signop sgn = TYPE_SIGN (type);
374 gphi_iterator gsi;
376 /* Either for VAR itself... */
377 value_range var_range;
378 get_range_query (cfun)->range_of_expr (var_range, var);
379 rtype = var_range.kind ();
380 if (!var_range.undefined_p ())
382 minv = var_range.lower_bound ();
383 maxv = var_range.upper_bound ();
386 /* Or for PHI results in loop->header where VAR is used as
387 PHI argument from the loop preheader edge. */
388 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
390 gphi *phi = gsi.phi ();
391 value_range phi_range;
392 if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
393 && get_range_query (cfun)->range_of_expr (phi_range,
394 gimple_phi_result (phi))
395 && phi_range.kind () == VR_RANGE)
397 if (rtype != VR_RANGE)
399 rtype = VR_RANGE;
400 minv = phi_range.lower_bound ();
401 maxv = phi_range.upper_bound ();
403 else
405 minv = wi::max (minv, phi_range.lower_bound (), sgn);
406 maxv = wi::min (maxv, phi_range.upper_bound (), sgn);
407 /* If the PHI result range are inconsistent with
408 the VAR range, give up on looking at the PHI
409 results. This can happen if VR_UNDEFINED is
410 involved. */
411 if (wi::gt_p (minv, maxv, sgn))
413 value_range vr;
414 get_range_query (cfun)->range_of_expr (vr, var);
415 rtype = vr.kind ();
416 if (!vr.undefined_p ())
418 minv = vr.lower_bound ();
419 maxv = vr.upper_bound ();
421 break;
426 mpz_init (minm);
427 mpz_init (maxm);
428 if (rtype != VR_RANGE)
430 mpz_set (minm, min);
431 mpz_set (maxm, max);
433 else
435 gcc_assert (wi::le_p (minv, maxv, sgn));
436 wi::to_mpz (minv, minm, sgn);
437 wi::to_mpz (maxv, maxm, sgn);
439 /* Now walk the dominators of the loop header and use the entry
440 guards to refine the estimates. */
441 for (bb = loop->header;
442 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
443 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
445 edge e;
446 tree c0, c1;
447 gimple *cond;
448 enum tree_code cmp;
450 if (!single_pred_p (bb))
451 continue;
452 e = single_pred_edge (bb);
454 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
455 continue;
457 cond = last_stmt (e->src);
458 c0 = gimple_cond_lhs (cond);
459 cmp = gimple_cond_code (cond);
460 c1 = gimple_cond_rhs (cond);
462 if (e->flags & EDGE_FALSE_VALUE)
463 cmp = invert_tree_comparison (cmp, false);
465 refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
466 ++cnt;
469 mpz_add (minm, minm, off);
470 mpz_add (maxm, maxm, off);
471 /* If the computation may not wrap or off is zero, then this
472 is always fine. If off is negative and minv + off isn't
473 smaller than type's minimum, or off is positive and
474 maxv + off isn't bigger than type's maximum, use the more
475 precise range too. */
476 if (nowrap_type_p (type)
477 || mpz_sgn (off) == 0
478 || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
479 || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
481 mpz_set (min, minm);
482 mpz_set (max, maxm);
483 mpz_clear (minm);
484 mpz_clear (maxm);
485 return;
487 mpz_clear (minm);
488 mpz_clear (maxm);
491 /* If the computation may wrap, we know nothing about the value, except for
492 the range of the type. */
493 if (!nowrap_type_p (type))
494 return;
496 /* Since the addition of OFF does not wrap, if OFF is positive, then we may
497 add it to MIN, otherwise to MAX. */
498 if (mpz_sgn (off) < 0)
499 mpz_add (max, max, off);
500 else
501 mpz_add (min, min, off);
504 /* Stores the bounds on the difference of the values of the expressions
505 (var + X) and (var + Y), computed in TYPE, to BNDS. */
507 static void
508 bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
509 bounds *bnds)
511 int rel = mpz_cmp (x, y);
512 bool may_wrap = !nowrap_type_p (type);
513 mpz_t m;
515 /* If X == Y, then the expressions are always equal.
516 If X > Y, there are the following possibilities:
517 a) neither of var + X and var + Y overflow or underflow, or both of
518 them do. Then their difference is X - Y.
519 b) var + X overflows, and var + Y does not. Then the values of the
520 expressions are var + X - M and var + Y, where M is the range of
521 the type, and their difference is X - Y - M.
522 c) var + Y underflows and var + X does not. Their difference again
523 is M - X + Y.
524 Therefore, if the arithmetics in type does not overflow, then the
525 bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
526 Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
527 (X - Y, X - Y + M). */
529 if (rel == 0)
531 mpz_set_ui (bnds->below, 0);
532 mpz_set_ui (bnds->up, 0);
533 return;
536 mpz_init (m);
537 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
538 mpz_add_ui (m, m, 1);
539 mpz_sub (bnds->up, x, y);
540 mpz_set (bnds->below, bnds->up);
542 if (may_wrap)
544 if (rel > 0)
545 mpz_sub (bnds->below, bnds->below, m);
546 else
547 mpz_add (bnds->up, bnds->up, m);
550 mpz_clear (m);
553 /* From condition C0 CMP C1 derives information regarding the
554 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
555 and stores it to BNDS. */
557 static void
558 refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
559 tree vary, mpz_t offy,
560 tree c0, enum tree_code cmp, tree c1,
561 bounds *bnds)
563 tree varc0, varc1, ctype;
564 mpz_t offc0, offc1, loffx, loffy, bnd;
565 bool lbound = false;
566 bool no_wrap = nowrap_type_p (type);
567 bool x_ok, y_ok;
569 switch (cmp)
571 case LT_EXPR:
572 case LE_EXPR:
573 case GT_EXPR:
574 case GE_EXPR:
575 STRIP_SIGN_NOPS (c0);
576 STRIP_SIGN_NOPS (c1);
577 ctype = TREE_TYPE (c0);
578 if (!useless_type_conversion_p (ctype, type))
579 return;
581 break;
583 case EQ_EXPR:
584 /* We could derive quite precise information from EQ_EXPR, however, such
585 a guard is unlikely to appear, so we do not bother with handling
586 it. */
587 return;
589 case NE_EXPR:
590 /* NE_EXPR comparisons do not contain much of useful information, except for
591 special case of comparing with the bounds of the type. */
592 if (TREE_CODE (c1) != INTEGER_CST
593 || !INTEGRAL_TYPE_P (type))
594 return;
596 /* Ensure that the condition speaks about an expression in the same type
597 as X and Y. */
598 ctype = TREE_TYPE (c0);
599 if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
600 return;
601 c0 = fold_convert (type, c0);
602 c1 = fold_convert (type, c1);
604 if (TYPE_MIN_VALUE (type)
605 && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
607 cmp = GT_EXPR;
608 break;
610 if (TYPE_MAX_VALUE (type)
611 && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
613 cmp = LT_EXPR;
614 break;
617 return;
618 default:
619 return;
622 mpz_init (offc0);
623 mpz_init (offc1);
624 split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
625 split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
627 /* We are only interested in comparisons of expressions based on VARX and
628 VARY. TODO -- we might also be able to derive some bounds from
629 expressions containing just one of the variables. */
631 if (operand_equal_p (varx, varc1, 0))
633 std::swap (varc0, varc1);
634 mpz_swap (offc0, offc1);
635 cmp = swap_tree_comparison (cmp);
638 if (!operand_equal_p (varx, varc0, 0)
639 || !operand_equal_p (vary, varc1, 0))
640 goto end;
642 mpz_init_set (loffx, offx);
643 mpz_init_set (loffy, offy);
645 if (cmp == GT_EXPR || cmp == GE_EXPR)
647 std::swap (varx, vary);
648 mpz_swap (offc0, offc1);
649 mpz_swap (loffx, loffy);
650 cmp = swap_tree_comparison (cmp);
651 lbound = true;
654 /* If there is no overflow, the condition implies that
656 (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
658 The overflows and underflows may complicate things a bit; each
659 overflow decreases the appropriate offset by M, and underflow
660 increases it by M. The above inequality would not necessarily be
661 true if
663 -- VARX + OFFX underflows and VARX + OFFC0 does not, or
664 VARX + OFFC0 overflows, but VARX + OFFX does not.
665 This may only happen if OFFX < OFFC0.
666 -- VARY + OFFY overflows and VARY + OFFC1 does not, or
667 VARY + OFFC1 underflows and VARY + OFFY does not.
668 This may only happen if OFFY > OFFC1. */
670 if (no_wrap)
672 x_ok = true;
673 y_ok = true;
675 else
677 x_ok = (integer_zerop (varx)
678 || mpz_cmp (loffx, offc0) >= 0);
679 y_ok = (integer_zerop (vary)
680 || mpz_cmp (loffy, offc1) <= 0);
683 if (x_ok && y_ok)
685 mpz_init (bnd);
686 mpz_sub (bnd, loffx, loffy);
687 mpz_add (bnd, bnd, offc1);
688 mpz_sub (bnd, bnd, offc0);
690 if (cmp == LT_EXPR)
691 mpz_sub_ui (bnd, bnd, 1);
693 if (lbound)
695 mpz_neg (bnd, bnd);
696 if (mpz_cmp (bnds->below, bnd) < 0)
697 mpz_set (bnds->below, bnd);
699 else
701 if (mpz_cmp (bnd, bnds->up) < 0)
702 mpz_set (bnds->up, bnd);
704 mpz_clear (bnd);
707 mpz_clear (loffx);
708 mpz_clear (loffy);
709 end:
710 mpz_clear (offc0);
711 mpz_clear (offc1);
714 /* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
715 The subtraction is considered to be performed in arbitrary precision,
716 without overflows.
718 We do not attempt to be too clever regarding the value ranges of X and
719 Y; most of the time, they are just integers or ssa names offsetted by
720 integer. However, we try to use the information contained in the
721 comparisons before the loop (usually created by loop header copying). */
723 static void
724 bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
726 tree type = TREE_TYPE (x);
727 tree varx, vary;
728 mpz_t offx, offy;
729 mpz_t minx, maxx, miny, maxy;
730 int cnt = 0;
731 edge e;
732 basic_block bb;
733 tree c0, c1;
734 gimple *cond;
735 enum tree_code cmp;
737 /* Get rid of unnecessary casts, but preserve the value of
738 the expressions. */
739 STRIP_SIGN_NOPS (x);
740 STRIP_SIGN_NOPS (y);
742 mpz_init (bnds->below);
743 mpz_init (bnds->up);
744 mpz_init (offx);
745 mpz_init (offy);
746 split_to_var_and_offset (x, &varx, offx);
747 split_to_var_and_offset (y, &vary, offy);
749 if (!integer_zerop (varx)
750 && operand_equal_p (varx, vary, 0))
752 /* Special case VARX == VARY -- we just need to compare the
753 offsets. The matters are a bit more complicated in the
754 case addition of offsets may wrap. */
755 bound_difference_of_offsetted_base (type, offx, offy, bnds);
757 else
759 /* Otherwise, use the value ranges to determine the initial
760 estimates on below and up. */
761 mpz_init (minx);
762 mpz_init (maxx);
763 mpz_init (miny);
764 mpz_init (maxy);
765 determine_value_range (loop, type, varx, offx, minx, maxx);
766 determine_value_range (loop, type, vary, offy, miny, maxy);
768 mpz_sub (bnds->below, minx, maxy);
769 mpz_sub (bnds->up, maxx, miny);
770 mpz_clear (minx);
771 mpz_clear (maxx);
772 mpz_clear (miny);
773 mpz_clear (maxy);
776 /* If both X and Y are constants, we cannot get any more precise. */
777 if (integer_zerop (varx) && integer_zerop (vary))
778 goto end;
780 /* Now walk the dominators of the loop header and use the entry
781 guards to refine the estimates. */
782 for (bb = loop->header;
783 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
784 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
786 if (!single_pred_p (bb))
787 continue;
788 e = single_pred_edge (bb);
790 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
791 continue;
793 cond = last_stmt (e->src);
794 c0 = gimple_cond_lhs (cond);
795 cmp = gimple_cond_code (cond);
796 c1 = gimple_cond_rhs (cond);
798 if (e->flags & EDGE_FALSE_VALUE)
799 cmp = invert_tree_comparison (cmp, false);
801 refine_bounds_using_guard (type, varx, offx, vary, offy,
802 c0, cmp, c1, bnds);
803 ++cnt;
806 end:
807 mpz_clear (offx);
808 mpz_clear (offy);
811 /* Update the bounds in BNDS that restrict the value of X to the bounds
812 that restrict the value of X + DELTA. X can be obtained as a
813 difference of two values in TYPE. */
815 static void
816 bounds_add (bounds *bnds, const widest_int &delta, tree type)
818 mpz_t mdelta, max;
820 mpz_init (mdelta);
821 wi::to_mpz (delta, mdelta, SIGNED);
823 mpz_init (max);
824 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
826 mpz_add (bnds->up, bnds->up, mdelta);
827 mpz_add (bnds->below, bnds->below, mdelta);
829 if (mpz_cmp (bnds->up, max) > 0)
830 mpz_set (bnds->up, max);
832 mpz_neg (max, max);
833 if (mpz_cmp (bnds->below, max) < 0)
834 mpz_set (bnds->below, max);
836 mpz_clear (mdelta);
837 mpz_clear (max);
840 /* Update the bounds in BNDS that restrict the value of X to the bounds
841 that restrict the value of -X. */
843 static void
844 bounds_negate (bounds *bnds)
846 mpz_t tmp;
848 mpz_init_set (tmp, bnds->up);
849 mpz_neg (bnds->up, bnds->below);
850 mpz_neg (bnds->below, tmp);
851 mpz_clear (tmp);
854 /* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
856 static tree
857 inverse (tree x, tree mask)
859 tree type = TREE_TYPE (x);
860 tree rslt;
861 unsigned ctr = tree_floor_log2 (mask);
863 if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
865 unsigned HOST_WIDE_INT ix;
866 unsigned HOST_WIDE_INT imask;
867 unsigned HOST_WIDE_INT irslt = 1;
869 gcc_assert (cst_and_fits_in_hwi (x));
870 gcc_assert (cst_and_fits_in_hwi (mask));
872 ix = int_cst_value (x);
873 imask = int_cst_value (mask);
875 for (; ctr; ctr--)
877 irslt *= ix;
878 ix *= ix;
880 irslt &= imask;
882 rslt = build_int_cst_type (type, irslt);
884 else
886 rslt = build_int_cst (type, 1);
887 for (; ctr; ctr--)
889 rslt = int_const_binop (MULT_EXPR, rslt, x);
890 x = int_const_binop (MULT_EXPR, x, x);
892 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
895 return rslt;
898 /* Derives the upper bound BND on the number of executions of loop with exit
899 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
900 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
901 that the loop ends through this exit, i.e., the induction variable ever
902 reaches the value of C.
904 The value C is equal to final - base, where final and base are the final and
905 initial value of the actual induction variable in the analysed loop. BNDS
906 bounds the value of this difference when computed in signed type with
907 unbounded range, while the computation of C is performed in an unsigned
908 type with the range matching the range of the type of the induction variable.
909 In particular, BNDS.up contains an upper bound on C in the following cases:
910 -- if the iv must reach its final value without overflow, i.e., if
911 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
912 -- if final >= base, which we know to hold when BNDS.below >= 0. */
914 static void
915 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
916 bounds *bnds, bool exit_must_be_taken)
918 widest_int max;
919 mpz_t d;
920 tree type = TREE_TYPE (c);
921 bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
922 || mpz_sgn (bnds->below) >= 0);
924 if (integer_onep (s)
925 || (TREE_CODE (c) == INTEGER_CST
926 && TREE_CODE (s) == INTEGER_CST
927 && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
928 TYPE_SIGN (type)) == 0)
929 || (TYPE_OVERFLOW_UNDEFINED (type)
930 && multiple_of_p (type, c, s)))
932 /* If C is an exact multiple of S, then its value will be reached before
933 the induction variable overflows (unless the loop is exited in some
934 other way before). Note that the actual induction variable in the
935 loop (which ranges from base to final instead of from 0 to C) may
936 overflow, in which case BNDS.up will not be giving a correct upper
937 bound on C; thus, BNDS_U_VALID had to be computed in advance. */
938 no_overflow = true;
939 exit_must_be_taken = true;
942 /* If the induction variable can overflow, the number of iterations is at
943 most the period of the control variable (or infinite, but in that case
944 the whole # of iterations analysis will fail). */
945 if (!no_overflow)
947 max = wi::mask <widest_int> (TYPE_PRECISION (type)
948 - wi::ctz (wi::to_wide (s)), false);
949 wi::to_mpz (max, bnd, UNSIGNED);
950 return;
953 /* Now we know that the induction variable does not overflow, so the loop
954 iterates at most (range of type / S) times. */
955 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
957 /* If the induction variable is guaranteed to reach the value of C before
958 overflow, ... */
959 if (exit_must_be_taken)
961 /* ... then we can strengthen this to C / S, and possibly we can use
962 the upper bound on C given by BNDS. */
963 if (TREE_CODE (c) == INTEGER_CST)
964 wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
965 else if (bnds_u_valid)
966 mpz_set (bnd, bnds->up);
969 mpz_init (d);
970 wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
971 mpz_fdiv_q (bnd, bnd, d);
972 mpz_clear (d);
975 /* Determines number of iterations of loop whose ending condition
976 is IV <> FINAL. TYPE is the type of the iv. The number of
977 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
978 we know that the exit must be taken eventually, i.e., that the IV
979 ever reaches the value FINAL (we derived this earlier, and possibly set
980 NITER->assumptions to make sure this is the case). BNDS contains the
981 bounds on the difference FINAL - IV->base. */
983 static bool
984 number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
985 tree final, class tree_niter_desc *niter,
986 bool exit_must_be_taken, bounds *bnds)
988 tree niter_type = unsigned_type_for (type);
989 tree s, c, d, bits, assumption, tmp, bound;
990 mpz_t max;
992 niter->control = *iv;
993 niter->bound = final;
994 niter->cmp = NE_EXPR;
996 /* Rearrange the terms so that we get inequality S * i <> C, with S
997 positive. Also cast everything to the unsigned type. If IV does
998 not overflow, BNDS bounds the value of C. Also, this is the
999 case if the computation |FINAL - IV->base| does not overflow, i.e.,
1000 if BNDS->below in the result is nonnegative. */
1001 if (tree_int_cst_sign_bit (iv->step))
1003 s = fold_convert (niter_type,
1004 fold_build1 (NEGATE_EXPR, type, iv->step));
1005 c = fold_build2 (MINUS_EXPR, niter_type,
1006 fold_convert (niter_type, iv->base),
1007 fold_convert (niter_type, final));
1008 bounds_negate (bnds);
1010 else
1012 s = fold_convert (niter_type, iv->step);
1013 c = fold_build2 (MINUS_EXPR, niter_type,
1014 fold_convert (niter_type, final),
1015 fold_convert (niter_type, iv->base));
1018 mpz_init (max);
1019 number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
1020 exit_must_be_taken);
1021 niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
1022 TYPE_SIGN (niter_type));
1023 mpz_clear (max);
1025 /* Compute no-overflow information for the control iv. This can be
1026 proven when below two conditions are satisfied:
1028 1) IV evaluates toward FINAL at beginning, i.e:
1029 base <= FINAL ; step > 0
1030 base >= FINAL ; step < 0
1032 2) |FINAL - base| is an exact multiple of step.
1034 Unfortunately, it's hard to prove above conditions after pass loop-ch
1035 because loop with exit condition (IV != FINAL) usually will be guarded
1036 by initial-condition (IV.base - IV.step != FINAL). In this case, we
1037 can alternatively try to prove below conditions:
1039 1') IV evaluates toward FINAL at beginning, i.e:
1040 new_base = base - step < FINAL ; step > 0
1041 && base - step doesn't underflow
1042 new_base = base - step > FINAL ; step < 0
1043 && base - step doesn't overflow
1045 2') |FINAL - new_base| is an exact multiple of step.
1047 Please refer to PR34114 as an example of loop-ch's impact, also refer
1048 to PR72817 as an example why condition 2') is necessary.
1050 Note, for NE_EXPR, base equals to FINAL is a special case, in
1051 which the loop exits immediately, and the iv does not overflow. */
1052 if (!niter->control.no_overflow
1053 && (integer_onep (s) || multiple_of_p (type, c, s)))
1055 tree t, cond, new_c, relaxed_cond = boolean_false_node;
1057 if (tree_int_cst_sign_bit (iv->step))
1059 cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1060 if (TREE_CODE (type) == INTEGER_TYPE)
1062 /* Only when base - step doesn't overflow. */
1063 t = TYPE_MAX_VALUE (type);
1064 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1065 t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1066 if (integer_nonzerop (t))
1068 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1069 new_c = fold_build2 (MINUS_EXPR, niter_type,
1070 fold_convert (niter_type, t),
1071 fold_convert (niter_type, final));
1072 if (multiple_of_p (type, new_c, s))
1073 relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node,
1074 t, final);
1078 else
1080 cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1081 if (TREE_CODE (type) == INTEGER_TYPE)
1083 /* Only when base - step doesn't underflow. */
1084 t = TYPE_MIN_VALUE (type);
1085 t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1086 t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1087 if (integer_nonzerop (t))
1089 t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1090 new_c = fold_build2 (MINUS_EXPR, niter_type,
1091 fold_convert (niter_type, final),
1092 fold_convert (niter_type, t));
1093 if (multiple_of_p (type, new_c, s))
1094 relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node,
1095 t, final);
1100 t = simplify_using_initial_conditions (loop, cond);
1101 if (!t || !integer_onep (t))
1102 t = simplify_using_initial_conditions (loop, relaxed_cond);
1104 if (t && integer_onep (t))
1105 niter->control.no_overflow = true;
1108 /* First the trivial cases -- when the step is 1. */
1109 if (integer_onep (s))
1111 niter->niter = c;
1112 return true;
1114 if (niter->control.no_overflow && multiple_of_p (type, c, s))
1116 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, c, s);
1117 return true;
1120 /* Let nsd (step, size of mode) = d. If d does not divide c, the loop
1121 is infinite. Otherwise, the number of iterations is
1122 (inverse(s/d) * (c/d)) mod (size of mode/d). */
1123 bits = num_ending_zeros (s);
1124 bound = build_low_bits_mask (niter_type,
1125 (TYPE_PRECISION (niter_type)
1126 - tree_to_uhwi (bits)));
1128 d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
1129 build_int_cst (niter_type, 1), bits);
1130 s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
1132 if (!exit_must_be_taken)
1134 /* If we cannot assume that the exit is taken eventually, record the
1135 assumptions for divisibility of c. */
1136 assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1137 assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1138 assumption, build_int_cst (niter_type, 0));
1139 if (!integer_nonzerop (assumption))
1140 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1141 niter->assumptions, assumption);
1144 c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1145 if (integer_onep (s))
1147 niter->niter = c;
1149 else
1151 tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1152 niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1154 return true;
1157 /* Checks whether we can determine the final value of the control variable
1158 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1159 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1160 of the step. The assumptions necessary to ensure that the computation
1161 of the final value does not overflow are recorded in NITER. If we
1162 find the final value, we adjust DELTA and return TRUE. Otherwise
1163 we return false. BNDS bounds the value of IV1->base - IV0->base,
1164 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1165 true if we know that the exit must be taken eventually. */
1167 static bool
1168 number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1169 class tree_niter_desc *niter,
1170 tree *delta, tree step,
1171 bool exit_must_be_taken, bounds *bnds)
1173 tree niter_type = TREE_TYPE (step);
1174 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
1175 tree tmod;
1176 mpz_t mmod;
1177 tree assumption = boolean_true_node, bound, noloop;
1178 bool ret = false, fv_comp_no_overflow;
1179 tree type1 = type;
1180 if (POINTER_TYPE_P (type))
1181 type1 = sizetype;
1183 if (TREE_CODE (mod) != INTEGER_CST)
1184 return false;
1185 if (integer_nonzerop (mod))
1186 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
1187 tmod = fold_convert (type1, mod);
1189 mpz_init (mmod);
1190 wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
1191 mpz_neg (mmod, mmod);
1193 /* If the induction variable does not overflow and the exit is taken,
1194 then the computation of the final value does not overflow. This is
1195 also obviously the case if the new final value is equal to the
1196 current one. Finally, we postulate this for pointer type variables,
1197 as the code cannot rely on the object to that the pointer points being
1198 placed at the end of the address space (and more pragmatically,
1199 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
1200 if (integer_zerop (mod) || POINTER_TYPE_P (type))
1201 fv_comp_no_overflow = true;
1202 else if (!exit_must_be_taken)
1203 fv_comp_no_overflow = false;
1204 else
1205 fv_comp_no_overflow =
1206 (iv0->no_overflow && integer_nonzerop (iv0->step))
1207 || (iv1->no_overflow && integer_nonzerop (iv1->step));
1209 if (integer_nonzerop (iv0->step))
1211 /* The final value of the iv is iv1->base + MOD, assuming that this
1212 computation does not overflow, and that
1213 iv0->base <= iv1->base + MOD. */
1214 if (!fv_comp_no_overflow)
1216 bound = fold_build2 (MINUS_EXPR, type1,
1217 TYPE_MAX_VALUE (type1), tmod);
1218 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1219 iv1->base, bound);
1220 if (integer_zerop (assumption))
1221 goto end;
1223 if (mpz_cmp (mmod, bnds->below) < 0)
1224 noloop = boolean_false_node;
1225 else if (POINTER_TYPE_P (type))
1226 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1227 iv0->base,
1228 fold_build_pointer_plus (iv1->base, tmod));
1229 else
1230 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1231 iv0->base,
1232 fold_build2 (PLUS_EXPR, type1,
1233 iv1->base, tmod));
1235 else
1237 /* The final value of the iv is iv0->base - MOD, assuming that this
1238 computation does not overflow, and that
1239 iv0->base - MOD <= iv1->base. */
1240 if (!fv_comp_no_overflow)
1242 bound = fold_build2 (PLUS_EXPR, type1,
1243 TYPE_MIN_VALUE (type1), tmod);
1244 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1245 iv0->base, bound);
1246 if (integer_zerop (assumption))
1247 goto end;
1249 if (mpz_cmp (mmod, bnds->below) < 0)
1250 noloop = boolean_false_node;
1251 else if (POINTER_TYPE_P (type))
1252 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1253 fold_build_pointer_plus (iv0->base,
1254 fold_build1 (NEGATE_EXPR,
1255 type1, tmod)),
1256 iv1->base);
1257 else
1258 noloop = fold_build2 (GT_EXPR, boolean_type_node,
1259 fold_build2 (MINUS_EXPR, type1,
1260 iv0->base, tmod),
1261 iv1->base);
1264 if (!integer_nonzerop (assumption))
1265 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1266 niter->assumptions,
1267 assumption);
1268 if (!integer_zerop (noloop))
1269 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1270 niter->may_be_zero,
1271 noloop);
1272 bounds_add (bnds, wi::to_widest (mod), type);
1273 *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
1275 ret = true;
1276 end:
1277 mpz_clear (mmod);
1278 return ret;
1281 /* Add assertions to NITER that ensure that the control variable of the loop
1282 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1283 are TYPE. Returns false if we can prove that there is an overflow, true
1284 otherwise. STEP is the absolute value of the step. */
1286 static bool
1287 assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1288 class tree_niter_desc *niter, tree step)
1290 tree bound, d, assumption, diff;
1291 tree niter_type = TREE_TYPE (step);
1293 if (integer_nonzerop (iv0->step))
1295 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1296 if (iv0->no_overflow)
1297 return true;
1299 /* If iv0->base is a constant, we can determine the last value before
1300 overflow precisely; otherwise we conservatively assume
1301 MAX - STEP + 1. */
1303 if (TREE_CODE (iv0->base) == INTEGER_CST)
1305 d = fold_build2 (MINUS_EXPR, niter_type,
1306 fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1307 fold_convert (niter_type, iv0->base));
1308 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1310 else
1311 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1312 build_int_cst (niter_type, 1));
1313 bound = fold_build2 (MINUS_EXPR, type,
1314 TYPE_MAX_VALUE (type), fold_convert (type, diff));
1315 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1316 iv1->base, bound);
1318 else
1320 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1321 if (iv1->no_overflow)
1322 return true;
1324 if (TREE_CODE (iv1->base) == INTEGER_CST)
1326 d = fold_build2 (MINUS_EXPR, niter_type,
1327 fold_convert (niter_type, iv1->base),
1328 fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1329 diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1331 else
1332 diff = fold_build2 (MINUS_EXPR, niter_type, step,
1333 build_int_cst (niter_type, 1));
1334 bound = fold_build2 (PLUS_EXPR, type,
1335 TYPE_MIN_VALUE (type), fold_convert (type, diff));
1336 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1337 iv0->base, bound);
1340 if (integer_zerop (assumption))
1341 return false;
1342 if (!integer_nonzerop (assumption))
1343 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1344 niter->assumptions, assumption);
1346 iv0->no_overflow = true;
1347 iv1->no_overflow = true;
1348 return true;
1351 /* Add an assumption to NITER that a loop whose ending condition
1352 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1353 bounds the value of IV1->base - IV0->base. */
1355 static void
1356 assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1357 class tree_niter_desc *niter, bounds *bnds)
1359 tree assumption = boolean_true_node, bound, diff;
1360 tree mbz, mbzl, mbzr, type1;
1361 bool rolls_p, no_overflow_p;
1362 widest_int dstep;
1363 mpz_t mstep, max;
1365 /* We are going to compute the number of iterations as
1366 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
1367 variant of TYPE. This formula only works if
1369 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1371 (where MAX is the maximum value of the unsigned variant of TYPE, and
1372 the computations in this formula are performed in full precision,
1373 i.e., without overflows).
1375 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1376 we have a condition of the form iv0->base - step < iv1->base before the loop,
1377 and for loops iv0->base < iv1->base - step * i the condition
1378 iv0->base < iv1->base + step, due to loop header copying, which enable us
1379 to prove the lower bound.
1381 The upper bound is more complicated. Unless the expressions for initial
1382 and final value themselves contain enough information, we usually cannot
1383 derive it from the context. */
1385 /* First check whether the answer does not follow from the bounds we gathered
1386 before. */
1387 if (integer_nonzerop (iv0->step))
1388 dstep = wi::to_widest (iv0->step);
1389 else
1391 dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
1392 dstep = -dstep;
1395 mpz_init (mstep);
1396 wi::to_mpz (dstep, mstep, UNSIGNED);
1397 mpz_neg (mstep, mstep);
1398 mpz_add_ui (mstep, mstep, 1);
1400 rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1402 mpz_init (max);
1403 wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
1404 mpz_add (max, max, mstep);
1405 no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1406 /* For pointers, only values lying inside a single object
1407 can be compared or manipulated by pointer arithmetics.
1408 Gcc in general does not allow or handle objects larger
1409 than half of the address space, hence the upper bound
1410 is satisfied for pointers. */
1411 || POINTER_TYPE_P (type));
1412 mpz_clear (mstep);
1413 mpz_clear (max);
1415 if (rolls_p && no_overflow_p)
1416 return;
1418 type1 = type;
1419 if (POINTER_TYPE_P (type))
1420 type1 = sizetype;
1422 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1423 we must be careful not to introduce overflow. */
1425 if (integer_nonzerop (iv0->step))
1427 diff = fold_build2 (MINUS_EXPR, type1,
1428 iv0->step, build_int_cst (type1, 1));
1430 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1431 0 address never belongs to any object, we can assume this for
1432 pointers. */
1433 if (!POINTER_TYPE_P (type))
1435 bound = fold_build2 (PLUS_EXPR, type1,
1436 TYPE_MIN_VALUE (type), diff);
1437 assumption = fold_build2 (GE_EXPR, boolean_type_node,
1438 iv0->base, bound);
1441 /* And then we can compute iv0->base - diff, and compare it with
1442 iv1->base. */
1443 mbzl = fold_build2 (MINUS_EXPR, type1,
1444 fold_convert (type1, iv0->base), diff);
1445 mbzr = fold_convert (type1, iv1->base);
1447 else
1449 diff = fold_build2 (PLUS_EXPR, type1,
1450 iv1->step, build_int_cst (type1, 1));
1452 if (!POINTER_TYPE_P (type))
1454 bound = fold_build2 (PLUS_EXPR, type1,
1455 TYPE_MAX_VALUE (type), diff);
1456 assumption = fold_build2 (LE_EXPR, boolean_type_node,
1457 iv1->base, bound);
1460 mbzl = fold_convert (type1, iv0->base);
1461 mbzr = fold_build2 (MINUS_EXPR, type1,
1462 fold_convert (type1, iv1->base), diff);
1465 if (!integer_nonzerop (assumption))
1466 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1467 niter->assumptions, assumption);
1468 if (!rolls_p)
1470 mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1471 niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1472 niter->may_be_zero, mbz);
1476 /* Determines number of iterations of loop whose ending condition
1477 is IV0 < IV1. TYPE is the type of the iv. The number of
1478 iterations is stored to NITER. BNDS bounds the difference
1479 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1480 that the exit must be taken eventually. */
1482 static bool
1483 number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
1484 affine_iv *iv1, class tree_niter_desc *niter,
1485 bool exit_must_be_taken, bounds *bnds)
1487 tree niter_type = unsigned_type_for (type);
1488 tree delta, step, s;
1489 mpz_t mstep, tmp;
1491 if (integer_nonzerop (iv0->step))
1493 niter->control = *iv0;
1494 niter->cmp = LT_EXPR;
1495 niter->bound = iv1->base;
1497 else
1499 niter->control = *iv1;
1500 niter->cmp = GT_EXPR;
1501 niter->bound = iv0->base;
1504 delta = fold_build2 (MINUS_EXPR, niter_type,
1505 fold_convert (niter_type, iv1->base),
1506 fold_convert (niter_type, iv0->base));
1508 /* First handle the special case that the step is +-1. */
1509 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1510 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1512 /* for (i = iv0->base; i < iv1->base; i++)
1516 for (i = iv1->base; i > iv0->base; i--).
1518 In both cases # of iterations is iv1->base - iv0->base, assuming that
1519 iv1->base >= iv0->base.
1521 First try to derive a lower bound on the value of
1522 iv1->base - iv0->base, computed in full precision. If the difference
1523 is nonnegative, we are done, otherwise we must record the
1524 condition. */
1526 if (mpz_sgn (bnds->below) < 0)
1527 niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1528 iv1->base, iv0->base);
1529 niter->niter = delta;
1530 niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1531 TYPE_SIGN (niter_type));
1532 niter->control.no_overflow = true;
1533 return true;
1536 if (integer_nonzerop (iv0->step))
1537 step = fold_convert (niter_type, iv0->step);
1538 else
1539 step = fold_convert (niter_type,
1540 fold_build1 (NEGATE_EXPR, type, iv1->step));
1542 /* If we can determine the final value of the control iv exactly, we can
1543 transform the condition to != comparison. In particular, this will be
1544 the case if DELTA is constant. */
1545 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1546 exit_must_be_taken, bnds))
1548 affine_iv zps;
1550 zps.base = build_int_cst (niter_type, 0);
1551 zps.step = step;
1552 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1553 zps does not overflow. */
1554 zps.no_overflow = true;
1556 return number_of_iterations_ne (loop, type, &zps,
1557 delta, niter, true, bnds);
1560 /* Make sure that the control iv does not overflow. */
1561 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1562 return false;
1564 /* We determine the number of iterations as (delta + step - 1) / step. For
1565 this to work, we must know that iv1->base >= iv0->base - step + 1,
1566 otherwise the loop does not roll. */
1567 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1569 s = fold_build2 (MINUS_EXPR, niter_type,
1570 step, build_int_cst (niter_type, 1));
1571 delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1572 niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1574 mpz_init (mstep);
1575 mpz_init (tmp);
1576 wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
1577 mpz_add (tmp, bnds->up, mstep);
1578 mpz_sub_ui (tmp, tmp, 1);
1579 mpz_fdiv_q (tmp, tmp, mstep);
1580 niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1581 TYPE_SIGN (niter_type));
1582 mpz_clear (mstep);
1583 mpz_clear (tmp);
1585 return true;
1588 /* Determines number of iterations of loop whose ending condition
1589 is IV0 <= IV1. TYPE is the type of the iv. The number of
1590 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1591 we know that this condition must eventually become false (we derived this
1592 earlier, and possibly set NITER->assumptions to make sure this
1593 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1595 static bool
1596 number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
1597 affine_iv *iv1, class tree_niter_desc *niter,
1598 bool exit_must_be_taken, bounds *bnds)
1600 tree assumption;
1601 tree type1 = type;
1602 if (POINTER_TYPE_P (type))
1603 type1 = sizetype;
1605 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1606 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1607 value of the type. This we must know anyway, since if it is
1608 equal to this value, the loop rolls forever. We do not check
1609 this condition for pointer type ivs, as the code cannot rely on
1610 the object to that the pointer points being placed at the end of
1611 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1612 not defined for pointers). */
1614 if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1616 if (integer_nonzerop (iv0->step))
1617 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1618 iv1->base, TYPE_MAX_VALUE (type));
1619 else
1620 assumption = fold_build2 (NE_EXPR, boolean_type_node,
1621 iv0->base, TYPE_MIN_VALUE (type));
1623 if (integer_zerop (assumption))
1624 return false;
1625 if (!integer_nonzerop (assumption))
1626 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1627 niter->assumptions, assumption);
1630 if (integer_nonzerop (iv0->step))
1632 if (POINTER_TYPE_P (type))
1633 iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1634 else
1635 iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1636 build_int_cst (type1, 1));
1638 else if (POINTER_TYPE_P (type))
1639 iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1640 else
1641 iv0->base = fold_build2 (MINUS_EXPR, type1,
1642 iv0->base, build_int_cst (type1, 1));
1644 bounds_add (bnds, 1, type1);
1646 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1647 bnds);
1650 /* Dumps description of affine induction variable IV to FILE. */
1652 static void
1653 dump_affine_iv (FILE *file, affine_iv *iv)
1655 if (!integer_zerop (iv->step))
1656 fprintf (file, "[");
1658 print_generic_expr (dump_file, iv->base, TDF_SLIM);
1660 if (!integer_zerop (iv->step))
1662 fprintf (file, ", + , ");
1663 print_generic_expr (dump_file, iv->step, TDF_SLIM);
1664 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1668 /* Given exit condition IV0 CODE IV1 in TYPE, this function adjusts
1669 the condition for loop-until-wrap cases. For example:
1670 (unsigned){8, -1}_loop < 10 => {0, 1} != 9
1671 10 < (unsigned){0, max - 7}_loop => {0, 1} != 8
1672 Return true if condition is successfully adjusted. */
1674 static bool
1675 adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
1676 affine_iv *iv1)
1678 /* Only support simple cases for the moment. */
1679 if (TREE_CODE (iv0->base) != INTEGER_CST
1680 || TREE_CODE (iv1->base) != INTEGER_CST)
1681 return false;
1683 tree niter_type = unsigned_type_for (type), high, low;
1684 /* Case: i-- < 10. */
1685 if (integer_zerop (iv1->step))
1687 /* TODO: Should handle case in which abs(step) != 1. */
1688 if (!integer_minus_onep (iv0->step))
1689 return false;
1690 /* Give up on infinite loop. */
1691 if (*code == LE_EXPR
1692 && tree_int_cst_equal (iv1->base, TYPE_MAX_VALUE (type)))
1693 return false;
1694 high = fold_build2 (PLUS_EXPR, niter_type,
1695 fold_convert (niter_type, iv0->base),
1696 build_int_cst (niter_type, 1));
1697 low = fold_convert (niter_type, TYPE_MIN_VALUE (type));
1699 else if (integer_zerop (iv0->step))
1701 /* TODO: Should handle case in which abs(step) != 1. */
1702 if (!integer_onep (iv1->step))
1703 return false;
1704 /* Give up on infinite loop. */
1705 if (*code == LE_EXPR
1706 && tree_int_cst_equal (iv0->base, TYPE_MIN_VALUE (type)))
1707 return false;
1708 high = fold_convert (niter_type, TYPE_MAX_VALUE (type));
1709 low = fold_build2 (MINUS_EXPR, niter_type,
1710 fold_convert (niter_type, iv1->base),
1711 build_int_cst (niter_type, 1));
1713 else
1714 gcc_unreachable ();
1716 iv0->base = low;
1717 iv0->step = fold_convert (niter_type, integer_one_node);
1718 iv1->base = high;
1719 iv1->step = build_int_cst (niter_type, 0);
1720 *code = NE_EXPR;
1721 return true;
1724 /* Determine the number of iterations according to condition (for staying
1725 inside loop) which compares two induction variables using comparison
1726 operator CODE. The induction variable on left side of the comparison
1727 is IV0, the right-hand side is IV1. Both induction variables must have
1728 type TYPE, which must be an integer or pointer type. The steps of the
1729 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1731 LOOP is the loop whose number of iterations we are determining.
1733 ONLY_EXIT is true if we are sure this is the only way the loop could be
1734 exited (including possibly non-returning function calls, exceptions, etc.)
1735 -- in this case we can use the information whether the control induction
1736 variables can overflow or not in a more efficient way.
1738 if EVERY_ITERATION is true, we know the test is executed on every iteration.
1740 The results (number of iterations and assumptions as described in
1741 comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1742 Returns false if it fails to determine number of iterations, true if it
1743 was determined (possibly with some assumptions). */
1745 static bool
1746 number_of_iterations_cond (class loop *loop,
1747 tree type, affine_iv *iv0, enum tree_code code,
1748 affine_iv *iv1, class tree_niter_desc *niter,
1749 bool only_exit, bool every_iteration)
1751 bool exit_must_be_taken = false, ret;
1752 bounds bnds;
1754 /* If the test is not executed every iteration, wrapping may make the test
1755 to pass again.
1756 TODO: the overflow case can be still used as unreliable estimate of upper
1757 bound. But we have no API to pass it down to number of iterations code
1758 and, at present, it will not use it anyway. */
1759 if (!every_iteration
1760 && (!iv0->no_overflow || !iv1->no_overflow
1761 || code == NE_EXPR || code == EQ_EXPR))
1762 return false;
1764 /* The meaning of these assumptions is this:
1765 if !assumptions
1766 then the rest of information does not have to be valid
1767 if may_be_zero then the loop does not roll, even if
1768 niter != 0. */
1769 niter->assumptions = boolean_true_node;
1770 niter->may_be_zero = boolean_false_node;
1771 niter->niter = NULL_TREE;
1772 niter->max = 0;
1773 niter->bound = NULL_TREE;
1774 niter->cmp = ERROR_MARK;
1776 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1777 the control variable is on lhs. */
1778 if (code == GE_EXPR || code == GT_EXPR
1779 || (code == NE_EXPR && integer_zerop (iv0->step)))
1781 std::swap (iv0, iv1);
1782 code = swap_tree_comparison (code);
1785 if (POINTER_TYPE_P (type))
1787 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1788 to the same object. If they do, the control variable cannot wrap
1789 (as wrap around the bounds of memory will never return a pointer
1790 that would be guaranteed to point to the same object, even if we
1791 avoid undefined behavior by casting to size_t and back). */
1792 iv0->no_overflow = true;
1793 iv1->no_overflow = true;
1796 /* If the control induction variable does not overflow and the only exit
1797 from the loop is the one that we analyze, we know it must be taken
1798 eventually. */
1799 if (only_exit)
1801 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1802 exit_must_be_taken = true;
1803 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1804 exit_must_be_taken = true;
1807 /* We can handle cases which neither of the sides of the comparison is
1808 invariant:
1810 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1811 as if:
1812 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1814 provided that either below condition is satisfied:
1816 a) the test is NE_EXPR;
1817 b) iv0.step - iv1.step is integer and iv0/iv1 don't overflow.
1819 This rarely occurs in practice, but it is simple enough to manage. */
1820 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1822 tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1823 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1824 iv0->step, iv1->step);
1826 /* No need to check sign of the new step since below code takes care
1827 of this well. */
1828 if (code != NE_EXPR
1829 && (TREE_CODE (step) != INTEGER_CST
1830 || !iv0->no_overflow || !iv1->no_overflow))
1831 return false;
1833 iv0->step = step;
1834 if (!POINTER_TYPE_P (type))
1835 iv0->no_overflow = false;
1837 iv1->step = build_int_cst (step_type, 0);
1838 iv1->no_overflow = true;
1841 /* If the result of the comparison is a constant, the loop is weird. More
1842 precise handling would be possible, but the situation is not common enough
1843 to waste time on it. */
1844 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1845 return false;
1847 /* If the loop exits immediately, there is nothing to do. */
1848 tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1849 if (tem && integer_zerop (tem))
1851 if (!every_iteration)
1852 return false;
1853 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1854 niter->max = 0;
1855 return true;
1858 /* Handle special case loops: while (i-- < 10) and while (10 < i++) by
1859 adjusting iv0, iv1 and code. */
1860 if (code != NE_EXPR
1861 && (tree_int_cst_sign_bit (iv0->step)
1862 || (!integer_zerop (iv1->step)
1863 && !tree_int_cst_sign_bit (iv1->step)))
1864 && !adjust_cond_for_loop_until_wrap (type, iv0, &code, iv1))
1865 return false;
1867 /* OK, now we know we have a senseful loop. Handle several cases, depending
1868 on what comparison operator is used. */
1869 bound_difference (loop, iv1->base, iv0->base, &bnds);
1871 if (dump_file && (dump_flags & TDF_DETAILS))
1873 fprintf (dump_file,
1874 "Analyzing # of iterations of loop %d\n", loop->num);
1876 fprintf (dump_file, " exit condition ");
1877 dump_affine_iv (dump_file, iv0);
1878 fprintf (dump_file, " %s ",
1879 code == NE_EXPR ? "!="
1880 : code == LT_EXPR ? "<"
1881 : "<=");
1882 dump_affine_iv (dump_file, iv1);
1883 fprintf (dump_file, "\n");
1885 fprintf (dump_file, " bounds on difference of bases: ");
1886 mpz_out_str (dump_file, 10, bnds.below);
1887 fprintf (dump_file, " ... ");
1888 mpz_out_str (dump_file, 10, bnds.up);
1889 fprintf (dump_file, "\n");
1892 switch (code)
1894 case NE_EXPR:
1895 gcc_assert (integer_zerop (iv1->step));
1896 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1897 exit_must_be_taken, &bnds);
1898 break;
1900 case LT_EXPR:
1901 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1902 exit_must_be_taken, &bnds);
1903 break;
1905 case LE_EXPR:
1906 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1907 exit_must_be_taken, &bnds);
1908 break;
1910 default:
1911 gcc_unreachable ();
1914 mpz_clear (bnds.up);
1915 mpz_clear (bnds.below);
1917 if (dump_file && (dump_flags & TDF_DETAILS))
1919 if (ret)
1921 fprintf (dump_file, " result:\n");
1922 if (!integer_nonzerop (niter->assumptions))
1924 fprintf (dump_file, " under assumptions ");
1925 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
1926 fprintf (dump_file, "\n");
1929 if (!integer_zerop (niter->may_be_zero))
1931 fprintf (dump_file, " zero if ");
1932 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
1933 fprintf (dump_file, "\n");
1936 fprintf (dump_file, " # of iterations ");
1937 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
1938 fprintf (dump_file, ", bounded by ");
1939 print_decu (niter->max, dump_file);
1940 fprintf (dump_file, "\n");
1942 else
1943 fprintf (dump_file, " failed\n\n");
1945 return ret;
1948 /* Substitute NEW_TREE for OLD in EXPR and fold the result.
1949 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
1950 all SSA names are replaced with the result of calling the VALUEIZE
1951 function with the SSA name as argument. */
1953 tree
1954 simplify_replace_tree (tree expr, tree old, tree new_tree,
1955 tree (*valueize) (tree, void*), void *context,
1956 bool do_fold)
1958 unsigned i, n;
1959 tree ret = NULL_TREE, e, se;
1961 if (!expr)
1962 return NULL_TREE;
1964 /* Do not bother to replace constants. */
1965 if (CONSTANT_CLASS_P (expr))
1966 return expr;
1968 if (valueize)
1970 if (TREE_CODE (expr) == SSA_NAME)
1972 new_tree = valueize (expr, context);
1973 if (new_tree != expr)
1974 return new_tree;
1977 else if (expr == old
1978 || operand_equal_p (expr, old, 0))
1979 return unshare_expr (new_tree);
1981 if (!EXPR_P (expr))
1982 return expr;
1984 n = TREE_OPERAND_LENGTH (expr);
1985 for (i = 0; i < n; i++)
1987 e = TREE_OPERAND (expr, i);
1988 se = simplify_replace_tree (e, old, new_tree, valueize, context, do_fold);
1989 if (e == se)
1990 continue;
1992 if (!ret)
1993 ret = copy_node (expr);
1995 TREE_OPERAND (ret, i) = se;
1998 return (ret ? (do_fold ? fold (ret) : ret) : expr);
2001 /* Expand definitions of ssa names in EXPR as long as they are simple
2002 enough, and return the new expression. If STOP is specified, stop
2003 expanding if EXPR equals to it. */
2005 static tree
2006 expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
2008 unsigned i, n;
2009 tree ret = NULL_TREE, e, ee, e1;
2010 enum tree_code code;
2011 gimple *stmt;
2013 if (expr == NULL_TREE)
2014 return expr;
2016 if (is_gimple_min_invariant (expr))
2017 return expr;
2019 code = TREE_CODE (expr);
2020 if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2022 n = TREE_OPERAND_LENGTH (expr);
2023 for (i = 0; i < n; i++)
2025 e = TREE_OPERAND (expr, i);
2026 /* SCEV analysis feeds us with a proper expression
2027 graph matching the SSA graph. Avoid turning it
2028 into a tree here, thus handle tree sharing
2029 properly.
2030 ??? The SSA walk below still turns the SSA graph
2031 into a tree but until we find a testcase do not
2032 introduce additional tree sharing here. */
2033 bool existed_p;
2034 tree &cee = cache.get_or_insert (e, &existed_p);
2035 if (existed_p)
2036 ee = cee;
2037 else
2039 cee = e;
2040 ee = expand_simple_operations (e, stop, cache);
2041 if (ee != e)
2042 *cache.get (e) = ee;
2044 if (e == ee)
2045 continue;
2047 if (!ret)
2048 ret = copy_node (expr);
2050 TREE_OPERAND (ret, i) = ee;
2053 if (!ret)
2054 return expr;
2056 fold_defer_overflow_warnings ();
2057 ret = fold (ret);
2058 fold_undefer_and_ignore_overflow_warnings ();
2059 return ret;
2062 /* Stop if it's not ssa name or the one we don't want to expand. */
2063 if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2064 return expr;
2066 stmt = SSA_NAME_DEF_STMT (expr);
2067 if (gimple_code (stmt) == GIMPLE_PHI)
2069 basic_block src, dest;
2071 if (gimple_phi_num_args (stmt) != 1)
2072 return expr;
2073 e = PHI_ARG_DEF (stmt, 0);
2075 /* Avoid propagating through loop exit phi nodes, which
2076 could break loop-closed SSA form restrictions. */
2077 dest = gimple_bb (stmt);
2078 src = single_pred (dest);
2079 if (TREE_CODE (e) == SSA_NAME
2080 && src->loop_father != dest->loop_father)
2081 return expr;
2083 return expand_simple_operations (e, stop, cache);
2085 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2086 return expr;
2088 /* Avoid expanding to expressions that contain SSA names that need
2089 to take part in abnormal coalescing. */
2090 ssa_op_iter iter;
2091 FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2092 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2093 return expr;
2095 e = gimple_assign_rhs1 (stmt);
2096 code = gimple_assign_rhs_code (stmt);
2097 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2099 if (is_gimple_min_invariant (e))
2100 return e;
2102 if (code == SSA_NAME)
2103 return expand_simple_operations (e, stop, cache);
2104 else if (code == ADDR_EXPR)
2106 poly_int64 offset;
2107 tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2108 &offset);
2109 if (base
2110 && TREE_CODE (base) == MEM_REF)
2112 ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2113 cache);
2114 return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2115 wide_int_to_tree (sizetype,
2116 mem_ref_offset (base)
2117 + offset));
2121 return expr;
2124 switch (code)
2126 CASE_CONVERT:
2127 /* Casts are simple. */
2128 ee = expand_simple_operations (e, stop, cache);
2129 return fold_build1 (code, TREE_TYPE (expr), ee);
2131 case PLUS_EXPR:
2132 case MINUS_EXPR:
2133 if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2134 && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2135 return expr;
2136 /* Fallthru. */
2137 case POINTER_PLUS_EXPR:
2138 /* And increments and decrements by a constant are simple. */
2139 e1 = gimple_assign_rhs2 (stmt);
2140 if (!is_gimple_min_invariant (e1))
2141 return expr;
2143 ee = expand_simple_operations (e, stop, cache);
2144 return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2146 default:
2147 return expr;
2151 tree
2152 expand_simple_operations (tree expr, tree stop)
2154 hash_map<tree, tree> cache;
2155 return expand_simple_operations (expr, stop, cache);
2158 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2159 expression (or EXPR unchanged, if no simplification was possible). */
2161 static tree
2162 tree_simplify_using_condition_1 (tree cond, tree expr)
2164 bool changed;
2165 tree e, e0, e1, e2, notcond;
2166 enum tree_code code = TREE_CODE (expr);
2168 if (code == INTEGER_CST)
2169 return expr;
2171 if (code == TRUTH_OR_EXPR
2172 || code == TRUTH_AND_EXPR
2173 || code == COND_EXPR)
2175 changed = false;
2177 e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2178 if (TREE_OPERAND (expr, 0) != e0)
2179 changed = true;
2181 e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2182 if (TREE_OPERAND (expr, 1) != e1)
2183 changed = true;
2185 if (code == COND_EXPR)
2187 e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2188 if (TREE_OPERAND (expr, 2) != e2)
2189 changed = true;
2191 else
2192 e2 = NULL_TREE;
2194 if (changed)
2196 if (code == COND_EXPR)
2197 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2198 else
2199 expr = fold_build2 (code, boolean_type_node, e0, e1);
2202 return expr;
2205 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
2206 propagation, and vice versa. Fold does not handle this, since it is
2207 considered too expensive. */
2208 if (TREE_CODE (cond) == EQ_EXPR)
2210 e0 = TREE_OPERAND (cond, 0);
2211 e1 = TREE_OPERAND (cond, 1);
2213 /* We know that e0 == e1. Check whether we cannot simplify expr
2214 using this fact. */
2215 e = simplify_replace_tree (expr, e0, e1);
2216 if (integer_zerop (e) || integer_nonzerop (e))
2217 return e;
2219 e = simplify_replace_tree (expr, e1, e0);
2220 if (integer_zerop (e) || integer_nonzerop (e))
2221 return e;
2223 if (TREE_CODE (expr) == EQ_EXPR)
2225 e0 = TREE_OPERAND (expr, 0);
2226 e1 = TREE_OPERAND (expr, 1);
2228 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2229 e = simplify_replace_tree (cond, e0, e1);
2230 if (integer_zerop (e))
2231 return e;
2232 e = simplify_replace_tree (cond, e1, e0);
2233 if (integer_zerop (e))
2234 return e;
2236 if (TREE_CODE (expr) == NE_EXPR)
2238 e0 = TREE_OPERAND (expr, 0);
2239 e1 = TREE_OPERAND (expr, 1);
2241 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2242 e = simplify_replace_tree (cond, e0, e1);
2243 if (integer_zerop (e))
2244 return boolean_true_node;
2245 e = simplify_replace_tree (cond, e1, e0);
2246 if (integer_zerop (e))
2247 return boolean_true_node;
2250 /* Check whether COND ==> EXPR. */
2251 notcond = invert_truthvalue (cond);
2252 e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2253 if (e && integer_nonzerop (e))
2254 return e;
2256 /* Check whether COND ==> not EXPR. */
2257 e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2258 if (e && integer_zerop (e))
2259 return e;
2261 return expr;
2264 /* Tries to simplify EXPR using the condition COND. Returns the simplified
2265 expression (or EXPR unchanged, if no simplification was possible).
2266 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2267 of simple operations in definitions of ssa names in COND are expanded,
2268 so that things like casts or incrementing the value of the bound before
2269 the loop do not cause us to fail. */
2271 static tree
2272 tree_simplify_using_condition (tree cond, tree expr)
2274 cond = expand_simple_operations (cond);
2276 return tree_simplify_using_condition_1 (cond, expr);
2279 /* Tries to simplify EXPR using the conditions on entry to LOOP.
2280 Returns the simplified expression (or EXPR unchanged, if no
2281 simplification was possible). */
2283 tree
2284 simplify_using_initial_conditions (class loop *loop, tree expr)
2286 edge e;
2287 basic_block bb;
2288 gimple *stmt;
2289 tree cond, expanded, backup;
2290 int cnt = 0;
2292 if (TREE_CODE (expr) == INTEGER_CST)
2293 return expr;
2295 backup = expanded = expand_simple_operations (expr);
2297 /* Limit walking the dominators to avoid quadraticness in
2298 the number of BBs times the number of loops in degenerate
2299 cases. */
2300 for (bb = loop->header;
2301 bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
2302 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
2304 if (!single_pred_p (bb))
2305 continue;
2306 e = single_pred_edge (bb);
2308 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
2309 continue;
2311 stmt = last_stmt (e->src);
2312 cond = fold_build2 (gimple_cond_code (stmt),
2313 boolean_type_node,
2314 gimple_cond_lhs (stmt),
2315 gimple_cond_rhs (stmt));
2316 if (e->flags & EDGE_FALSE_VALUE)
2317 cond = invert_truthvalue (cond);
2318 expanded = tree_simplify_using_condition (cond, expanded);
2319 /* Break if EXPR is simplified to const values. */
2320 if (expanded
2321 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
2322 return expanded;
2324 ++cnt;
2327 /* Return the original expression if no simplification is done. */
2328 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
2331 /* Tries to simplify EXPR using the evolutions of the loop invariants
2332 in the superloops of LOOP. Returns the simplified expression
2333 (or EXPR unchanged, if no simplification was possible). */
2335 static tree
2336 simplify_using_outer_evolutions (class loop *loop, tree expr)
2338 enum tree_code code = TREE_CODE (expr);
2339 bool changed;
2340 tree e, e0, e1, e2;
2342 if (is_gimple_min_invariant (expr))
2343 return expr;
2345 if (code == TRUTH_OR_EXPR
2346 || code == TRUTH_AND_EXPR
2347 || code == COND_EXPR)
2349 changed = false;
2351 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
2352 if (TREE_OPERAND (expr, 0) != e0)
2353 changed = true;
2355 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
2356 if (TREE_OPERAND (expr, 1) != e1)
2357 changed = true;
2359 if (code == COND_EXPR)
2361 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
2362 if (TREE_OPERAND (expr, 2) != e2)
2363 changed = true;
2365 else
2366 e2 = NULL_TREE;
2368 if (changed)
2370 if (code == COND_EXPR)
2371 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2372 else
2373 expr = fold_build2 (code, boolean_type_node, e0, e1);
2376 return expr;
2379 e = instantiate_parameters (loop, expr);
2380 if (is_gimple_min_invariant (e))
2381 return e;
2383 return expr;
2386 /* Returns true if EXIT is the only possible exit from LOOP. */
2388 bool
2389 loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
2391 gimple_stmt_iterator bsi;
2392 unsigned i;
2394 if (exit != single_exit (loop))
2395 return false;
2397 for (i = 0; i < loop->num_nodes; i++)
2398 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
2399 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
2400 return false;
2402 return true;
2405 /* Stores description of number of iterations of LOOP derived from
2406 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
2407 information could be derived (and fields of NITER have meaning described
2408 in comments at class tree_niter_desc declaration), false otherwise.
2409 When EVERY_ITERATION is true, only tests that are known to be executed
2410 every iteration are considered (i.e. only test that alone bounds the loop).
2411 If AT_STMT is not NULL, this function stores LOOP's condition statement in
2412 it when returning true. */
2414 bool
2415 number_of_iterations_exit_assumptions (class loop *loop, edge exit,
2416 class tree_niter_desc *niter,
2417 gcond **at_stmt, bool every_iteration,
2418 basic_block *body)
2420 gimple *last;
2421 gcond *stmt;
2422 tree type;
2423 tree op0, op1;
2424 enum tree_code code;
2425 affine_iv iv0, iv1;
2426 bool safe;
2428 /* The condition at a fake exit (if it exists) does not control its
2429 execution. */
2430 if (exit->flags & EDGE_FAKE)
2431 return false;
2433 /* Nothing to analyze if the loop is known to be infinite. */
2434 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
2435 return false;
2437 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
2439 if (every_iteration && !safe)
2440 return false;
2442 niter->assumptions = boolean_false_node;
2443 niter->control.base = NULL_TREE;
2444 niter->control.step = NULL_TREE;
2445 niter->control.no_overflow = false;
2446 last = last_stmt (exit->src);
2447 if (!last)
2448 return false;
2449 stmt = dyn_cast <gcond *> (last);
2450 if (!stmt)
2451 return false;
2453 /* We want the condition for staying inside loop. */
2454 code = gimple_cond_code (stmt);
2455 if (exit->flags & EDGE_TRUE_VALUE)
2456 code = invert_tree_comparison (code, false);
2458 switch (code)
2460 case GT_EXPR:
2461 case GE_EXPR:
2462 case LT_EXPR:
2463 case LE_EXPR:
2464 case NE_EXPR:
2465 break;
2467 default:
2468 return false;
2471 op0 = gimple_cond_lhs (stmt);
2472 op1 = gimple_cond_rhs (stmt);
2473 type = TREE_TYPE (op0);
2475 if (TREE_CODE (type) != INTEGER_TYPE
2476 && !POINTER_TYPE_P (type))
2477 return false;
2479 tree iv0_niters = NULL_TREE;
2480 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2481 op0, &iv0, safe ? &iv0_niters : NULL, false))
2482 return number_of_iterations_popcount (loop, exit, code, niter);
2483 tree iv1_niters = NULL_TREE;
2484 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
2485 op1, &iv1, safe ? &iv1_niters : NULL, false))
2486 return false;
2487 /* Give up on complicated case. */
2488 if (iv0_niters && iv1_niters)
2489 return false;
2491 /* We don't want to see undefined signed overflow warnings while
2492 computing the number of iterations. */
2493 fold_defer_overflow_warnings ();
2495 iv0.base = expand_simple_operations (iv0.base);
2496 iv1.base = expand_simple_operations (iv1.base);
2497 bool body_from_caller = true;
2498 if (!body)
2500 body = get_loop_body (loop);
2501 body_from_caller = false;
2503 bool only_exit_p = loop_only_exit_p (loop, body, exit);
2504 if (!body_from_caller)
2505 free (body);
2506 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
2507 only_exit_p, safe))
2509 fold_undefer_and_ignore_overflow_warnings ();
2510 return false;
2513 /* Incorporate additional assumption implied by control iv. */
2514 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
2515 if (iv_niters)
2517 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
2518 fold_convert (TREE_TYPE (niter->niter),
2519 iv_niters));
2521 if (!integer_nonzerop (assumption))
2522 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2523 niter->assumptions, assumption);
2525 /* Refine upper bound if possible. */
2526 if (TREE_CODE (iv_niters) == INTEGER_CST
2527 && niter->max > wi::to_widest (iv_niters))
2528 niter->max = wi::to_widest (iv_niters);
2531 /* There is no assumptions if the loop is known to be finite. */
2532 if (!integer_zerop (niter->assumptions)
2533 && loop_constraint_set_p (loop, LOOP_C_FINITE))
2534 niter->assumptions = boolean_true_node;
2536 if (optimize >= 3)
2538 niter->assumptions = simplify_using_outer_evolutions (loop,
2539 niter->assumptions);
2540 niter->may_be_zero = simplify_using_outer_evolutions (loop,
2541 niter->may_be_zero);
2542 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
2545 niter->assumptions
2546 = simplify_using_initial_conditions (loop,
2547 niter->assumptions);
2548 niter->may_be_zero
2549 = simplify_using_initial_conditions (loop,
2550 niter->may_be_zero);
2552 fold_undefer_and_ignore_overflow_warnings ();
2554 /* If NITER has simplified into a constant, update MAX. */
2555 if (TREE_CODE (niter->niter) == INTEGER_CST)
2556 niter->max = wi::to_widest (niter->niter);
2558 if (at_stmt)
2559 *at_stmt = stmt;
2561 return (!integer_zerop (niter->assumptions));
2565 /* Utility function to check if OP is defined by a stmt
2566 that is a val - 1. */
2568 static bool
2569 ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2571 gimple *stmt;
2572 return (TREE_CODE (op) == SSA_NAME
2573 && (stmt = SSA_NAME_DEF_STMT (op))
2574 && is_gimple_assign (stmt)
2575 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2576 && val == gimple_assign_rhs1 (stmt)
2577 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2581 /* See if LOOP is a popcout implementation, determine NITER for the loop
2583 We match:
2584 <bb 2>
2585 goto <bb 4>
2587 <bb 3>
2588 _1 = b_11 + -1
2589 b_6 = _1 & b_11
2591 <bb 4>
2592 b_11 = PHI <b_5(D)(2), b_6(3)>
2594 exit block
2595 if (b_11 != 0)
2596 goto <bb 3>
2597 else
2598 goto <bb 5>
2600 OR we match copy-header version:
2601 if (b_5 != 0)
2602 goto <bb 3>
2603 else
2604 goto <bb 4>
2606 <bb 3>
2607 b_11 = PHI <b_5(2), b_6(3)>
2608 _1 = b_11 + -1
2609 b_6 = _1 & b_11
2611 exit block
2612 if (b_6 != 0)
2613 goto <bb 3>
2614 else
2615 goto <bb 4>
2617 If popcount pattern, update NITER accordingly.
2618 i.e., set NITER to __builtin_popcount (b)
2619 return true if we did, false otherwise.
2623 static bool
2624 number_of_iterations_popcount (loop_p loop, edge exit,
2625 enum tree_code code,
2626 class tree_niter_desc *niter)
2628 bool adjust = true;
2629 tree iter;
2630 HOST_WIDE_INT max;
2631 adjust = true;
2632 tree fn = NULL_TREE;
2634 /* Check loop terminating branch is like
2635 if (b != 0). */
2636 gimple *stmt = last_stmt (exit->src);
2637 if (!stmt
2638 || gimple_code (stmt) != GIMPLE_COND
2639 || code != NE_EXPR
2640 || !integer_zerop (gimple_cond_rhs (stmt))
2641 || TREE_CODE (gimple_cond_lhs (stmt)) != SSA_NAME)
2642 return false;
2644 gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (stmt));
2646 /* Depending on copy-header is performed, feeding PHI stmts might be in
2647 the loop header or loop latch, handle this. */
2648 if (gimple_code (and_stmt) == GIMPLE_PHI
2649 && gimple_bb (and_stmt) == loop->header
2650 && gimple_phi_num_args (and_stmt) == 2
2651 && (TREE_CODE (gimple_phi_arg_def (and_stmt,
2652 loop_latch_edge (loop)->dest_idx))
2653 == SSA_NAME))
2655 /* SSA used in exit condition is defined by PHI stmt
2656 b_11 = PHI <b_5(D)(2), b_6(3)>
2657 from the PHI stmt, get the and_stmt
2658 b_6 = _1 & b_11. */
2659 tree t = gimple_phi_arg_def (and_stmt, loop_latch_edge (loop)->dest_idx);
2660 and_stmt = SSA_NAME_DEF_STMT (t);
2661 adjust = false;
2664 /* Make sure it is indeed an and stmt (b_6 = _1 & b_11). */
2665 if (!is_gimple_assign (and_stmt)
2666 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR)
2667 return false;
2669 tree b_11 = gimple_assign_rhs1 (and_stmt);
2670 tree _1 = gimple_assign_rhs2 (and_stmt);
2672 /* Check that _1 is defined by _b11 + -1 (_1 = b_11 + -1).
2673 Also make sure that b_11 is the same in and_stmt and _1 defining stmt.
2674 Also canonicalize if _1 and _b11 are revrsed. */
2675 if (ssa_defined_by_minus_one_stmt_p (b_11, _1))
2676 std::swap (b_11, _1);
2677 else if (ssa_defined_by_minus_one_stmt_p (_1, b_11))
2679 else
2680 return false;
2681 /* Check the recurrence:
2682 ... = PHI <b_5(2), b_6(3)>. */
2683 gimple *phi = SSA_NAME_DEF_STMT (b_11);
2684 if (gimple_code (phi) != GIMPLE_PHI
2685 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2686 || (gimple_assign_lhs (and_stmt)
2687 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2688 return false;
2690 /* We found a match. Get the corresponding popcount builtin. */
2691 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2692 if (TYPE_PRECISION (TREE_TYPE (src)) <= TYPE_PRECISION (integer_type_node))
2693 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2694 else if (TYPE_PRECISION (TREE_TYPE (src))
2695 == TYPE_PRECISION (long_integer_type_node))
2696 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2697 else if (TYPE_PRECISION (TREE_TYPE (src))
2698 == TYPE_PRECISION (long_long_integer_type_node)
2699 || (TYPE_PRECISION (TREE_TYPE (src))
2700 == 2 * TYPE_PRECISION (long_long_integer_type_node)))
2701 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2703 if (!fn)
2704 return false;
2706 /* Update NITER params accordingly */
2707 tree utype = unsigned_type_for (TREE_TYPE (src));
2708 src = fold_convert (utype, src);
2709 if (TYPE_PRECISION (TREE_TYPE (src)) < TYPE_PRECISION (integer_type_node))
2710 src = fold_convert (unsigned_type_node, src);
2711 tree call;
2712 if (TYPE_PRECISION (TREE_TYPE (src))
2713 == 2 * TYPE_PRECISION (long_long_integer_type_node))
2715 int prec = TYPE_PRECISION (long_long_integer_type_node);
2716 tree src1 = fold_convert (long_long_unsigned_type_node,
2717 fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
2718 unshare_expr (src),
2719 build_int_cst (integer_type_node,
2720 prec)));
2721 tree src2 = fold_convert (long_long_unsigned_type_node, src);
2722 call = build_call_expr (fn, 1, src1);
2723 call = fold_build2 (PLUS_EXPR, TREE_TYPE (call), call,
2724 build_call_expr (fn, 1, src2));
2725 call = fold_convert (utype, call);
2727 else
2728 call = fold_convert (utype, build_call_expr (fn, 1, src));
2729 if (adjust)
2730 iter = fold_build2 (MINUS_EXPR, utype, call, build_int_cst (utype, 1));
2731 else
2732 iter = call;
2734 if (TREE_CODE (call) == INTEGER_CST)
2735 max = tree_to_uhwi (call);
2736 else
2737 max = TYPE_PRECISION (TREE_TYPE (src));
2738 if (adjust)
2739 max = max - 1;
2741 niter->niter = iter;
2742 niter->assumptions = boolean_true_node;
2744 if (adjust)
2746 tree may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2747 build_zero_cst (TREE_TYPE (src)));
2748 niter->may_be_zero
2749 = simplify_using_initial_conditions (loop, may_be_zero);
2751 else
2752 niter->may_be_zero = boolean_false_node;
2754 niter->max = max;
2755 niter->bound = NULL_TREE;
2756 niter->cmp = ERROR_MARK;
2757 return true;
2761 /* Like number_of_iterations_exit_assumptions, but return TRUE only if
2762 the niter information holds unconditionally. */
2764 bool
2765 number_of_iterations_exit (class loop *loop, edge exit,
2766 class tree_niter_desc *niter,
2767 bool warn, bool every_iteration,
2768 basic_block *body)
2770 gcond *stmt;
2771 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
2772 &stmt, every_iteration, body))
2773 return false;
2775 if (integer_nonzerop (niter->assumptions))
2776 return true;
2778 if (warn && dump_enabled_p ())
2779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
2780 "missed loop optimization: niters analysis ends up "
2781 "with assumptions.\n");
2783 return false;
2786 /* Try to determine the number of iterations of LOOP. If we succeed,
2787 expression giving number of iterations is returned and *EXIT is
2788 set to the edge from that the information is obtained. Otherwise
2789 chrec_dont_know is returned. */
2791 tree
2792 find_loop_niter (class loop *loop, edge *exit)
2794 unsigned i;
2795 auto_vec<edge> exits = get_loop_exit_edges (loop);
2796 edge ex;
2797 tree niter = NULL_TREE, aniter;
2798 class tree_niter_desc desc;
2800 *exit = NULL;
2801 FOR_EACH_VEC_ELT (exits, i, ex)
2803 if (!number_of_iterations_exit (loop, ex, &desc, false))
2804 continue;
2806 if (integer_nonzerop (desc.may_be_zero))
2808 /* We exit in the first iteration through this exit.
2809 We won't find anything better. */
2810 niter = build_int_cst (unsigned_type_node, 0);
2811 *exit = ex;
2812 break;
2815 if (!integer_zerop (desc.may_be_zero))
2816 continue;
2818 aniter = desc.niter;
2820 if (!niter)
2822 /* Nothing recorded yet. */
2823 niter = aniter;
2824 *exit = ex;
2825 continue;
2828 /* Prefer constants, the lower the better. */
2829 if (TREE_CODE (aniter) != INTEGER_CST)
2830 continue;
2832 if (TREE_CODE (niter) != INTEGER_CST)
2834 niter = aniter;
2835 *exit = ex;
2836 continue;
2839 if (tree_int_cst_lt (aniter, niter))
2841 niter = aniter;
2842 *exit = ex;
2843 continue;
2847 return niter ? niter : chrec_dont_know;
2850 /* Return true if loop is known to have bounded number of iterations. */
2852 bool
2853 finite_loop_p (class loop *loop)
2855 widest_int nit;
2856 int flags;
2858 flags = flags_from_decl_or_type (current_function_decl);
2859 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
2861 if (dump_file && (dump_flags & TDF_DETAILS))
2862 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
2863 loop->num);
2864 return true;
2867 if (loop->any_upper_bound
2868 || max_loop_iterations (loop, &nit))
2870 if (dump_file && (dump_flags & TDF_DETAILS))
2871 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
2872 loop->num);
2873 return true;
2876 if (loop->finite_p)
2878 unsigned i;
2879 auto_vec<edge> exits = get_loop_exit_edges (loop);
2880 edge ex;
2882 /* If the loop has a normal exit, we can assume it will terminate. */
2883 FOR_EACH_VEC_ELT (exits, i, ex)
2884 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
2886 if (dump_file)
2887 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
2888 "and -ffinite-loops is on.\n", loop->num);
2889 return true;
2893 return false;
2898 Analysis of a number of iterations of a loop by a brute-force evaluation.
2902 /* Bound on the number of iterations we try to evaluate. */
2904 #define MAX_ITERATIONS_TO_TRACK \
2905 ((unsigned) param_max_iterations_to_track)
2907 /* Returns the loop phi node of LOOP such that ssa name X is derived from its
2908 result by a chain of operations such that all but exactly one of their
2909 operands are constants. */
2911 static gphi *
2912 chain_of_csts_start (class loop *loop, tree x)
2914 gimple *stmt = SSA_NAME_DEF_STMT (x);
2915 tree use;
2916 basic_block bb = gimple_bb (stmt);
2917 enum tree_code code;
2919 if (!bb
2920 || !flow_bb_inside_loop_p (loop, bb))
2921 return NULL;
2923 if (gimple_code (stmt) == GIMPLE_PHI)
2925 if (bb == loop->header)
2926 return as_a <gphi *> (stmt);
2928 return NULL;
2931 if (gimple_code (stmt) != GIMPLE_ASSIGN
2932 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
2933 return NULL;
2935 code = gimple_assign_rhs_code (stmt);
2936 if (gimple_references_memory_p (stmt)
2937 || TREE_CODE_CLASS (code) == tcc_reference
2938 || (code == ADDR_EXPR
2939 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
2940 return NULL;
2942 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
2943 if (use == NULL_TREE)
2944 return NULL;
2946 return chain_of_csts_start (loop, use);
2949 /* Determines whether the expression X is derived from a result of a phi node
2950 in header of LOOP such that
2952 * the derivation of X consists only from operations with constants
2953 * the initial value of the phi node is constant
2954 * the value of the phi node in the next iteration can be derived from the
2955 value in the current iteration by a chain of operations with constants,
2956 or is also a constant
2958 If such phi node exists, it is returned, otherwise NULL is returned. */
2960 static gphi *
2961 get_base_for (class loop *loop, tree x)
2963 gphi *phi;
2964 tree init, next;
2966 if (is_gimple_min_invariant (x))
2967 return NULL;
2969 phi = chain_of_csts_start (loop, x);
2970 if (!phi)
2971 return NULL;
2973 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
2974 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
2976 if (!is_gimple_min_invariant (init))
2977 return NULL;
2979 if (TREE_CODE (next) == SSA_NAME
2980 && chain_of_csts_start (loop, next) != phi)
2981 return NULL;
2983 return phi;
2986 /* Given an expression X, then
2988 * if X is NULL_TREE, we return the constant BASE.
2989 * if X is a constant, we return the constant X.
2990 * otherwise X is a SSA name, whose value in the considered loop is derived
2991 by a chain of operations with constant from a result of a phi node in
2992 the header of the loop. Then we return value of X when the value of the
2993 result of this phi node is given by the constant BASE. */
2995 static tree
2996 get_val_for (tree x, tree base)
2998 gimple *stmt;
3000 gcc_checking_assert (is_gimple_min_invariant (base));
3002 if (!x)
3003 return base;
3004 else if (is_gimple_min_invariant (x))
3005 return x;
3007 stmt = SSA_NAME_DEF_STMT (x);
3008 if (gimple_code (stmt) == GIMPLE_PHI)
3009 return base;
3011 gcc_checking_assert (is_gimple_assign (stmt));
3013 /* STMT must be either an assignment of a single SSA name or an
3014 expression involving an SSA name and a constant. Try to fold that
3015 expression using the value for the SSA name. */
3016 if (gimple_assign_ssa_name_copy_p (stmt))
3017 return get_val_for (gimple_assign_rhs1 (stmt), base);
3018 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
3019 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
3020 return fold_build1 (gimple_assign_rhs_code (stmt),
3021 TREE_TYPE (gimple_assign_lhs (stmt)),
3022 get_val_for (gimple_assign_rhs1 (stmt), base));
3023 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
3025 tree rhs1 = gimple_assign_rhs1 (stmt);
3026 tree rhs2 = gimple_assign_rhs2 (stmt);
3027 if (TREE_CODE (rhs1) == SSA_NAME)
3028 rhs1 = get_val_for (rhs1, base);
3029 else if (TREE_CODE (rhs2) == SSA_NAME)
3030 rhs2 = get_val_for (rhs2, base);
3031 else
3032 gcc_unreachable ();
3033 return fold_build2 (gimple_assign_rhs_code (stmt),
3034 TREE_TYPE (gimple_assign_lhs (stmt)), rhs1, rhs2);
3036 else
3037 gcc_unreachable ();
3041 /* Tries to count the number of iterations of LOOP till it exits by EXIT
3042 by brute force -- i.e. by determining the value of the operands of the
3043 condition at EXIT in first few iterations of the loop (assuming that
3044 these values are constant) and determining the first one in that the
3045 condition is not satisfied. Returns the constant giving the number
3046 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
3048 tree
3049 loop_niter_by_eval (class loop *loop, edge exit)
3051 tree acnd;
3052 tree op[2], val[2], next[2], aval[2];
3053 gphi *phi;
3054 gimple *cond;
3055 unsigned i, j;
3056 enum tree_code cmp;
3058 cond = last_stmt (exit->src);
3059 if (!cond || gimple_code (cond) != GIMPLE_COND)
3060 return chrec_dont_know;
3062 cmp = gimple_cond_code (cond);
3063 if (exit->flags & EDGE_TRUE_VALUE)
3064 cmp = invert_tree_comparison (cmp, false);
3066 switch (cmp)
3068 case EQ_EXPR:
3069 case NE_EXPR:
3070 case GT_EXPR:
3071 case GE_EXPR:
3072 case LT_EXPR:
3073 case LE_EXPR:
3074 op[0] = gimple_cond_lhs (cond);
3075 op[1] = gimple_cond_rhs (cond);
3076 break;
3078 default:
3079 return chrec_dont_know;
3082 for (j = 0; j < 2; j++)
3084 if (is_gimple_min_invariant (op[j]))
3086 val[j] = op[j];
3087 next[j] = NULL_TREE;
3088 op[j] = NULL_TREE;
3090 else
3092 phi = get_base_for (loop, op[j]);
3093 if (!phi)
3094 return chrec_dont_know;
3095 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3096 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3100 /* Don't issue signed overflow warnings. */
3101 fold_defer_overflow_warnings ();
3103 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3105 for (j = 0; j < 2; j++)
3106 aval[j] = get_val_for (op[j], val[j]);
3108 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3109 if (acnd && integer_zerop (acnd))
3111 fold_undefer_and_ignore_overflow_warnings ();
3112 if (dump_file && (dump_flags & TDF_DETAILS))
3113 fprintf (dump_file,
3114 "Proved that loop %d iterates %d times using brute force.\n",
3115 loop->num, i);
3116 return build_int_cst (unsigned_type_node, i);
3119 for (j = 0; j < 2; j++)
3121 aval[j] = val[j];
3122 val[j] = get_val_for (next[j], val[j]);
3123 if (!is_gimple_min_invariant (val[j]))
3125 fold_undefer_and_ignore_overflow_warnings ();
3126 return chrec_dont_know;
3130 /* If the next iteration would use the same base values
3131 as the current one, there is no point looping further,
3132 all following iterations will be the same as this one. */
3133 if (val[0] == aval[0] && val[1] == aval[1])
3134 break;
3137 fold_undefer_and_ignore_overflow_warnings ();
3139 return chrec_dont_know;
3142 /* Finds the exit of the LOOP by that the loop exits after a constant
3143 number of iterations and stores the exit edge to *EXIT. The constant
3144 giving the number of iterations of LOOP is returned. The number of
3145 iterations is determined using loop_niter_by_eval (i.e. by brute force
3146 evaluation). If we are unable to find the exit for that loop_niter_by_eval
3147 determines the number of iterations, chrec_dont_know is returned. */
3149 tree
3150 find_loop_niter_by_eval (class loop *loop, edge *exit)
3152 unsigned i;
3153 auto_vec<edge> exits = get_loop_exit_edges (loop);
3154 edge ex;
3155 tree niter = NULL_TREE, aniter;
3157 *exit = NULL;
3159 /* Loops with multiple exits are expensive to handle and less important. */
3160 if (!flag_expensive_optimizations
3161 && exits.length () > 1)
3162 return chrec_dont_know;
3164 FOR_EACH_VEC_ELT (exits, i, ex)
3166 if (!just_once_each_iteration_p (loop, ex->src))
3167 continue;
3169 aniter = loop_niter_by_eval (loop, ex);
3170 if (chrec_contains_undetermined (aniter))
3171 continue;
3173 if (niter
3174 && !tree_int_cst_lt (aniter, niter))
3175 continue;
3177 niter = aniter;
3178 *exit = ex;
3181 return niter ? niter : chrec_dont_know;
3186 Analysis of upper bounds on number of iterations of a loop.
3190 static widest_int derive_constant_upper_bound_ops (tree, tree,
3191 enum tree_code, tree);
3193 /* Returns a constant upper bound on the value of the right-hand side of
3194 an assignment statement STMT. */
3196 static widest_int
3197 derive_constant_upper_bound_assign (gimple *stmt)
3199 enum tree_code code = gimple_assign_rhs_code (stmt);
3200 tree op0 = gimple_assign_rhs1 (stmt);
3201 tree op1 = gimple_assign_rhs2 (stmt);
3203 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3204 op0, code, op1);
3207 /* Returns a constant upper bound on the value of expression VAL. VAL
3208 is considered to be unsigned. If its type is signed, its value must
3209 be nonnegative. */
3211 static widest_int
3212 derive_constant_upper_bound (tree val)
3214 enum tree_code code;
3215 tree op0, op1, op2;
3217 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3218 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3221 /* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3222 whose type is TYPE. The expression is considered to be unsigned. If
3223 its type is signed, its value must be nonnegative. */
3225 static widest_int
3226 derive_constant_upper_bound_ops (tree type, tree op0,
3227 enum tree_code code, tree op1)
3229 tree subtype, maxt;
3230 widest_int bnd, max, cst;
3231 gimple *stmt;
3233 if (INTEGRAL_TYPE_P (type))
3234 maxt = TYPE_MAX_VALUE (type);
3235 else
3236 maxt = upper_bound_in_type (type, type);
3238 max = wi::to_widest (maxt);
3240 switch (code)
3242 case INTEGER_CST:
3243 return wi::to_widest (op0);
3245 CASE_CONVERT:
3246 subtype = TREE_TYPE (op0);
3247 if (!TYPE_UNSIGNED (subtype)
3248 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3249 that OP0 is nonnegative. */
3250 && TYPE_UNSIGNED (type)
3251 && !tree_expr_nonnegative_p (op0))
3253 /* If we cannot prove that the casted expression is nonnegative,
3254 we cannot establish more useful upper bound than the precision
3255 of the type gives us. */
3256 return max;
3259 /* We now know that op0 is an nonnegative value. Try deriving an upper
3260 bound for it. */
3261 bnd = derive_constant_upper_bound (op0);
3263 /* If the bound does not fit in TYPE, max. value of TYPE could be
3264 attained. */
3265 if (wi::ltu_p (max, bnd))
3266 return max;
3268 return bnd;
3270 case PLUS_EXPR:
3271 case POINTER_PLUS_EXPR:
3272 case MINUS_EXPR:
3273 if (TREE_CODE (op1) != INTEGER_CST
3274 || !tree_expr_nonnegative_p (op0))
3275 return max;
3277 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3278 choose the most logical way how to treat this constant regardless
3279 of the signedness of the type. */
3280 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3281 if (code != MINUS_EXPR)
3282 cst = -cst;
3284 bnd = derive_constant_upper_bound (op0);
3286 if (wi::neg_p (cst))
3288 cst = -cst;
3289 /* Avoid CST == 0x80000... */
3290 if (wi::neg_p (cst))
3291 return max;
3293 /* OP0 + CST. We need to check that
3294 BND <= MAX (type) - CST. */
3296 widest_int mmax = max - cst;
3297 if (wi::leu_p (bnd, mmax))
3298 return max;
3300 return bnd + cst;
3302 else
3304 /* OP0 - CST, where CST >= 0.
3306 If TYPE is signed, we have already verified that OP0 >= 0, and we
3307 know that the result is nonnegative. This implies that
3308 VAL <= BND - CST.
3310 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3311 otherwise the operation underflows.
3314 /* This should only happen if the type is unsigned; however, for
3315 buggy programs that use overflowing signed arithmetics even with
3316 -fno-wrapv, this condition may also be true for signed values. */
3317 if (wi::ltu_p (bnd, cst))
3318 return max;
3320 if (TYPE_UNSIGNED (type))
3322 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3323 wide_int_to_tree (type, cst));
3324 if (!tem || integer_nonzerop (tem))
3325 return max;
3328 bnd -= cst;
3331 return bnd;
3333 case FLOOR_DIV_EXPR:
3334 case EXACT_DIV_EXPR:
3335 if (TREE_CODE (op1) != INTEGER_CST
3336 || tree_int_cst_sign_bit (op1))
3337 return max;
3339 bnd = derive_constant_upper_bound (op0);
3340 return wi::udiv_floor (bnd, wi::to_widest (op1));
3342 case BIT_AND_EXPR:
3343 if (TREE_CODE (op1) != INTEGER_CST
3344 || tree_int_cst_sign_bit (op1))
3345 return max;
3346 return wi::to_widest (op1);
3348 case SSA_NAME:
3349 stmt = SSA_NAME_DEF_STMT (op0);
3350 if (gimple_code (stmt) != GIMPLE_ASSIGN
3351 || gimple_assign_lhs (stmt) != op0)
3352 return max;
3353 return derive_constant_upper_bound_assign (stmt);
3355 default:
3356 return max;
3360 /* Emit a -Waggressive-loop-optimizations warning if needed. */
3362 static void
3363 do_warn_aggressive_loop_optimizations (class loop *loop,
3364 widest_int i_bound, gimple *stmt)
3366 /* Don't warn if the loop doesn't have known constant bound. */
3367 if (!loop->nb_iterations
3368 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3369 || !warn_aggressive_loop_optimizations
3370 /* To avoid warning multiple times for the same loop,
3371 only start warning when we preserve loops. */
3372 || (cfun->curr_properties & PROP_loops) == 0
3373 /* Only warn once per loop. */
3374 || loop->warned_aggressive_loop_optimizations
3375 /* Only warn if undefined behavior gives us lower estimate than the
3376 known constant bound. */
3377 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3378 /* And undefined behavior happens unconditionally. */
3379 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3380 return;
3382 edge e = single_exit (loop);
3383 if (e == NULL)
3384 return;
3386 gimple *estmt = last_stmt (e->src);
3387 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3388 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3389 ? UNSIGNED : SIGNED);
3390 auto_diagnostic_group d;
3391 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3392 "iteration %s invokes undefined behavior", buf))
3393 inform (gimple_location (estmt), "within this loop");
3394 loop->warned_aggressive_loop_optimizations = true;
3397 /* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3398 is true if the loop is exited immediately after STMT, and this exit
3399 is taken at last when the STMT is executed BOUND + 1 times.
3400 REALISTIC is true if BOUND is expected to be close to the real number
3401 of iterations. UPPER is true if we are sure the loop iterates at most
3402 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3404 static void
3405 record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
3406 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3408 widest_int delta;
3410 if (dump_file && (dump_flags & TDF_DETAILS))
3412 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3413 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3414 fprintf (dump_file, " is %sexecuted at most ",
3415 upper ? "" : "probably ");
3416 print_generic_expr (dump_file, bound, TDF_SLIM);
3417 fprintf (dump_file, " (bounded by ");
3418 print_decu (i_bound, dump_file);
3419 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3422 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
3423 real number of iterations. */
3424 if (TREE_CODE (bound) != INTEGER_CST)
3425 realistic = false;
3426 else
3427 gcc_checking_assert (i_bound == wi::to_widest (bound));
3429 /* If we have a guaranteed upper bound, record it in the appropriate
3430 list, unless this is an !is_exit bound (i.e. undefined behavior in
3431 at_stmt) in a loop with known constant number of iterations. */
3432 if (upper
3433 && (is_exit
3434 || loop->nb_iterations == NULL_TREE
3435 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3437 class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3439 elt->bound = i_bound;
3440 elt->stmt = at_stmt;
3441 elt->is_exit = is_exit;
3442 elt->next = loop->bounds;
3443 loop->bounds = elt;
3446 /* If statement is executed on every path to the loop latch, we can directly
3447 infer the upper bound on the # of iterations of the loop. */
3448 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3449 upper = false;
3451 /* Update the number of iteration estimates according to the bound.
3452 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3453 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3454 later if such statement must be executed on last iteration */
3455 if (is_exit)
3456 delta = 0;
3457 else
3458 delta = 1;
3459 widest_int new_i_bound = i_bound + delta;
3461 /* If an overflow occurred, ignore the result. */
3462 if (wi::ltu_p (new_i_bound, delta))
3463 return;
3465 if (upper && !is_exit)
3466 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3467 record_niter_bound (loop, new_i_bound, realistic, upper);
3470 /* Records the control iv analyzed in NITER for LOOP if the iv is valid
3471 and doesn't overflow. */
3473 static void
3474 record_control_iv (class loop *loop, class tree_niter_desc *niter)
3476 struct control_iv *iv;
3478 if (!niter->control.base || !niter->control.step)
3479 return;
3481 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3482 return;
3484 iv = ggc_alloc<control_iv> ();
3485 iv->base = niter->control.base;
3486 iv->step = niter->control.step;
3487 iv->next = loop->control_ivs;
3488 loop->control_ivs = iv;
3490 return;
3493 /* This function returns TRUE if below conditions are satisfied:
3494 1) VAR is SSA variable.
3495 2) VAR is an IV:{base, step} in its defining loop.
3496 3) IV doesn't overflow.
3497 4) Both base and step are integer constants.
3498 5) Base is the MIN/MAX value depends on IS_MIN.
3499 Store value of base to INIT correspondingly. */
3501 static bool
3502 get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
3504 if (TREE_CODE (var) != SSA_NAME)
3505 return false;
3507 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
3508 class loop *loop = loop_containing_stmt (def_stmt);
3510 if (loop == NULL)
3511 return false;
3513 affine_iv iv;
3514 if (!simple_iv (loop, loop, var, &iv, false))
3515 return false;
3517 if (!iv.no_overflow)
3518 return false;
3520 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
3521 return false;
3523 if (is_min == tree_int_cst_sign_bit (iv.step))
3524 return false;
3526 *init = wi::to_wide (iv.base);
3527 return true;
3530 /* Record the estimate on number of iterations of LOOP based on the fact that
3531 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
3532 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
3533 estimated number of iterations is expected to be close to the real one.
3534 UPPER is true if we are sure the induction variable does not wrap. */
3536 static void
3537 record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
3538 tree low, tree high, bool realistic, bool upper)
3540 tree niter_bound, extreme, delta;
3541 tree type = TREE_TYPE (base), unsigned_type;
3542 tree orig_base = base;
3544 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
3545 return;
3547 if (dump_file && (dump_flags & TDF_DETAILS))
3549 fprintf (dump_file, "Induction variable (");
3550 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
3551 fprintf (dump_file, ") ");
3552 print_generic_expr (dump_file, base, TDF_SLIM);
3553 fprintf (dump_file, " + ");
3554 print_generic_expr (dump_file, step, TDF_SLIM);
3555 fprintf (dump_file, " * iteration does not wrap in statement ");
3556 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
3557 fprintf (dump_file, " in loop %d.\n", loop->num);
3560 unsigned_type = unsigned_type_for (type);
3561 base = fold_convert (unsigned_type, base);
3562 step = fold_convert (unsigned_type, step);
3564 if (tree_int_cst_sign_bit (step))
3566 wide_int max;
3567 value_range base_range;
3568 if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
3569 && !base_range.undefined_p ())
3570 max = base_range.upper_bound ();
3571 extreme = fold_convert (unsigned_type, low);
3572 if (TREE_CODE (orig_base) == SSA_NAME
3573 && TREE_CODE (high) == INTEGER_CST
3574 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3575 && (base_range.kind () == VR_RANGE
3576 || get_cst_init_from_scev (orig_base, &max, false))
3577 && wi::gts_p (wi::to_wide (high), max))
3578 base = wide_int_to_tree (unsigned_type, max);
3579 else if (TREE_CODE (base) != INTEGER_CST
3580 && dominated_by_p (CDI_DOMINATORS,
3581 loop->latch, gimple_bb (stmt)))
3582 base = fold_convert (unsigned_type, high);
3583 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
3584 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
3586 else
3588 wide_int min;
3589 value_range base_range;
3590 if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
3591 && !base_range.undefined_p ())
3592 min = base_range.lower_bound ();
3593 extreme = fold_convert (unsigned_type, high);
3594 if (TREE_CODE (orig_base) == SSA_NAME
3595 && TREE_CODE (low) == INTEGER_CST
3596 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
3597 && (base_range.kind () == VR_RANGE
3598 || get_cst_init_from_scev (orig_base, &min, true))
3599 && wi::gts_p (min, wi::to_wide (low)))
3600 base = wide_int_to_tree (unsigned_type, min);
3601 else if (TREE_CODE (base) != INTEGER_CST
3602 && dominated_by_p (CDI_DOMINATORS,
3603 loop->latch, gimple_bb (stmt)))
3604 base = fold_convert (unsigned_type, low);
3605 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
3608 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
3609 would get out of the range. */
3610 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
3611 widest_int max = derive_constant_upper_bound (niter_bound);
3612 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
3615 /* Determine information about number of iterations a LOOP from the index
3616 IDX of a data reference accessed in STMT. RELIABLE is true if STMT is
3617 guaranteed to be executed in every iteration of LOOP. Callback for
3618 for_each_index. */
3620 struct ilb_data
3622 class loop *loop;
3623 gimple *stmt;
3626 static bool
3627 idx_infer_loop_bounds (tree base, tree *idx, void *dta)
3629 struct ilb_data *data = (struct ilb_data *) dta;
3630 tree ev, init, step;
3631 tree low, high, type, next;
3632 bool sign, upper = true, at_end = false;
3633 class loop *loop = data->loop;
3635 if (TREE_CODE (base) != ARRAY_REF)
3636 return true;
3638 /* For arrays at the end of the structure, we are not guaranteed that they
3639 do not really extend over their declared size. However, for arrays of
3640 size greater than one, this is unlikely to be intended. */
3641 if (array_at_struct_end_p (base))
3643 at_end = true;
3644 upper = false;
3647 class loop *dloop = loop_containing_stmt (data->stmt);
3648 if (!dloop)
3649 return true;
3651 ev = analyze_scalar_evolution (dloop, *idx);
3652 ev = instantiate_parameters (loop, ev);
3653 init = initial_condition (ev);
3654 step = evolution_part_in_loop_num (ev, loop->num);
3656 if (!init
3657 || !step
3658 || TREE_CODE (step) != INTEGER_CST
3659 || integer_zerop (step)
3660 || tree_contains_chrecs (init, NULL)
3661 || chrec_contains_symbols_defined_in_loop (init, loop->num))
3662 return true;
3664 low = array_ref_low_bound (base);
3665 high = array_ref_up_bound (base);
3667 /* The case of nonconstant bounds could be handled, but it would be
3668 complicated. */
3669 if (TREE_CODE (low) != INTEGER_CST
3670 || !high
3671 || TREE_CODE (high) != INTEGER_CST)
3672 return true;
3673 sign = tree_int_cst_sign_bit (step);
3674 type = TREE_TYPE (step);
3676 /* The array of length 1 at the end of a structure most likely extends
3677 beyond its bounds. */
3678 if (at_end
3679 && operand_equal_p (low, high, 0))
3680 return true;
3682 /* In case the relevant bound of the array does not fit in type, or
3683 it does, but bound + step (in type) still belongs into the range of the
3684 array, the index may wrap and still stay within the range of the array
3685 (consider e.g. if the array is indexed by the full range of
3686 unsigned char).
3688 To make things simpler, we require both bounds to fit into type, although
3689 there are cases where this would not be strictly necessary. */
3690 if (!int_fits_type_p (high, type)
3691 || !int_fits_type_p (low, type))
3692 return true;
3693 low = fold_convert (type, low);
3694 high = fold_convert (type, high);
3696 if (sign)
3697 next = fold_binary (PLUS_EXPR, type, low, step);
3698 else
3699 next = fold_binary (PLUS_EXPR, type, high, step);
3701 if (tree_int_cst_compare (low, next) <= 0
3702 && tree_int_cst_compare (next, high) <= 0)
3703 return true;
3705 /* If access is not executed on every iteration, we must ensure that overlow
3706 may not make the access valid later. */
3707 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
3708 && scev_probably_wraps_p (NULL_TREE,
3709 initial_condition_in_loop_num (ev, loop->num),
3710 step, data->stmt, loop, true))
3711 upper = false;
3713 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
3714 return true;
3717 /* Determine information about number of iterations a LOOP from the bounds
3718 of arrays in the data reference REF accessed in STMT. RELIABLE is true if
3719 STMT is guaranteed to be executed in every iteration of LOOP.*/
3721 static void
3722 infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
3724 struct ilb_data data;
3726 data.loop = loop;
3727 data.stmt = stmt;
3728 for_each_index (&ref, idx_infer_loop_bounds, &data);
3731 /* Determine information about number of iterations of a LOOP from the way
3732 arrays are used in STMT. RELIABLE is true if STMT is guaranteed to be
3733 executed in every iteration of LOOP. */
3735 static void
3736 infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
3738 if (is_gimple_assign (stmt))
3740 tree op0 = gimple_assign_lhs (stmt);
3741 tree op1 = gimple_assign_rhs1 (stmt);
3743 /* For each memory access, analyze its access function
3744 and record a bound on the loop iteration domain. */
3745 if (REFERENCE_CLASS_P (op0))
3746 infer_loop_bounds_from_ref (loop, stmt, op0);
3748 if (REFERENCE_CLASS_P (op1))
3749 infer_loop_bounds_from_ref (loop, stmt, op1);
3751 else if (is_gimple_call (stmt))
3753 tree arg, lhs;
3754 unsigned i, n = gimple_call_num_args (stmt);
3756 lhs = gimple_call_lhs (stmt);
3757 if (lhs && REFERENCE_CLASS_P (lhs))
3758 infer_loop_bounds_from_ref (loop, stmt, lhs);
3760 for (i = 0; i < n; i++)
3762 arg = gimple_call_arg (stmt, i);
3763 if (REFERENCE_CLASS_P (arg))
3764 infer_loop_bounds_from_ref (loop, stmt, arg);
3769 /* Determine information about number of iterations of a LOOP from the fact
3770 that pointer arithmetics in STMT does not overflow. */
3772 static void
3773 infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
3775 tree def, base, step, scev, type, low, high;
3776 tree var, ptr;
3778 if (!is_gimple_assign (stmt)
3779 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
3780 return;
3782 def = gimple_assign_lhs (stmt);
3783 if (TREE_CODE (def) != SSA_NAME)
3784 return;
3786 type = TREE_TYPE (def);
3787 if (!nowrap_type_p (type))
3788 return;
3790 ptr = gimple_assign_rhs1 (stmt);
3791 if (!expr_invariant_in_loop_p (loop, ptr))
3792 return;
3794 var = gimple_assign_rhs2 (stmt);
3795 if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
3796 return;
3798 class loop *uloop = loop_containing_stmt (stmt);
3799 scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
3800 if (chrec_contains_undetermined (scev))
3801 return;
3803 base = initial_condition_in_loop_num (scev, loop->num);
3804 step = evolution_part_in_loop_num (scev, loop->num);
3806 if (!base || !step
3807 || TREE_CODE (step) != INTEGER_CST
3808 || tree_contains_chrecs (base, NULL)
3809 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3810 return;
3812 low = lower_bound_in_type (type, type);
3813 high = upper_bound_in_type (type, type);
3815 /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
3816 produce a NULL pointer. The contrary would mean NULL points to an object,
3817 while NULL is supposed to compare unequal with the address of all objects.
3818 Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
3819 NULL pointer since that would mean wrapping, which we assume here not to
3820 happen. So, we can exclude NULL from the valid range of pointer
3821 arithmetic. */
3822 if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
3823 low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
3825 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3828 /* Determine information about number of iterations of a LOOP from the fact
3829 that signed arithmetics in STMT does not overflow. */
3831 static void
3832 infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
3834 tree def, base, step, scev, type, low, high;
3836 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3837 return;
3839 def = gimple_assign_lhs (stmt);
3841 if (TREE_CODE (def) != SSA_NAME)
3842 return;
3844 type = TREE_TYPE (def);
3845 if (!INTEGRAL_TYPE_P (type)
3846 || !TYPE_OVERFLOW_UNDEFINED (type))
3847 return;
3849 scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
3850 if (chrec_contains_undetermined (scev))
3851 return;
3853 base = initial_condition_in_loop_num (scev, loop->num);
3854 step = evolution_part_in_loop_num (scev, loop->num);
3856 if (!base || !step
3857 || TREE_CODE (step) != INTEGER_CST
3858 || tree_contains_chrecs (base, NULL)
3859 || chrec_contains_symbols_defined_in_loop (base, loop->num))
3860 return;
3862 low = lower_bound_in_type (type, type);
3863 high = upper_bound_in_type (type, type);
3864 value_range r;
3865 get_range_query (cfun)->range_of_expr (r, def);
3866 if (r.kind () == VR_RANGE)
3868 low = wide_int_to_tree (type, r.lower_bound ());
3869 high = wide_int_to_tree (type, r.upper_bound ());
3872 record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
3875 /* The following analyzers are extracting informations on the bounds
3876 of LOOP from the following undefined behaviors:
3878 - data references should not access elements over the statically
3879 allocated size,
3881 - signed variables should not overflow when flag_wrapv is not set.
3884 static void
3885 infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
3887 unsigned i;
3888 gimple_stmt_iterator bsi;
3889 basic_block bb;
3890 bool reliable;
3892 for (i = 0; i < loop->num_nodes; i++)
3894 bb = bbs[i];
3896 /* If BB is not executed in each iteration of the loop, we cannot
3897 use the operations in it to infer reliable upper bound on the
3898 # of iterations of the loop. However, we can use it as a guess.
3899 Reliable guesses come only from array bounds. */
3900 reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
3902 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
3904 gimple *stmt = gsi_stmt (bsi);
3906 infer_loop_bounds_from_array (loop, stmt);
3908 if (reliable)
3910 infer_loop_bounds_from_signedness (loop, stmt);
3911 infer_loop_bounds_from_pointer_arith (loop, stmt);
3918 /* Compare wide ints, callback for qsort. */
3920 static int
3921 wide_int_cmp (const void *p1, const void *p2)
3923 const widest_int *d1 = (const widest_int *) p1;
3924 const widest_int *d2 = (const widest_int *) p2;
3925 return wi::cmpu (*d1, *d2);
3928 /* Return index of BOUND in BOUNDS array sorted in increasing order.
3929 Lookup by binary search. */
3931 static int
3932 bound_index (const vec<widest_int> &bounds, const widest_int &bound)
3934 unsigned int end = bounds.length ();
3935 unsigned int begin = 0;
3937 /* Find a matching index by means of a binary search. */
3938 while (begin != end)
3940 unsigned int middle = (begin + end) / 2;
3941 widest_int index = bounds[middle];
3943 if (index == bound)
3944 return middle;
3945 else if (wi::ltu_p (index, bound))
3946 begin = middle + 1;
3947 else
3948 end = middle;
3950 gcc_unreachable ();
3953 /* We recorded loop bounds only for statements dominating loop latch (and thus
3954 executed each loop iteration). If there are any bounds on statements not
3955 dominating the loop latch we can improve the estimate by walking the loop
3956 body and seeing if every path from loop header to loop latch contains
3957 some bounded statement. */
3959 static void
3960 discover_iteration_bound_by_body_walk (class loop *loop)
3962 class nb_iter_bound *elt;
3963 auto_vec<widest_int> bounds;
3964 vec<vec<basic_block> > queues = vNULL;
3965 vec<basic_block> queue = vNULL;
3966 ptrdiff_t queue_index;
3967 ptrdiff_t latch_index = 0;
3969 /* Discover what bounds may interest us. */
3970 for (elt = loop->bounds; elt; elt = elt->next)
3972 widest_int bound = elt->bound;
3974 /* Exit terminates loop at given iteration, while non-exits produce undefined
3975 effect on the next iteration. */
3976 if (!elt->is_exit)
3978 bound += 1;
3979 /* If an overflow occurred, ignore the result. */
3980 if (bound == 0)
3981 continue;
3984 if (!loop->any_upper_bound
3985 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
3986 bounds.safe_push (bound);
3989 /* Exit early if there is nothing to do. */
3990 if (!bounds.exists ())
3991 return;
3993 if (dump_file && (dump_flags & TDF_DETAILS))
3994 fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
3996 /* Sort the bounds in decreasing order. */
3997 bounds.qsort (wide_int_cmp);
3999 /* For every basic block record the lowest bound that is guaranteed to
4000 terminate the loop. */
4002 hash_map<basic_block, ptrdiff_t> bb_bounds;
4003 for (elt = loop->bounds; elt; elt = elt->next)
4005 widest_int bound = elt->bound;
4006 if (!elt->is_exit)
4008 bound += 1;
4009 /* If an overflow occurred, ignore the result. */
4010 if (bound == 0)
4011 continue;
4014 if (!loop->any_upper_bound
4015 || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
4017 ptrdiff_t index = bound_index (bounds, bound);
4018 ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
4019 if (!entry)
4020 bb_bounds.put (gimple_bb (elt->stmt), index);
4021 else if ((ptrdiff_t)*entry > index)
4022 *entry = index;
4026 hash_map<basic_block, ptrdiff_t> block_priority;
4028 /* Perform shortest path discovery loop->header ... loop->latch.
4030 The "distance" is given by the smallest loop bound of basic block
4031 present in the path and we look for path with largest smallest bound
4032 on it.
4034 To avoid the need for fibonacci heap on double ints we simply compress
4035 double ints into indexes to BOUNDS array and then represent the queue
4036 as arrays of queues for every index.
4037 Index of BOUNDS.length() means that the execution of given BB has
4038 no bounds determined.
4040 VISITED is a pointer map translating basic block into smallest index
4041 it was inserted into the priority queue with. */
4042 latch_index = -1;
4044 /* Start walk in loop header with index set to infinite bound. */
4045 queue_index = bounds.length ();
4046 queues.safe_grow_cleared (queue_index + 1, true);
4047 queue.safe_push (loop->header);
4048 queues[queue_index] = queue;
4049 block_priority.put (loop->header, queue_index);
4051 for (; queue_index >= 0; queue_index--)
4053 if (latch_index < queue_index)
4055 while (queues[queue_index].length ())
4057 basic_block bb;
4058 ptrdiff_t bound_index = queue_index;
4059 edge e;
4060 edge_iterator ei;
4062 queue = queues[queue_index];
4063 bb = queue.pop ();
4065 /* OK, we later inserted the BB with lower priority, skip it. */
4066 if (*block_priority.get (bb) > queue_index)
4067 continue;
4069 /* See if we can improve the bound. */
4070 ptrdiff_t *entry = bb_bounds.get (bb);
4071 if (entry && *entry < bound_index)
4072 bound_index = *entry;
4074 /* Insert succesors into the queue, watch for latch edge
4075 and record greatest index we saw. */
4076 FOR_EACH_EDGE (e, ei, bb->succs)
4078 bool insert = false;
4080 if (loop_exit_edge_p (loop, e))
4081 continue;
4083 if (e == loop_latch_edge (loop)
4084 && latch_index < bound_index)
4085 latch_index = bound_index;
4086 else if (!(entry = block_priority.get (e->dest)))
4088 insert = true;
4089 block_priority.put (e->dest, bound_index);
4091 else if (*entry < bound_index)
4093 insert = true;
4094 *entry = bound_index;
4097 if (insert)
4098 queues[bound_index].safe_push (e->dest);
4102 queues[queue_index].release ();
4105 gcc_assert (latch_index >= 0);
4106 if ((unsigned)latch_index < bounds.length ())
4108 if (dump_file && (dump_flags & TDF_DETAILS))
4110 fprintf (dump_file, "Found better loop bound ");
4111 print_decu (bounds[latch_index], dump_file);
4112 fprintf (dump_file, "\n");
4114 record_niter_bound (loop, bounds[latch_index], false, true);
4117 queues.release ();
4120 /* See if every path cross the loop goes through a statement that is known
4121 to not execute at the last iteration. In that case we can decrese iteration
4122 count by 1. */
4124 static void
4125 maybe_lower_iteration_bound (class loop *loop)
4127 hash_set<gimple *> *not_executed_last_iteration = NULL;
4128 class nb_iter_bound *elt;
4129 bool found_exit = false;
4130 auto_vec<basic_block> queue;
4131 bitmap visited;
4133 /* Collect all statements with interesting (i.e. lower than
4134 nb_iterations_upper_bound) bound on them.
4136 TODO: Due to the way record_estimate choose estimates to store, the bounds
4137 will be always nb_iterations_upper_bound-1. We can change this to record
4138 also statements not dominating the loop latch and update the walk bellow
4139 to the shortest path algorithm. */
4140 for (elt = loop->bounds; elt; elt = elt->next)
4142 if (!elt->is_exit
4143 && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4145 if (!not_executed_last_iteration)
4146 not_executed_last_iteration = new hash_set<gimple *>;
4147 not_executed_last_iteration->add (elt->stmt);
4150 if (!not_executed_last_iteration)
4151 return;
4153 /* Start DFS walk in the loop header and see if we can reach the
4154 loop latch or any of the exits (including statements with side
4155 effects that may terminate the loop otherwise) without visiting
4156 any of the statements known to have undefined effect on the last
4157 iteration. */
4158 queue.safe_push (loop->header);
4159 visited = BITMAP_ALLOC (NULL);
4160 bitmap_set_bit (visited, loop->header->index);
4161 found_exit = false;
4165 basic_block bb = queue.pop ();
4166 gimple_stmt_iterator gsi;
4167 bool stmt_found = false;
4169 /* Loop for possible exits and statements bounding the execution. */
4170 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4172 gimple *stmt = gsi_stmt (gsi);
4173 if (not_executed_last_iteration->contains (stmt))
4175 stmt_found = true;
4176 break;
4178 if (gimple_has_side_effects (stmt))
4180 found_exit = true;
4181 break;
4184 if (found_exit)
4185 break;
4187 /* If no bounding statement is found, continue the walk. */
4188 if (!stmt_found)
4190 edge e;
4191 edge_iterator ei;
4193 FOR_EACH_EDGE (e, ei, bb->succs)
4195 if (loop_exit_edge_p (loop, e)
4196 || e == loop_latch_edge (loop))
4198 found_exit = true;
4199 break;
4201 if (bitmap_set_bit (visited, e->dest->index))
4202 queue.safe_push (e->dest);
4206 while (queue.length () && !found_exit);
4208 /* If every path through the loop reach bounding statement before exit,
4209 then we know the last iteration of the loop will have undefined effect
4210 and we can decrease number of iterations. */
4212 if (!found_exit)
4214 if (dump_file && (dump_flags & TDF_DETAILS))
4215 fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4216 "undefined statement must be executed at the last iteration.\n");
4217 record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4218 false, true);
4221 BITMAP_FREE (visited);
4222 delete not_executed_last_iteration;
4225 /* Get expected upper bound for number of loop iterations for
4226 BUILT_IN_EXPECT_WITH_PROBABILITY for a condition COND. */
4228 static tree
4229 get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
4231 if (cond == NULL)
4232 return NULL_TREE;
4234 tree lhs = gimple_cond_lhs (cond);
4235 if (TREE_CODE (lhs) != SSA_NAME)
4236 return NULL_TREE;
4238 gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
4239 gcall *def = dyn_cast<gcall *> (stmt);
4240 if (def == NULL)
4241 return NULL_TREE;
4243 tree decl = gimple_call_fndecl (def);
4244 if (!decl
4245 || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
4246 || gimple_call_num_args (stmt) != 3)
4247 return NULL_TREE;
4249 tree c = gimple_call_arg (def, 1);
4250 tree condt = TREE_TYPE (lhs);
4251 tree res = fold_build2 (gimple_cond_code (cond),
4252 condt, c,
4253 gimple_cond_rhs (cond));
4254 if (TREE_CODE (res) != INTEGER_CST)
4255 return NULL_TREE;
4258 tree prob = gimple_call_arg (def, 2);
4259 tree t = TREE_TYPE (prob);
4260 tree one
4261 = build_real_from_int_cst (t,
4262 integer_one_node);
4263 if (integer_zerop (res))
4264 prob = fold_build2 (MINUS_EXPR, t, one, prob);
4265 tree r = fold_build2 (RDIV_EXPR, t, one, prob);
4266 if (TREE_CODE (r) != REAL_CST)
4267 return NULL_TREE;
4269 HOST_WIDE_INT probi
4270 = real_to_integer (TREE_REAL_CST_PTR (r));
4271 return build_int_cst (condt, probi);
4274 /* Records estimates on numbers of iterations of LOOP. If USE_UNDEFINED_P
4275 is true also use estimates derived from undefined behavior. */
4277 void
4278 estimate_numbers_of_iterations (class loop *loop)
4280 tree niter, type;
4281 unsigned i;
4282 class tree_niter_desc niter_desc;
4283 edge ex;
4284 widest_int bound;
4285 edge likely_exit;
4287 /* Give up if we already have tried to compute an estimation. */
4288 if (loop->estimate_state != EST_NOT_COMPUTED)
4289 return;
4291 loop->estimate_state = EST_AVAILABLE;
4293 /* If we have a measured profile, use it to estimate the number of
4294 iterations. Normally this is recorded by branch_prob right after
4295 reading the profile. In case we however found a new loop, record the
4296 information here.
4298 Explicitly check for profile status so we do not report
4299 wrong prediction hitrates for guessed loop iterations heuristics.
4300 Do not recompute already recorded bounds - we ought to be better on
4301 updating iteration bounds than updating profile in general and thus
4302 recomputing iteration bounds later in the compilation process will just
4303 introduce random roundoff errors. */
4304 if (!loop->any_estimate
4305 && loop->header->count.reliable_p ())
4307 gcov_type nit = expected_loop_iterations_unbounded (loop);
4308 bound = gcov_type_to_wide_int (nit);
4309 record_niter_bound (loop, bound, true, false);
4312 /* Ensure that loop->nb_iterations is computed if possible. If it turns out
4313 to be constant, we avoid undefined behavior implied bounds and instead
4314 diagnose those loops with -Waggressive-loop-optimizations. */
4315 number_of_latch_executions (loop);
4317 basic_block *body = get_loop_body (loop);
4318 auto_vec<edge> exits = get_loop_exit_edges (loop, body);
4319 likely_exit = single_likely_exit (loop, exits);
4320 FOR_EACH_VEC_ELT (exits, i, ex)
4322 if (ex == likely_exit)
4324 gimple *stmt = last_stmt (ex->src);
4325 if (stmt != NULL)
4327 gcond *cond = dyn_cast<gcond *> (stmt);
4328 tree niter_bound
4329 = get_upper_bound_based_on_builtin_expr_with_prob (cond);
4330 if (niter_bound != NULL_TREE)
4332 widest_int max = derive_constant_upper_bound (niter_bound);
4333 record_estimate (loop, niter_bound, max, cond,
4334 true, true, false);
4339 if (!number_of_iterations_exit (loop, ex, &niter_desc,
4340 false, false, body))
4341 continue;
4343 niter = niter_desc.niter;
4344 type = TREE_TYPE (niter);
4345 if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4346 niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4347 build_int_cst (type, 0),
4348 niter);
4349 record_estimate (loop, niter, niter_desc.max,
4350 last_stmt (ex->src),
4351 true, ex == likely_exit, true);
4352 record_control_iv (loop, &niter_desc);
4355 if (flag_aggressive_loop_optimizations)
4356 infer_loop_bounds_from_undefined (loop, body);
4357 free (body);
4359 discover_iteration_bound_by_body_walk (loop);
4361 maybe_lower_iteration_bound (loop);
4363 /* If we know the exact number of iterations of this loop, try to
4364 not break code with undefined behavior by not recording smaller
4365 maximum number of iterations. */
4366 if (loop->nb_iterations
4367 && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4369 loop->any_upper_bound = true;
4370 loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4374 /* Sets NIT to the estimated number of executions of the latch of the
4375 LOOP. If CONSERVATIVE is true, we must be sure that NIT is at least as
4376 large as the number of iterations. If we have no reliable estimate,
4377 the function returns false, otherwise returns true. */
4379 bool
4380 estimated_loop_iterations (class loop *loop, widest_int *nit)
4382 /* When SCEV information is available, try to update loop iterations
4383 estimate. Otherwise just return whatever we recorded earlier. */
4384 if (scev_initialized_p ())
4385 estimate_numbers_of_iterations (loop);
4387 return (get_estimated_loop_iterations (loop, nit));
4390 /* Similar to estimated_loop_iterations, but returns the estimate only
4391 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
4392 on the number of iterations of LOOP could not be derived, returns -1. */
4394 HOST_WIDE_INT
4395 estimated_loop_iterations_int (class loop *loop)
4397 widest_int nit;
4398 HOST_WIDE_INT hwi_nit;
4400 if (!estimated_loop_iterations (loop, &nit))
4401 return -1;
4403 if (!wi::fits_shwi_p (nit))
4404 return -1;
4405 hwi_nit = nit.to_shwi ();
4407 return hwi_nit < 0 ? -1 : hwi_nit;
4411 /* Sets NIT to an upper bound for the maximum number of executions of the
4412 latch of the LOOP. If we have no reliable estimate, the function returns
4413 false, otherwise returns true. */
4415 bool
4416 max_loop_iterations (class loop *loop, widest_int *nit)
4418 /* When SCEV information is available, try to update loop iterations
4419 estimate. Otherwise just return whatever we recorded earlier. */
4420 if (scev_initialized_p ())
4421 estimate_numbers_of_iterations (loop);
4423 return get_max_loop_iterations (loop, nit);
4426 /* Similar to max_loop_iterations, but returns the estimate only
4427 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
4428 on the number of iterations of LOOP could not be derived, returns -1. */
4430 HOST_WIDE_INT
4431 max_loop_iterations_int (class loop *loop)
4433 widest_int nit;
4434 HOST_WIDE_INT hwi_nit;
4436 if (!max_loop_iterations (loop, &nit))
4437 return -1;
4439 if (!wi::fits_shwi_p (nit))
4440 return -1;
4441 hwi_nit = nit.to_shwi ();
4443 return hwi_nit < 0 ? -1 : hwi_nit;
4446 /* Sets NIT to an likely upper bound for the maximum number of executions of the
4447 latch of the LOOP. If we have no reliable estimate, the function returns
4448 false, otherwise returns true. */
4450 bool
4451 likely_max_loop_iterations (class loop *loop, widest_int *nit)
4453 /* When SCEV information is available, try to update loop iterations
4454 estimate. Otherwise just return whatever we recorded earlier. */
4455 if (scev_initialized_p ())
4456 estimate_numbers_of_iterations (loop);
4458 return get_likely_max_loop_iterations (loop, nit);
4461 /* Similar to max_loop_iterations, but returns the estimate only
4462 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
4463 on the number of iterations of LOOP could not be derived, returns -1. */
4465 HOST_WIDE_INT
4466 likely_max_loop_iterations_int (class loop *loop)
4468 widest_int nit;
4469 HOST_WIDE_INT hwi_nit;
4471 if (!likely_max_loop_iterations (loop, &nit))
4472 return -1;
4474 if (!wi::fits_shwi_p (nit))
4475 return -1;
4476 hwi_nit = nit.to_shwi ();
4478 return hwi_nit < 0 ? -1 : hwi_nit;
4481 /* Returns an estimate for the number of executions of statements
4482 in the LOOP. For statements before the loop exit, this exceeds
4483 the number of execution of the latch by one. */
4485 HOST_WIDE_INT
4486 estimated_stmt_executions_int (class loop *loop)
4488 HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4489 HOST_WIDE_INT snit;
4491 if (nit == -1)
4492 return -1;
4494 snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
4496 /* If the computation overflows, return -1. */
4497 return snit < 0 ? -1 : snit;
4500 /* Sets NIT to the maximum number of executions of the latch of the
4501 LOOP, plus one. If we have no reliable estimate, the function returns
4502 false, otherwise returns true. */
4504 bool
4505 max_stmt_executions (class loop *loop, widest_int *nit)
4507 widest_int nit_minus_one;
4509 if (!max_loop_iterations (loop, nit))
4510 return false;
4512 nit_minus_one = *nit;
4514 *nit += 1;
4516 return wi::gtu_p (*nit, nit_minus_one);
4519 /* Sets NIT to the estimated maximum number of executions of the latch of the
4520 LOOP, plus one. If we have no likely estimate, the function returns
4521 false, otherwise returns true. */
4523 bool
4524 likely_max_stmt_executions (class loop *loop, widest_int *nit)
4526 widest_int nit_minus_one;
4528 if (!likely_max_loop_iterations (loop, nit))
4529 return false;
4531 nit_minus_one = *nit;
4533 *nit += 1;
4535 return wi::gtu_p (*nit, nit_minus_one);
4538 /* Sets NIT to the estimated number of executions of the latch of the
4539 LOOP, plus one. If we have no reliable estimate, the function returns
4540 false, otherwise returns true. */
4542 bool
4543 estimated_stmt_executions (class loop *loop, widest_int *nit)
4545 widest_int nit_minus_one;
4547 if (!estimated_loop_iterations (loop, nit))
4548 return false;
4550 nit_minus_one = *nit;
4552 *nit += 1;
4554 return wi::gtu_p (*nit, nit_minus_one);
4557 /* Records estimates on numbers of iterations of loops. */
4559 void
4560 estimate_numbers_of_iterations (function *fn)
4562 /* We don't want to issue signed overflow warnings while getting
4563 loop iteration estimates. */
4564 fold_defer_overflow_warnings ();
4566 for (auto loop : loops_list (fn, 0))
4567 estimate_numbers_of_iterations (loop);
4569 fold_undefer_and_ignore_overflow_warnings ();
4572 /* Returns true if statement S1 dominates statement S2. */
4574 bool
4575 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
4577 basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
4579 if (!bb1
4580 || s1 == s2)
4581 return true;
4583 if (bb1 == bb2)
4585 gimple_stmt_iterator bsi;
4587 if (gimple_code (s2) == GIMPLE_PHI)
4588 return false;
4590 if (gimple_code (s1) == GIMPLE_PHI)
4591 return true;
4593 for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
4594 if (gsi_stmt (bsi) == s1)
4595 return true;
4597 return false;
4600 return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
4603 /* Returns true when we can prove that the number of executions of
4604 STMT in the loop is at most NITER, according to the bound on
4605 the number of executions of the statement NITER_BOUND->stmt recorded in
4606 NITER_BOUND and fact that NITER_BOUND->stmt dominate STMT.
4608 ??? This code can become quite a CPU hog - we can have many bounds,
4609 and large basic block forcing stmt_dominates_stmt_p to be queried
4610 many times on a large basic blocks, so the whole thing is O(n^2)
4611 for scev_probably_wraps_p invocation (that can be done n times).
4613 It would make more sense (and give better answers) to remember BB
4614 bounds computed by discover_iteration_bound_by_body_walk. */
4616 static bool
4617 n_of_executions_at_most (gimple *stmt,
4618 class nb_iter_bound *niter_bound,
4619 tree niter)
4621 widest_int bound = niter_bound->bound;
4622 tree nit_type = TREE_TYPE (niter), e;
4623 enum tree_code cmp;
4625 gcc_assert (TYPE_UNSIGNED (nit_type));
4627 /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
4628 the number of iterations is small. */
4629 if (!wi::fits_to_tree_p (bound, nit_type))
4630 return false;
4632 /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
4633 times. This means that:
4635 -- if NITER_BOUND->is_exit is true, then everything after
4636 it at most NITER_BOUND->bound times.
4638 -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
4639 is executed, then NITER_BOUND->stmt is executed as well in the same
4640 iteration then STMT is executed at most NITER_BOUND->bound + 1 times.
4642 If we can determine that NITER_BOUND->stmt is always executed
4643 after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
4644 We conclude that if both statements belong to the same
4645 basic block and STMT is before NITER_BOUND->stmt and there are no
4646 statements with side effects in between. */
4648 if (niter_bound->is_exit)
4650 if (stmt == niter_bound->stmt
4651 || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4652 return false;
4653 cmp = GE_EXPR;
4655 else
4657 if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
4659 gimple_stmt_iterator bsi;
4660 if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
4661 || gimple_code (stmt) == GIMPLE_PHI
4662 || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
4663 return false;
4665 /* By stmt_dominates_stmt_p we already know that STMT appears
4666 before NITER_BOUND->STMT. Still need to test that the loop
4667 cannot be terinated by a side effect in between. */
4668 for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
4669 gsi_next (&bsi))
4670 if (gimple_has_side_effects (gsi_stmt (bsi)))
4671 return false;
4672 bound += 1;
4673 if (bound == 0
4674 || !wi::fits_to_tree_p (bound, nit_type))
4675 return false;
4677 cmp = GT_EXPR;
4680 e = fold_binary (cmp, boolean_type_node,
4681 niter, wide_int_to_tree (nit_type, bound));
4682 return e && integer_nonzerop (e);
4685 /* Returns true if the arithmetics in TYPE can be assumed not to wrap. */
4687 bool
4688 nowrap_type_p (tree type)
4690 if (ANY_INTEGRAL_TYPE_P (type)
4691 && TYPE_OVERFLOW_UNDEFINED (type))
4692 return true;
4694 if (POINTER_TYPE_P (type))
4695 return true;
4697 return false;
4700 /* Return true if we can prove LOOP is exited before evolution of induction
4701 variable {BASE, STEP} overflows with respect to its type bound. */
4703 static bool
4704 loop_exits_before_overflow (tree base, tree step,
4705 gimple *at_stmt, class loop *loop)
4707 widest_int niter;
4708 struct control_iv *civ;
4709 class nb_iter_bound *bound;
4710 tree e, delta, step_abs, unsigned_base;
4711 tree type = TREE_TYPE (step);
4712 tree unsigned_type, valid_niter;
4714 /* Don't issue signed overflow warnings. */
4715 fold_defer_overflow_warnings ();
4717 /* Compute the number of iterations before we reach the bound of the
4718 type, and verify that the loop is exited before this occurs. */
4719 unsigned_type = unsigned_type_for (type);
4720 unsigned_base = fold_convert (unsigned_type, base);
4722 if (tree_int_cst_sign_bit (step))
4724 tree extreme = fold_convert (unsigned_type,
4725 lower_bound_in_type (type, type));
4726 delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
4727 step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
4728 fold_convert (unsigned_type, step));
4730 else
4732 tree extreme = fold_convert (unsigned_type,
4733 upper_bound_in_type (type, type));
4734 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
4735 step_abs = fold_convert (unsigned_type, step);
4738 valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
4740 estimate_numbers_of_iterations (loop);
4742 if (max_loop_iterations (loop, &niter)
4743 && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
4744 && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
4745 wide_int_to_tree (TREE_TYPE (valid_niter),
4746 niter))) != NULL
4747 && integer_nonzerop (e))
4749 fold_undefer_and_ignore_overflow_warnings ();
4750 return true;
4752 if (at_stmt)
4753 for (bound = loop->bounds; bound; bound = bound->next)
4755 if (n_of_executions_at_most (at_stmt, bound, valid_niter))
4757 fold_undefer_and_ignore_overflow_warnings ();
4758 return true;
4761 fold_undefer_and_ignore_overflow_warnings ();
4763 /* Try to prove loop is exited before {base, step} overflows with the
4764 help of analyzed loop control IV. This is done only for IVs with
4765 constant step because otherwise we don't have the information. */
4766 if (TREE_CODE (step) == INTEGER_CST)
4768 for (civ = loop->control_ivs; civ; civ = civ->next)
4770 enum tree_code code;
4771 tree civ_type = TREE_TYPE (civ->step);
4773 /* Have to consider type difference because operand_equal_p ignores
4774 that for constants. */
4775 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
4776 || element_precision (type) != element_precision (civ_type))
4777 continue;
4779 /* Only consider control IV with same step. */
4780 if (!operand_equal_p (step, civ->step, 0))
4781 continue;
4783 /* Done proving if this is a no-overflow control IV. */
4784 if (operand_equal_p (base, civ->base, 0))
4785 return true;
4787 /* Control IV is recorded after expanding simple operations,
4788 Here we expand base and compare it too. */
4789 tree expanded_base = expand_simple_operations (base);
4790 if (operand_equal_p (expanded_base, civ->base, 0))
4791 return true;
4793 /* If this is a before stepping control IV, in other words, we have
4795 {civ_base, step} = {base + step, step}
4797 Because civ {base + step, step} doesn't overflow during loop
4798 iterations, {base, step} will not overflow if we can prove the
4799 operation "base + step" does not overflow. Specifically, we try
4800 to prove below conditions are satisfied:
4802 base <= UPPER_BOUND (type) - step ;;step > 0
4803 base >= LOWER_BOUND (type) - step ;;step < 0
4805 by proving the reverse conditions are false using loop's initial
4806 condition. */
4807 if (POINTER_TYPE_P (TREE_TYPE (base)))
4808 code = POINTER_PLUS_EXPR;
4809 else
4810 code = PLUS_EXPR;
4812 tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
4813 tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
4814 expanded_base, step);
4815 if (operand_equal_p (stepped, civ->base, 0)
4816 || operand_equal_p (expanded_stepped, civ->base, 0))
4818 tree extreme;
4820 if (tree_int_cst_sign_bit (step))
4822 code = LT_EXPR;
4823 extreme = lower_bound_in_type (type, type);
4825 else
4827 code = GT_EXPR;
4828 extreme = upper_bound_in_type (type, type);
4830 extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
4831 e = fold_build2 (code, boolean_type_node, base, extreme);
4832 e = simplify_using_initial_conditions (loop, e);
4833 if (integer_zerop (e))
4834 return true;
4839 return false;
4842 /* VAR is scev variable whose evolution part is constant STEP, this function
4843 proves that VAR can't overflow by using value range info. If VAR's value
4844 range is [MIN, MAX], it can be proven by:
4845 MAX + step doesn't overflow ; if step > 0
4847 MIN + step doesn't underflow ; if step < 0.
4849 We can only do this if var is computed in every loop iteration, i.e, var's
4850 definition has to dominate loop latch. Consider below example:
4853 unsigned int i;
4855 <bb 3>:
4857 <bb 4>:
4858 # RANGE [0, 4294967294] NONZERO 65535
4859 # i_21 = PHI <0(3), i_18(9)>
4860 if (i_21 != 0)
4861 goto <bb 6>;
4862 else
4863 goto <bb 8>;
4865 <bb 6>:
4866 # RANGE [0, 65533] NONZERO 65535
4867 _6 = i_21 + 4294967295;
4868 # RANGE [0, 65533] NONZERO 65535
4869 _7 = (long unsigned int) _6;
4870 # RANGE [0, 524264] NONZERO 524280
4871 _8 = _7 * 8;
4872 # PT = nonlocal escaped
4873 _9 = a_14 + _8;
4874 *_9 = 0;
4876 <bb 8>:
4877 # RANGE [1, 65535] NONZERO 65535
4878 i_18 = i_21 + 1;
4879 if (i_18 >= 65535)
4880 goto <bb 10>;
4881 else
4882 goto <bb 9>;
4884 <bb 9>:
4885 goto <bb 4>;
4887 <bb 10>:
4888 return;
4891 VAR _6 doesn't overflow only with pre-condition (i_21 != 0), here we
4892 can't use _6 to prove no-overlfow for _7. In fact, var _7 takes value
4893 sequence (4294967295, 0, 1, ..., 65533) in loop life time, rather than
4894 (4294967295, 4294967296, ...). */
4896 static bool
4897 scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
4899 tree type;
4900 wide_int minv, maxv, diff, step_wi;
4902 if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
4903 return false;
4905 /* Check if VAR evaluates in every loop iteration. It's not the case
4906 if VAR is default definition or does not dominate loop's latch. */
4907 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
4908 if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
4909 return false;
4911 value_range r;
4912 get_range_query (cfun)->range_of_expr (r, var);
4913 if (r.kind () != VR_RANGE)
4914 return false;
4916 /* VAR is a scev whose evolution part is STEP and value range info
4917 is [MIN, MAX], we can prove its no-overflowness by conditions:
4919 type_MAX - MAX >= step ; if step > 0
4920 MIN - type_MIN >= |step| ; if step < 0.
4922 Or VAR must take value outside of value range, which is not true. */
4923 step_wi = wi::to_wide (step);
4924 type = TREE_TYPE (var);
4925 if (tree_int_cst_sign_bit (step))
4927 diff = r.lower_bound () - wi::to_wide (lower_bound_in_type (type, type));
4928 step_wi = - step_wi;
4930 else
4931 diff = wi::to_wide (upper_bound_in_type (type, type)) - r.upper_bound ();
4933 return (wi::geu_p (diff, step_wi));
4936 /* Return false only when the induction variable BASE + STEP * I is
4937 known to not overflow: i.e. when the number of iterations is small
4938 enough with respect to the step and initial condition in order to
4939 keep the evolution confined in TYPEs bounds. Return true when the
4940 iv is known to overflow or when the property is not computable.
4942 USE_OVERFLOW_SEMANTICS is true if this function should assume that
4943 the rules for overflow of the given language apply (e.g., that signed
4944 arithmetics in C does not overflow).
4946 If VAR is a ssa variable, this function also returns false if VAR can
4947 be proven not overflow with value range info. */
4949 bool
4950 scev_probably_wraps_p (tree var, tree base, tree step,
4951 gimple *at_stmt, class loop *loop,
4952 bool use_overflow_semantics)
4954 /* FIXME: We really need something like
4955 http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
4957 We used to test for the following situation that frequently appears
4958 during address arithmetics:
4960 D.1621_13 = (long unsigned intD.4) D.1620_12;
4961 D.1622_14 = D.1621_13 * 8;
4962 D.1623_15 = (doubleD.29 *) D.1622_14;
4964 And derived that the sequence corresponding to D_14
4965 can be proved to not wrap because it is used for computing a
4966 memory access; however, this is not really the case -- for example,
4967 if D_12 = (unsigned char) [254,+,1], then D_14 has values
4968 2032, 2040, 0, 8, ..., but the code is still legal. */
4970 if (chrec_contains_undetermined (base)
4971 || chrec_contains_undetermined (step))
4972 return true;
4974 if (integer_zerop (step))
4975 return false;
4977 /* If we can use the fact that signed and pointer arithmetics does not
4978 wrap, we are done. */
4979 if (use_overflow_semantics && nowrap_type_p (TREE_TYPE (base)))
4980 return false;
4982 /* To be able to use estimates on number of iterations of the loop,
4983 we must have an upper bound on the absolute value of the step. */
4984 if (TREE_CODE (step) != INTEGER_CST)
4985 return true;
4987 /* Check if var can be proven not overflow with value range info. */
4988 if (var && TREE_CODE (var) == SSA_NAME
4989 && scev_var_range_cant_overflow (var, step, loop))
4990 return false;
4992 if (loop_exits_before_overflow (base, step, at_stmt, loop))
4993 return false;
4995 /* At this point we still don't have a proof that the iv does not
4996 overflow: give up. */
4997 return true;
5000 /* Frees the information on upper bounds on numbers of iterations of LOOP. */
5002 void
5003 free_numbers_of_iterations_estimates (class loop *loop)
5005 struct control_iv *civ;
5006 class nb_iter_bound *bound;
5008 loop->nb_iterations = NULL;
5009 loop->estimate_state = EST_NOT_COMPUTED;
5010 for (bound = loop->bounds; bound;)
5012 class nb_iter_bound *next = bound->next;
5013 ggc_free (bound);
5014 bound = next;
5016 loop->bounds = NULL;
5018 for (civ = loop->control_ivs; civ;)
5020 struct control_iv *next = civ->next;
5021 ggc_free (civ);
5022 civ = next;
5024 loop->control_ivs = NULL;
5027 /* Frees the information on upper bounds on numbers of iterations of loops. */
5029 void
5030 free_numbers_of_iterations_estimates (function *fn)
5032 for (auto loop : loops_list (fn, 0))
5033 free_numbers_of_iterations_estimates (loop);
5036 /* Substitute value VAL for ssa name NAME inside expressions held
5037 at LOOP. */
5039 void
5040 substitute_in_loop_info (class loop *loop, tree name, tree val)
5042 loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);