/* Functions to determine/estimate number of iterations of a loop.
   Copyright (C) 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-data-ref.h"
#include "tree-inline.h"

#define SWAP(X, Y) do { void *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
/*

   Analysis of number of iterations of an affine exit test.

*/
/* Returns true if ARG is either NULL_TREE or constant zero.  Unlike
   integer_zerop, it does not care about overflow flags.  */

bool
zero_p (tree arg)
{
  if (!arg)
    return true;

  if (TREE_CODE (arg) != INTEGER_CST)
    return false;

  return (TREE_INT_CST_LOW (arg) == 0 && TREE_INT_CST_HIGH (arg) == 0);
}
/* Returns true if ARG is a nonzero constant.  Unlike integer_nonzerop, it
   does not care about overflow flags.  */

static bool
nonzero_p (tree arg)
{
  if (!arg)
    return false;

  if (TREE_CODE (arg) != INTEGER_CST)
    return false;

  return (TREE_INT_CST_LOW (arg) != 0 || TREE_INT_CST_HIGH (arg) != 0);
}
/* Returns inverse of X modulo 2^s, where MASK = 2^s-1.  */

static tree
inverse (tree x, tree mask)
{
  tree type = TREE_TYPE (x);
  tree rslt;
  unsigned ctr = tree_floor_log2 (mask);

  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT ix;
      unsigned HOST_WIDE_INT imask;
      unsigned HOST_WIDE_INT irslt = 1;

      gcc_assert (cst_and_fits_in_hwi (x));
      gcc_assert (cst_and_fits_in_hwi (mask));

      ix = int_cst_value (x);
      imask = int_cst_value (mask);

      for (; ctr; ctr--)
        {
          irslt *= ix;
          ix *= ix;
        }
      irslt &= imask;

      rslt = build_int_cst_type (type, irslt);
    }
  else
    {
      rslt = build_int_cst (type, 1);
      for (; ctr; ctr--)
        {
          rslt = int_const_binop (MULT_EXPR, rslt, x, 0);
          x = int_const_binop (MULT_EXPR, x, x, 0);
        }
      rslt = int_const_binop (BIT_AND_EXPR, rslt, mask, 0);
    }

  return rslt;
}
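/* As an illustration of the computation above: with MASK = 15 we work
   modulo 2^4 = 16 and ctr = tree_floor_log2 (15) = 3, so the loop forms
   x^(2^3 - 1) = x^7.  Every odd x satisfies x^(2^(s-1)) == 1 (mod 2^s),
   hence x^7 is the inverse of x modulo 16; e.g. for x = 3 we get
   3^7 = 2187 == 11 (mod 16), and indeed 3 * 11 = 33 == 1 (mod 16).  */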
/* Determines number of iterations of loop whose ending condition
   is IV <> FINAL.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  NEVER_INFINITE is true if
   we know that the exit must be taken eventually, i.e., that the IV
   eventually reaches the value FINAL (we derived this earlier, and possibly
   set NITER->assumptions to make sure this is the case).  */
static bool
number_of_iterations_ne (tree type, affine_iv *iv, tree final,
                         struct tree_niter_desc *niter, bool never_infinite)
{
  tree niter_type = unsigned_type_for (type);
  tree s, c, d, bits, assumption, tmp, bound;

  niter->control = *iv;
  niter->bound = final;
  niter->cmp = NE_EXPR;

  /* Rearrange the terms so that we get inequality s * i <> c, with s
     positive.  Also cast everything to the unsigned type.  */
  if (tree_int_cst_sign_bit (iv->step))
    {
      s = fold_convert (niter_type,
                        fold_build1 (NEGATE_EXPR, type, iv->step));
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv->base),
                       fold_convert (niter_type, final));
    }
  else
    {
      s = fold_convert (niter_type, iv->step);
      c = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, final),
                       fold_convert (niter_type, iv->base));
    }

  /* First the trivial cases -- when the step is 1.  */
  if (integer_onep (s))
    {
      niter->niter = c;
      return true;
    }

  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
     is infinite.  Otherwise, the number of iterations is
     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
  bits = num_ending_zeros (s);
  bound = build_low_bits_mask (niter_type,
                               (TYPE_PRECISION (niter_type)
                                - tree_low_cst (bits, 1)));

  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
                               build_int_cst (niter_type, 1), bits);
  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);

  if (!never_infinite)
    {
      /* If we cannot assume that the loop is not infinite, record the
         assumptions for divisibility of c.  */
      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
                                assumption, build_int_cst (niter_type, 0));
      if (!nonzero_p (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
  tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
  niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
  return true;
}
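/* For instance, for a loop with exit test i != 12 where i starts at 0 and is
   incremented by 4 in an 8-bit unsigned type: s = 4 and c = 12, so bits = 2,
   d = 4 and bound = 2^6 - 1 = 63.  The divisibility assumption 12 % 4 == 0
   holds, c / d = 3, s / d = 1 with inverse 1, and the number of iterations
   becomes (3 * 1) & 63 = 3, matching the iterations i = 0, 4, 8.  */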
/* Checks whether we can determine the final value of the control variable
   of the loop with ending condition IV0 < IV1 (computed in TYPE).
   DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
   of the step.  The assumptions necessary to ensure that the computation
   of the final value does not overflow are recorded in NITER.  If we
   find the final value, we adjust DELTA and return TRUE.  Otherwise
   we return false.  */

static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
                               struct tree_niter_desc *niter,
                               tree *delta, tree step)
{
  tree niter_type = TREE_TYPE (step);
  tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step);
  tree tmod;
  tree assumption = boolean_true_node, bound, noloop;

  if (TREE_CODE (mod) != INTEGER_CST)
    return false;
  if (nonzero_p (mod))
    mod = fold_build2 (MINUS_EXPR, niter_type, step, mod);
  tmod = fold_convert (type, mod);

  if (nonzero_p (iv0->step))
    {
      /* The final value of the iv is iv1->base + MOD, assuming that this
         computation does not overflow, and that
         iv0->base <= iv1->base + MOD.  */
      if (!iv1->no_overflow && !zero_p (mod))
        {
          bound = fold_build2 (MINUS_EXPR, type,
                               TYPE_MAX_VALUE (type), tmod);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
          if (zero_p (assumption))
            return false;
        }
      noloop = fold_build2 (GT_EXPR, boolean_type_node,
                            iv0->base,
                            fold_build2 (PLUS_EXPR, type,
                                         iv1->base, tmod));
    }
  else
    {
      /* The final value of the iv is iv0->base - MOD, assuming that this
         computation does not overflow, and that
         iv0->base - MOD <= iv1->base.  */
      if (!iv0->no_overflow && !zero_p (mod))
        {
          bound = fold_build2 (PLUS_EXPR, type,
                               TYPE_MIN_VALUE (type), tmod);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
          if (zero_p (assumption))
            return false;
        }
      noloop = fold_build2 (GT_EXPR, boolean_type_node,
                            fold_build2 (MINUS_EXPR, type,
                                         iv0->base, tmod),
                            iv1->base);
    }

  if (!nonzero_p (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);
  if (!zero_p (noloop))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero, noloop);
  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
  return true;
}
/* Add assertions to NITER that ensure that the control variable of the loop
   with ending condition IV0 < IV1 does not overflow.  Types of IV0 and IV1
   are TYPE.  Returns false if we can prove that there is an overflow, true
   otherwise.  STEP is the absolute value of the step.  */
static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                       struct tree_niter_desc *niter, tree step)
{
  tree bound, d, assumption, diff;
  tree niter_type = TREE_TYPE (step);

  if (nonzero_p (iv0->step))
    {
      /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
      if (iv0->no_overflow)
        return true;

      /* If iv0->base is a constant, we can determine the last value before
         overflow precisely; otherwise we conservatively assume
         MAX - STEP + 1.  */
      if (TREE_CODE (iv0->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
                           fold_convert (niter_type, iv0->base));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (MINUS_EXPR, type,
                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                iv1->base, bound);
    }
  else
    {
      /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
      if (iv1->no_overflow)
        return true;

      if (TREE_CODE (iv1->base) == INTEGER_CST)
        {
          d = fold_build2 (MINUS_EXPR, niter_type,
                           fold_convert (niter_type, iv1->base),
                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
        }
      else
        diff = fold_build2 (MINUS_EXPR, niter_type, step,
                            build_int_cst (niter_type, 1));
      bound = fold_build2 (PLUS_EXPR, type,
                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
      assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                iv0->base, bound);
    }

  if (zero_p (assumption))
    return false;
  if (!nonzero_p (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);

  iv0->no_overflow = true;
  iv1->no_overflow = true;
  return true;
}
/* Add an assumption to NITER that a loop whose ending condition
   is IV0 < IV1 rolls.  TYPE is the type of the control iv.  */
static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                      struct tree_niter_desc *niter)
{
  tree assumption = boolean_true_node, bound, diff;
  tree mbz, mbzl, mbzr;

  if (nonzero_p (iv0->step))
    {
      diff = fold_build2 (MINUS_EXPR, type,
                          iv0->step, build_int_cst (type, 1));

      /* We need to know that iv0->base >= MIN + iv0->step - 1.  Since
         0 address never belongs to any object, we can assume this for
         pointers.  */
      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type,
                               TYPE_MIN_VALUE (type), diff);
          assumption = fold_build2 (GE_EXPR, boolean_type_node,
                                    iv0->base, bound);
        }

      /* And then we can compute iv0->base - diff, and compare it with
         iv1->base.  */
      mbzl = fold_build2 (MINUS_EXPR, type, iv0->base, diff);
      mbzr = iv1->base;
    }
  else
    {
      diff = fold_build2 (PLUS_EXPR, type,
                          iv1->step, build_int_cst (type, 1));

      if (!POINTER_TYPE_P (type))
        {
          bound = fold_build2 (PLUS_EXPR, type,
                               TYPE_MAX_VALUE (type), diff);
          assumption = fold_build2 (LE_EXPR, boolean_type_node,
                                    iv1->base, bound);
        }

      mbzl = iv0->base;
      mbzr = fold_build2 (MINUS_EXPR, type, iv1->base, diff);
    }

  mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);

  if (!nonzero_p (assumption))
    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                      niter->assumptions, assumption);
  if (!zero_p (mbz))
    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                      niter->may_be_zero, mbz);
}
/* Determines number of iterations of loop whose ending condition
   is IV0 < IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  */
static bool
number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter,
                         bool never_infinite ATTRIBUTE_UNUSED)
{
  tree niter_type = unsigned_type_for (type);
  tree delta, step, s;

  if (nonzero_p (iv0->step))
    {
      niter->control = *iv0;
      niter->cmp = LT_EXPR;
      niter->bound = iv1->base;
    }
  else
    {
      niter->control = *iv1;
      niter->cmp = GT_EXPR;
      niter->bound = iv0->base;
    }

  delta = fold_build2 (MINUS_EXPR, niter_type,
                       fold_convert (niter_type, iv1->base),
                       fold_convert (niter_type, iv0->base));

  /* First handle the special case that the step is +-1.  */
  if ((iv0->step && integer_onep (iv0->step)
       && zero_p (iv1->step))
      || (iv1->step && integer_all_onesp (iv1->step)
          && zero_p (iv0->step)))
    {
      /* for (i = iv0->base; i < iv1->base; i++)

         or

         for (i = iv1->base; i > iv0->base; i--).

         In both cases # of iterations is iv1->base - iv0->base, assuming
         that iv1->base >= iv0->base.  */
      niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                        iv1->base, iv0->base);
      niter->niter = delta;
      return true;
    }

  if (nonzero_p (iv0->step))
    step = fold_convert (niter_type, iv0->step);
  else
    step = fold_convert (niter_type,
                         fold_build1 (NEGATE_EXPR, type, iv1->step));

  /* If we can determine the final value of the control iv exactly, we can
     transform the condition to != comparison.  In particular, this will be
     the case if DELTA is constant.  */
  if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step))
    {
      affine_iv zps;

      zps.base = build_int_cst (niter_type, 0);
      zps.step = step;
      /* number_of_iterations_lt_to_ne will add assumptions that ensure that
         zps does not overflow.  */
      zps.no_overflow = true;

      return number_of_iterations_ne (type, &zps, delta, niter, true);
    }

  /* Make sure that the control iv does not overflow.  */
  if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
    return false;

  /* We determine the number of iterations as (delta + step - 1) / step.  For
     this to work, we must know that iv1->base >= iv0->base - step + 1,
     otherwise the loop does not roll.  */
  assert_loop_rolls_lt (type, iv0, iv1, niter);

  s = fold_build2 (MINUS_EXPR, niter_type,
                   step, build_int_cst (niter_type, 1));
  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
  return true;
}
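/* The formula above rounds the distance up to a whole number of steps:
   e.g. with delta = 10 and step = 3 it yields (10 + 3 - 1) / 3 = 4,
   matching the iterations i = 0, 3, 6, 9 of a loop running while i < 10
   with i += 3.  (A constant delta like this one is in fact already handled
   by number_of_iterations_lt_to_ne above; the formula matters for symbolic
   bounds.)  */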
/* Determines number of iterations of loop whose ending condition
   is IV0 <= IV1.  TYPE is the type of the iv.  The number of
   iterations is stored to NITER.  NEVER_INFINITE is true if
   we know that this condition must eventually become false (we derived this
   earlier, and possibly set NITER->assumptions to make sure this
   is the case).  */
static bool
number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
                         struct tree_niter_desc *niter, bool never_infinite)
{
  tree assumption;

  /* Say that IV0 is the control variable.  Then IV0 <= IV1 iff
     IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
     value of the type.  This we must know anyway, since if it is
     equal to this value, the loop rolls forever.  */

  if (!never_infinite)
    {
      if (nonzero_p (iv0->step))
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv1->base, TYPE_MAX_VALUE (type));
      else
        assumption = fold_build2 (NE_EXPR, boolean_type_node,
                                  iv0->base, TYPE_MIN_VALUE (type));

      if (zero_p (assumption))
        return false;
      if (!nonzero_p (assumption))
        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                          niter->assumptions, assumption);
    }

  if (nonzero_p (iv0->step))
    iv1->base = fold_build2 (PLUS_EXPR, type,
                             iv1->base, build_int_cst (type, 1));
  else
    iv0->base = fold_build2 (MINUS_EXPR, type,
                             iv0->base, build_int_cst (type, 1));
  return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite);
}
/* Determine the number of iterations according to condition (for staying
   inside loop) which compares two induction variables using comparison
   operator CODE.  The induction variable on left side of the comparison
   is IV0, the right-hand side is IV1.  Both induction variables must have
   type TYPE, which must be an integer or pointer type.  The steps of the
   ivs must be constants (or NULL_TREE, which is interpreted as constant
   zero).

   ONLY_EXIT is true if we are sure this is the only way the loop could be
   exited (including possibly non-returning function calls, exceptions, etc.)
   -- in this case we can use the information whether the control induction
   variables can overflow or not in a more efficient way.

   The results (number of iterations and assumptions as described in
   comments at struct tree_niter_desc in tree-flow.h) are stored to NITER.
   Returns false if it fails to determine number of iterations, true if it
   was determined (possibly with some assumptions).  */
static bool
number_of_iterations_cond (tree type, affine_iv *iv0, enum tree_code code,
                           affine_iv *iv1, struct tree_niter_desc *niter,
                           bool only_exit)
{
  bool never_infinite;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if may_be_zero then the loop does not roll, even if
       niter != 0.  */
  niter->assumptions = boolean_true_node;
  niter->may_be_zero = boolean_false_node;
  niter->niter = NULL_TREE;
  niter->additional_info = boolean_true_node;
  niter->bound = NULL_TREE;
  niter->cmp = ERROR_MARK;

  /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
     the control variable is on lhs.  */
  if (code == GE_EXPR || code == GT_EXPR
      || (code == NE_EXPR && zero_p (iv0->step)))
    {
      SWAP (iv0, iv1);
      code = swap_tree_comparison (code);
    }

  if (!only_exit)
    {
      /* If this is not the only possible exit from the loop, the information
         that the induction variables cannot overflow as derived from
         signedness analysis cannot be relied upon.  We use them e.g. in the
         following way:  given loop for (i = 0; i <= n; i++), if i is
         signed, it cannot overflow, thus this loop is equivalent to
         for (i = 0; i < n + 1; i++);  however, if n == MAX, but the loop
         is exited in some other way before i overflows, this transformation
         is incorrect (the new loop exits immediately).  */
      iv0->no_overflow = false;
      iv1->no_overflow = false;
    }

  if (POINTER_TYPE_P (type))
    {
      /* Comparison of pointers is undefined unless both iv0 and iv1 point
         to the same object.  If they do, the control variable cannot wrap
         (as wrap around the bounds of memory will never return a pointer
         that would be guaranteed to point to the same object, even if we
         avoid undefined behavior by casting to size_t and back).  The
         restrictions on pointer arithmetic and comparisons of pointers
         ensure that using the no-overflow assumptions is correct in this
         case even if ONLY_EXIT is false.  */
      iv0->no_overflow = true;
      iv1->no_overflow = true;
    }

  /* If the control induction variable does not overflow, the loop obviously
     cannot be infinite.  */
  if (!zero_p (iv0->step) && iv0->no_overflow)
    never_infinite = true;
  else if (!zero_p (iv1->step) && iv1->no_overflow)
    never_infinite = true;
  else
    never_infinite = false;

  /* We can handle the case when neither of the sides of the comparison is
     invariant, provided that the test is NE_EXPR.  This rarely occurs in
     practice, but it is simple enough to manage.  */
  if (!zero_p (iv0->step) && !zero_p (iv1->step))
    {
      if (code != NE_EXPR)
        return false;

      iv0->step = fold_binary_to_constant (MINUS_EXPR, type,
                                           iv0->step, iv1->step);
      iv0->no_overflow = false;
      iv1->step = NULL_TREE;
      iv1->no_overflow = true;
    }

  /* If the result of the comparison is a constant, the loop is weird.  More
     precise handling would be possible, but the situation is not common
     enough to waste time on it.  */
  if (zero_p (iv0->step) && zero_p (iv1->step))
    return false;

  /* Ignore loops of while (i-- < 10) type.  */
  if (code != NE_EXPR)
    {
      if (iv0->step && tree_int_cst_sign_bit (iv0->step))
        return false;

      if (!zero_p (iv1->step) && !tree_int_cst_sign_bit (iv1->step))
        return false;
    }

  /* If the loop exits immediately, there is nothing to do.  */
  if (zero_p (fold_build2 (code, boolean_type_node, iv0->base, iv1->base)))
    {
      niter->niter = build_int_cst (unsigned_type_for (type), 0);
      return true;
    }

  /* OK, now we know we have a sensible loop.  Handle several cases, depending
     on what comparison operator is used.  */
  switch (code)
    {
    case NE_EXPR:
      gcc_assert (zero_p (iv1->step));
      return number_of_iterations_ne (type, iv0, iv1->base, niter,
                                      never_infinite);

    case LT_EXPR:
      return number_of_iterations_lt (type, iv0, iv1, niter, never_infinite);

    case LE_EXPR:
      return number_of_iterations_le (type, iv0, iv1, niter, never_infinite);

    default:
      gcc_unreachable ();
    }
}
/* Substitute NEW for OLD in EXPR and fold the result.  */

static tree
simplify_replace_tree (tree expr, tree old, tree new)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, se;

  if (!expr)
    return NULL_TREE;

  if (expr == old
      || operand_equal_p (expr, old, 0))
    return unshare_expr (new);

  if (!EXPR_P (expr))
    return expr;

  n = TREE_CODE_LENGTH (TREE_CODE (expr));
  for (i = 0; i < n; i++)
    {
      e = TREE_OPERAND (expr, i);
      se = simplify_replace_tree (e, old, new);
      if (e == se)
        continue;

      if (!ret)
        ret = copy_node (expr);

      TREE_OPERAND (ret, i) = se;
    }

  return (ret ? fold (ret) : expr);
}
/* Expand definitions of ssa names in EXPR as long as they are simple
   enough, and return the new expression.  */

static tree
expand_simple_operations (tree expr)
{
  unsigned i, n;
  tree ret = NULL_TREE, e, ee, stmt;
  enum tree_code code;

  if (expr == NULL_TREE)
    return expr;

  if (is_gimple_min_invariant (expr))
    return expr;

  code = TREE_CODE (expr);
  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
    {
      n = TREE_CODE_LENGTH (code);
      for (i = 0; i < n; i++)
        {
          e = TREE_OPERAND (expr, i);
          ee = expand_simple_operations (e);
          if (e == ee)
            continue;

          if (!ret)
            ret = copy_node (expr);

          TREE_OPERAND (ret, i) = ee;
        }

      return (ret ? fold (ret) : expr);
    }

  if (TREE_CODE (expr) != SSA_NAME)
    return expr;

  stmt = SSA_NAME_DEF_STMT (expr);
  if (TREE_CODE (stmt) != MODIFY_EXPR)
    return expr;

  e = TREE_OPERAND (stmt, 1);
  if (/* Casts are simple.  */
      TREE_CODE (e) != NOP_EXPR
      && TREE_CODE (e) != CONVERT_EXPR
      /* Copies are simple.  */
      && TREE_CODE (e) != SSA_NAME
      /* Assignments of invariants are simple.  */
      && !is_gimple_min_invariant (e)
      /* And increments and decrements by a constant are simple.  */
      && !((TREE_CODE (e) == PLUS_EXPR
            || TREE_CODE (e) == MINUS_EXPR)
           && is_gimple_min_invariant (TREE_OPERAND (e, 1))))
    return expr;

  return expand_simple_operations (e);
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).  */
static tree
tree_simplify_using_condition_1 (tree cond, tree expr)
{
  bool changed;
  tree e, te, e0, e1, e2, notcond;
  enum tree_code code = TREE_CODE (expr);

  if (code == INTEGER_CST)
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  /* In case COND is equality, we may be able to simplify EXPR by copy/constant
     propagation, and vice versa.  Fold does not handle this, since it is
     considered too expensive.  */
  if (TREE_CODE (cond) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (cond, 0);
      e1 = TREE_OPERAND (cond, 1);

      /* We know that e0 == e1.  Check whether we cannot simplify expr
         using this fact.  */
      e = simplify_replace_tree (expr, e0, e1);
      if (zero_p (e) || nonzero_p (e))
        return e;

      e = simplify_replace_tree (expr, e1, e0);
      if (zero_p (e) || nonzero_p (e))
        return e;
    }
  if (TREE_CODE (expr) == EQ_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (zero_p (e))
        return e;
      e = simplify_replace_tree (cond, e1, e0);
      if (zero_p (e))
        return e;
    }
  if (TREE_CODE (expr) == NE_EXPR)
    {
      e0 = TREE_OPERAND (expr, 0);
      e1 = TREE_OPERAND (expr, 1);

      /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true.  */
      e = simplify_replace_tree (cond, e0, e1);
      if (zero_p (e))
        return boolean_true_node;
      e = simplify_replace_tree (cond, e1, e0);
      if (zero_p (e))
        return boolean_true_node;
    }

  te = expand_simple_operations (expr);

  /* Check whether COND ==> EXPR.  */
  notcond = invert_truthvalue (cond);
  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, te);
  if (e && nonzero_p (e))
    return e;

  /* Check whether COND ==> not EXPR.  */
  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, te);
  if (e && zero_p (e))
    return e;

  return expr;
}
/* Tries to simplify EXPR using the condition COND.  Returns the simplified
   expression (or EXPR unchanged, if no simplification was possible).
   Wrapper around tree_simplify_using_condition_1 that ensures that chains
   of simple operations in definitions of ssa names in COND are expanded,
   so that things like casts or incrementing the value of the bound before
   the loop do not cause us to fail.  */

static tree
tree_simplify_using_condition (tree cond, tree expr)
{
  cond = expand_simple_operations (cond);

  return tree_simplify_using_condition_1 (cond, expr);
}
/* The maximum number of dominator BBs we search for conditions
   of loop header copies we use for simplifying a conditional
   expression.  */
#define MAX_DOMINATORS_TO_WALK 8
/* Tries to simplify EXPR using the conditions on entry to LOOP.
   Record the conditions used for simplification to CONDS_USED.
   Returns the simplified expression (or EXPR unchanged, if no
   simplification was possible).  */

static tree
simplify_using_initial_conditions (struct loop *loop, tree expr,
                                   tree *conds_used)
{
  edge e;
  basic_block bb;
  tree exp, cond;
  int cnt = 0;

  if (TREE_CODE (expr) == INTEGER_CST)
    return expr;

  /* Limit walking the dominators to avoid quadratic behavior in
     the number of BBs times the number of loops in degenerate
     cases.  */
  for (bb = loop->header;
       bb != ENTRY_BLOCK_PTR && cnt < MAX_DOMINATORS_TO_WALK;
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      if (!single_pred_p (bb))
        continue;
      e = single_pred_edge (bb);

      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
        continue;

      cond = COND_EXPR_COND (last_stmt (e->src));
      if (e->flags & EDGE_FALSE_VALUE)
        cond = invert_truthvalue (cond);
      exp = tree_simplify_using_condition (cond, expr);

      if (exp != expr)
        *conds_used = fold_build2 (TRUTH_AND_EXPR,
                                   boolean_type_node,
                                   *conds_used,
                                   cond);

      expr = exp;
      cnt++;
    }

  return expr;
}
/* Tries to simplify EXPR using the evolutions of the loop invariants
   in the superloops of LOOP.  Returns the simplified expression
   (or EXPR unchanged, if no simplification was possible).  */

static tree
simplify_using_outer_evolutions (struct loop *loop, tree expr)
{
  enum tree_code code = TREE_CODE (expr);
  bool changed;
  tree e, e0, e1, e2;

  if (is_gimple_min_invariant (expr))
    return expr;

  if (code == TRUTH_OR_EXPR
      || code == TRUTH_AND_EXPR
      || code == COND_EXPR)
    {
      changed = false;

      e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
      if (TREE_OPERAND (expr, 0) != e0)
        changed = true;

      e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
      if (TREE_OPERAND (expr, 1) != e1)
        changed = true;

      if (code == COND_EXPR)
        {
          e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
          if (TREE_OPERAND (expr, 2) != e2)
            changed = true;
        }
      else
        e2 = NULL_TREE;

      if (changed)
        {
          if (code == COND_EXPR)
            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
          else
            expr = fold_build2 (code, boolean_type_node, e0, e1);
        }

      return expr;
    }

  e = instantiate_parameters (loop, expr);
  if (is_gimple_min_invariant (e))
    return e;

  return expr;
}
/* Returns true if EXIT is the only possible exit from LOOP.  */

static bool
loop_only_exit_p (struct loop *loop, edge exit)
{
  basic_block *body;
  block_stmt_iterator bsi;
  unsigned i;
  tree call;

  if (exit != loop->single_exit)
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      for (bsi = bsi_start (body[i]); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          call = get_call_expr_in (bsi_stmt (bsi));
          if (call && TREE_SIDE_EFFECTS (call))
            {
              free (body);
              return false;
            }
        }
    }

  free (body);
  return true;
}
/* Stores description of number of iterations of LOOP derived from
   EXIT (an exit edge of the LOOP) in NITER.  Returns true if some
   useful information could be derived (and fields of NITER have the
   meaning described in comments at struct tree_niter_desc
   declaration), false otherwise.  If WARN is true and
   -Wunsafe-loop-optimizations was given, warn if the optimizer is going to
   use potentially unsafe assumptions.  */
bool
number_of_iterations_exit (struct loop *loop, edge exit,
                           struct tree_niter_desc *niter,
                           bool warn)
{
  tree stmt, cond, type;
  tree op0, op1;
  enum tree_code code;
  affine_iv iv0, iv1;

  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
    return false;

  niter->assumptions = boolean_false_node;
  stmt = last_stmt (exit->src);
  if (!stmt || TREE_CODE (stmt) != COND_EXPR)
    return false;

  /* We want the condition for staying inside loop.  */
  cond = COND_EXPR_COND (stmt);
  if (exit->flags & EDGE_TRUE_VALUE)
    cond = invert_truthvalue (cond);

  code = TREE_CODE (cond);
  switch (code)
    {
    case GT_EXPR:
    case GE_EXPR:
    case NE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;

    default:
      return false;
    }

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);
  type = TREE_TYPE (op0);

  if (TREE_CODE (type) != INTEGER_TYPE
      && !POINTER_TYPE_P (type))
    return false;

  if (!simple_iv (loop, stmt, op0, &iv0, false))
    return false;
  if (!simple_iv (loop, stmt, op1, &iv1, false))
    return false;

  iv0.base = expand_simple_operations (iv0.base);
  iv1.base = expand_simple_operations (iv1.base);
  if (!number_of_iterations_cond (type, &iv0, code, &iv1, niter,
                                  loop_only_exit_p (loop, exit)))
    return false;

  if (optimize >= 3)
    {
      niter->assumptions = simplify_using_outer_evolutions (loop,
                                                            niter->assumptions);
      niter->may_be_zero = simplify_using_outer_evolutions (loop,
                                                            niter->may_be_zero);
      niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
    }

  niter->additional_info = boolean_true_node;
  niter->assumptions
          = simplify_using_initial_conditions (loop,
                                               niter->assumptions,
                                               &niter->additional_info);
  niter->may_be_zero
          = simplify_using_initial_conditions (loop,
                                               niter->may_be_zero,
                                               &niter->additional_info);

  if (integer_onep (niter->assumptions))
    return true;

  /* With -funsafe-loop-optimizations we assume that nothing bad can happen.
     But if we can prove that there is overflow or some other source of weird
     behavior, ignore the loop even with -funsafe-loop-optimizations.  */
  if (integer_zerop (niter->assumptions))
    return false;

  if (flag_unsafe_loop_optimizations)
    niter->assumptions = boolean_true_node;

  if (warn)
    {
      const char *wording;
      location_t loc = EXPR_LOCATION (stmt);

      /* We can provide a more specific warning if one of the operands is
         constant and the other advances by +1 or -1.  */
      if (!zero_p (iv1.step)
          ? (zero_p (iv0.step)
             && (integer_onep (iv1.step) || integer_all_onesp (iv1.step)))
          : (iv0.step
             && (integer_onep (iv0.step) || integer_all_onesp (iv0.step))))
        wording =
          flag_unsafe_loop_optimizations
          ? N_("assuming that the loop is not infinite")
          : N_("cannot optimize possibly infinite loops");
      else
        wording =
          flag_unsafe_loop_optimizations
          ? N_("assuming that the loop counter does not overflow")
          : N_("cannot optimize loop, the loop counter may overflow");

      if (LOCATION_LINE (loc) > 0)
        warning (OPT_Wunsafe_loop_optimizations, "%H%s", &loc,
                 gettext (wording));
      else
        warning (OPT_Wunsafe_loop_optimizations, "%s", gettext (wording));
    }

  return flag_unsafe_loop_optimizations;
}
/* Try to determine the number of iterations of LOOP.  If we succeed,
   an expression giving the number of iterations is returned and *EXIT is
   set to the edge from which the information was obtained.  Otherwise
   chrec_dont_know is returned.  */
tree
find_loop_niter (struct loop *loop, edge *exit)
{
  unsigned n_exits, i;
  edge *exits = get_loop_exit_edges (loop, &n_exits);
  edge ex;
  tree niter = NULL_TREE, aniter;
  struct tree_niter_desc desc;

  *exit = NULL;
  for (i = 0; i < n_exits; i++)
    {
      ex = exits[i];
      if (!just_once_each_iteration_p (loop, ex->src))
        continue;

      if (!number_of_iterations_exit (loop, ex, &desc, false))
        continue;

      if (nonzero_p (desc.may_be_zero))
        {
          /* We exit in the first iteration through this exit.
             We won't find anything better.  */
          niter = build_int_cst (unsigned_type_node, 0);
          *exit = ex;
          break;
        }

      if (!zero_p (desc.may_be_zero))
        continue;

      aniter = desc.niter;

      if (!niter)
        {
          /* Nothing recorded yet.  */
          niter = aniter;
          *exit = ex;
          continue;
        }

      /* Prefer constants, the lower the better.  */
      if (TREE_CODE (aniter) != INTEGER_CST)
        continue;

      if (TREE_CODE (niter) != INTEGER_CST)
        {
          niter = aniter;
          *exit = ex;
          continue;
        }

      if (tree_int_cst_lt (aniter, niter))
        {
          niter = aniter;
          *exit = ex;
          continue;
        }
    }
  free (exits);

  return niter ? niter : chrec_dont_know;
}
/*

   Analysis of a number of iterations of a loop by a brute-force evaluation.

*/

/* Bound on the number of iterations we try to evaluate.  */

#define MAX_ITERATIONS_TO_TRACK \
  ((unsigned) PARAM_VALUE (PARAM_MAX_ITERATIONS_TO_TRACK))
/* Returns the loop phi node of LOOP such that ssa name X is derived from its
   result by a chain of operations such that all but exactly one of their
   operands are constants.  */

static tree
chain_of_csts_start (struct loop *loop, tree x)
{
  tree stmt = SSA_NAME_DEF_STMT (x);
  tree use;
  basic_block bb = bb_for_stmt (stmt);

  if (!bb
      || !flow_bb_inside_loop_p (loop, bb))
    return NULL_TREE;

  if (TREE_CODE (stmt) == PHI_NODE)
    {
      if (bb == loop->header)
        return stmt;

      return NULL_TREE;
    }

  if (TREE_CODE (stmt) != MODIFY_EXPR)
    return NULL_TREE;

  if (!ZERO_SSA_OPERANDS (stmt, SSA_OP_ALL_VIRTUALS))
    return NULL_TREE;
  if (SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF) == NULL_DEF_OPERAND_P)
    return NULL_TREE;

  use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
  if (use == NULL_USE_OPERAND_P)
    return NULL_TREE;

  return chain_of_csts_start (loop, use);
}
/* Determines whether the expression X is derived from a result of a phi node
   in header of LOOP such that

   * the derivation of X consists only from operations with constants
   * the initial value of the phi node is constant
   * the value of the phi node in the next iteration can be derived from the
     value in the current iteration by a chain of operations with constants.

   If such a phi node exists, it is returned.  If X is a constant, X is
   returned unchanged.  Otherwise NULL_TREE is returned.  */

static tree
get_base_for (struct loop *loop, tree x)
{
  tree phi, init, next;

  if (is_gimple_min_invariant (x))
    return x;

  phi = chain_of_csts_start (loop, x);
  if (!phi)
    return NULL_TREE;

  init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  if (TREE_CODE (next) != SSA_NAME)
    return NULL_TREE;

  if (!is_gimple_min_invariant (init))
    return NULL_TREE;

  if (chain_of_csts_start (loop, next) != phi)
    return NULL_TREE;

  return phi;
}
/* Given an expression X, then

   * if X is NULL_TREE, we return the constant BASE.
   * otherwise X is an SSA name, whose value in the considered loop is derived
     by a chain of operations with constants from a result of a phi node in
     the header of the loop.  Then we return the value of X when the value of
     the result of this phi node is given by the constant BASE.  */

static tree
get_val_for (tree x, tree base)
{
  tree stmt, nx, val;
  use_operand_p op;
  ssa_op_iter iter;

  gcc_assert (is_gimple_min_invariant (base));

  if (!x)
    return base;

  stmt = SSA_NAME_DEF_STMT (x);
  if (TREE_CODE (stmt) == PHI_NODE)
    return base;

  FOR_EACH_SSA_USE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      nx = USE_FROM_PTR (op);
      val = get_val_for (nx, base);
      SET_USE (op, val);
      val = fold (TREE_OPERAND (stmt, 1));
      SET_USE (op, nx);
      /* only iterate loop once.  */
      return val;
    }

  /* Should never reach here.  */
  gcc_unreachable ();
}
/* Tries to count the number of iterations of LOOP till it exits by EXIT
   by brute force -- i.e. by determining the value of the operands of the
   condition at EXIT in the first few iterations of the loop (assuming that
   these values are constant) and determining the first one in which the
   condition is not satisfied.  Returns the constant giving the number
   of the iterations of LOOP if successful, chrec_dont_know otherwise.  */
static tree
loop_niter_by_eval (struct loop *loop, edge exit)
{
  tree cond, cnd, acnd;
  tree op[2], val[2], next[2], aval[2], phi[2];
  unsigned i, j;
  enum tree_code cmp;

  cond = last_stmt (exit->src);
  if (!cond || TREE_CODE (cond) != COND_EXPR)
    return chrec_dont_know;

  cnd = COND_EXPR_COND (cond);
  if (exit->flags & EDGE_TRUE_VALUE)
    cnd = invert_truthvalue (cnd);

  cmp = TREE_CODE (cnd);
  switch (cmp)
    {
    case EQ_EXPR:
    case NE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      for (j = 0; j < 2; j++)
        op[j] = TREE_OPERAND (cnd, j);
      break;

    default:
      return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      phi[j] = get_base_for (loop, op[j]);
      if (!phi[j])
        return chrec_dont_know;
    }

  for (j = 0; j < 2; j++)
    {
      if (TREE_CODE (phi[j]) == PHI_NODE)
        {
          val[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_preheader_edge (loop));
          next[j] = PHI_ARG_DEF_FROM_EDGE (phi[j], loop_latch_edge (loop));
        }
      else
        {
          val[j] = phi[j];
          next[j] = NULL_TREE;
          op[j] = NULL_TREE;
        }
    }

  for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
    {
      for (j = 0; j < 2; j++)
        aval[j] = get_val_for (op[j], val[j]);

      acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
      if (acnd && zero_p (acnd))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file,
                     "Proved that loop %d iterates %d times using brute force.\n",
                     loop->num, i);
          return build_int_cst (unsigned_type_node, i);
        }

      for (j = 0; j < 2; j++)
        {
          val[j] = get_val_for (next[j], val[j]);
          if (!is_gimple_min_invariant (val[j]))
            return chrec_dont_know;
        }
    }

  return chrec_dont_know;
}
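/* As an illustration, for a loop whose staying condition is i < 100 with
   i starting at 1 and doubling each iteration (i_next = i * 2), the values
   of i at the exit test are 1, 2, 4, ..., 64, 128.  The affine analysis
   above cannot handle this evolution, but the evaluation here finds the
   first iteration in which the condition fails and returns 7.  */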
/* Finds the exit of the LOOP by which the loop exits after a constant
   number of iterations and stores the exit edge to *EXIT.  The constant
   giving the number of iterations of LOOP is returned.  The number of
   iterations is determined using loop_niter_by_eval (i.e. by brute force
   evaluation).  If we are unable to find an exit for which
   loop_niter_by_eval determines the number of iterations, chrec_dont_know
   is returned.  */
tree
find_loop_niter_by_eval (struct loop *loop, edge *exit)
{
  unsigned n_exits, i;
  edge *exits = get_loop_exit_edges (loop, &n_exits);
  edge ex;
  tree niter = NULL_TREE, aniter;

  *exit = NULL;
  for (i = 0; i < n_exits; i++)
    {
      ex = exits[i];
      if (!just_once_each_iteration_p (loop, ex->src))
        continue;

      aniter = loop_niter_by_eval (loop, ex);
      if (chrec_contains_undetermined (aniter))
        continue;

      if (niter
          && !tree_int_cst_lt (aniter, niter))
        continue;

      niter = aniter;
      *exit = ex;
    }
  free (exits);

  return niter ? niter : chrec_dont_know;
}
/*

   Analysis of upper bounds on number of iterations of a loop.

*/
/* Returns true if we can prove that COND ==> VAL >= 0.  */

static bool
implies_nonnegative_p (tree cond, tree val)
{
  tree type = TREE_TYPE (val);
  tree compare;

  if (tree_expr_nonnegative_p (val))
    return true;

  if (nonzero_p (cond))
    return false;

  compare = fold_build2 (GE_EXPR,
                         boolean_type_node, val, build_int_cst (type, 0));
  compare = tree_simplify_using_condition_1 (cond, compare);

  return nonzero_p (compare);
}
1507 implies_ge_p (tree cond
, tree a
, tree b
)
1509 tree compare
= fold_build2 (GE_EXPR
, boolean_type_node
, a
, b
);
1511 if (nonzero_p (compare
))
1514 if (nonzero_p (cond
))
1517 compare
= tree_simplify_using_condition_1 (cond
, compare
);
1519 return nonzero_p (compare
);
/* Returns a constant upper bound on the value of expression VAL.  VAL
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.

   The condition ADDITIONAL must be satisfied (for example, if VAL is
   "(unsigned) n" and ADDITIONAL is "n > 0", then we can derive that
   VAL is at most (unsigned) MAX_INT).  */
static double_int
derive_constant_upper_bound (tree val, tree additional)
{
  tree type = TREE_TYPE (val);
  tree op0, op1, subtype, maxt;
  double_int bnd, max, mmax, cst;

  if (INTEGRAL_TYPE_P (type))
    maxt = TYPE_MAX_VALUE (type);
  else
    maxt = upper_bound_in_type (type, type);

  max = tree_to_double_int (maxt);

  switch (TREE_CODE (val))
    {
    case INTEGER_CST:
      return tree_to_double_int (val);

    case NOP_EXPR:
    case CONVERT_EXPR:
      op0 = TREE_OPERAND (val, 0);
      subtype = TREE_TYPE (op0);
      if (!TYPE_UNSIGNED (subtype)
          /* If TYPE is also signed, the fact that VAL is nonnegative implies
             that OP0 is nonnegative.  */
          && TYPE_UNSIGNED (type)
          && !implies_nonnegative_p (additional, op0))
        {
          /* If we cannot prove that the casted expression is nonnegative,
             we cannot establish more useful upper bound than the precision
             of the type gives us.  */
          return max;
        }

      /* We now know that op0 is a nonnegative value.  Try deriving an upper
         bound for it.  */
      bnd = derive_constant_upper_bound (op0, additional);

      /* If the bound does not fit in TYPE, max. value of TYPE could be
         attained.  */
      if (double_int_ucmp (max, bnd) < 0)
        return max;

      return bnd;

    case PLUS_EXPR:
    case MINUS_EXPR:
      op0 = TREE_OPERAND (val, 0);
      op1 = TREE_OPERAND (val, 1);

      if (TREE_CODE (op1) != INTEGER_CST
          || !implies_nonnegative_p (additional, op0))
        return max;

      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
         choose the most logical way how to treat this constant regardless
         of the signedness of the type.  */
      cst = tree_to_double_int (op1);
      cst = double_int_sext (cst, TYPE_PRECISION (type));
      if (TREE_CODE (val) == PLUS_EXPR)
        cst = double_int_neg (cst);

      bnd = derive_constant_upper_bound (op0, additional);

      if (double_int_negative_p (cst))
        {
          cst = double_int_neg (cst);
          /* Avoid CST == 0x80000...  */
          if (double_int_negative_p (cst))
            return max;

          /* OP0 + CST.  We need to check that
             BND <= MAX (type) - CST.  */

          mmax = double_int_add (max, double_int_neg (cst));
          if (double_int_ucmp (bnd, mmax) > 0)
            return max;

          return double_int_add (bnd, cst);
        }
      else
        {
          /* OP0 - CST, where CST >= 0.

             If TYPE is signed, we have already verified that OP0 >= 0, and we
             know that the result is nonnegative.  This implies that
             VAL <= BND - CST.

             If TYPE is unsigned, we must additionally know that OP0 >= CST,
             otherwise the operation underflows.  */

          /* This should only happen if the type is unsigned; however, for
             programs that use overflowing signed arithmetic even with
             -fno-wrapv, this condition may also be true for signed values.  */
          if (double_int_ucmp (bnd, cst) < 0)
            return max;

          if (TYPE_UNSIGNED (type)
              && !implies_ge_p (additional,
                                op0, double_int_to_tree (type, cst)))
            return max;

          bnd = double_int_add (bnd, double_int_neg (cst));
        }

      return bnd;

    case FLOOR_DIV_EXPR:
    case EXACT_DIV_EXPR:
      op0 = TREE_OPERAND (val, 0);
      op1 = TREE_OPERAND (val, 1);
      if (TREE_CODE (op1) != INTEGER_CST
          || tree_int_cst_sign_bit (op1))
        return max;

      bnd = derive_constant_upper_bound (op0, additional);
      return double_int_udiv (bnd, tree_to_double_int (op1), FLOOR_DIV_EXPR);

    default:
      return max;
    }
}
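/* For example, for VAL of the form x - 10 where x has unsigned char type and
   ADDITIONAL lets us prove x >= 10, the recursion bounds x by 255, CST is 10,
   and the MINUS_EXPR case above yields the constant upper bound
   255 - 10 = 245.  */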
/* Records that AT_STMT is executed at most BOUND times in LOOP.  The
   additional condition ADDITIONAL is recorded with the bound.  */
static void
record_estimate (struct loop *loop, tree bound, tree additional, tree at_stmt)
{
  struct nb_iter_bound *elt = xmalloc (sizeof (struct nb_iter_bound));
  double_int i_bound = derive_constant_upper_bound (bound, additional);
  tree c_bound = double_int_to_tree (unsigned_type_for (TREE_TYPE (bound)),
                                     i_bound);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Statements after ");
      print_generic_expr (dump_file, at_stmt, TDF_SLIM);
      fprintf (dump_file, " are executed at most ");
      print_generic_expr (dump_file, bound, TDF_SLIM);
      fprintf (dump_file, " (bounded by ");
      print_generic_expr (dump_file, c_bound, TDF_SLIM);
      fprintf (dump_file, ") times in loop %d.\n", loop->num);
    }

  elt->bound = c_bound;
  elt->at_stmt = at_stmt;
  elt->next = loop->bounds;
  loop->bounds = elt;
}
/* Initialize LOOP->ESTIMATED_NB_ITERATIONS with the lowest safe
   approximation of the number of iterations for LOOP.  */

static void
compute_estimated_nb_iterations (struct loop *loop)
{
  struct nb_iter_bound *bound;

  for (bound = loop->bounds; bound; bound = bound->next)
    {
      if (TREE_CODE (bound->bound) != INTEGER_CST)
        continue;

      /* Update only when there is no previous estimation, or when the current
         estimation is smaller.  */
      if (chrec_contains_undetermined (loop->estimated_nb_iterations)
          || tree_int_cst_lt (bound->bound, loop->estimated_nb_iterations))
        loop->estimated_nb_iterations = bound->bound;
    }
}
/* The following analyzers extract information on the bounds
   of LOOP from the following undefined behaviors:

   - data references should not access elements over the statically
     allocated size,

   - signed variables should not overflow when flag_wrapv is not set.  */
static void
infer_loop_bounds_from_undefined (struct loop *loop)
{
  unsigned i;
  basic_block bb, *bbs;
  block_stmt_iterator bsi;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
        {
          tree stmt = bsi_stmt (bsi);

          switch (TREE_CODE (stmt))
            {
            case MODIFY_EXPR:
              {
                tree op0 = TREE_OPERAND (stmt, 0);
                tree op1 = TREE_OPERAND (stmt, 1);

                /* For each array access, analyze its access function
                   and record a bound on the loop iteration domain.  */
                if (TREE_CODE (op1) == ARRAY_REF
                    && !array_ref_contains_indirect_ref (op1))
                  estimate_iters_using_array (stmt, op1);

                if (TREE_CODE (op0) == ARRAY_REF
                    && !array_ref_contains_indirect_ref (op0))
                  estimate_iters_using_array (stmt, op0);

                /* For each signed type variable in LOOP, analyze its
                   scalar evolution and record a bound of the loop
                   based on the type's ranges.  */
                else if (!flag_wrapv && TREE_CODE (op0) == SSA_NAME)
                  {
                    tree init, step, diff, estimation, utype;
                    tree scev = instantiate_parameters
                      (loop, analyze_scalar_evolution (loop, op0));
                    tree type = chrec_type (scev);

                    if (chrec_contains_undetermined (scev)
                        || TYPE_UNSIGNED (type))
                      break;

                    init = initial_condition_in_loop_num (scev, loop->num);
                    step = evolution_part_in_loop_num (scev, loop->num);

                    if (init == NULL_TREE
                        || step == NULL_TREE
                        || TREE_CODE (init) != INTEGER_CST
                        || TREE_CODE (step) != INTEGER_CST
                        || TYPE_MIN_VALUE (type) == NULL_TREE
                        || TYPE_MAX_VALUE (type) == NULL_TREE)
                      break;

                    if (integer_nonzerop (step))
                      {
                        if (tree_int_cst_lt (step, integer_zero_node))
                          diff = fold_build2 (MINUS_EXPR, type, init,
                                              TYPE_MIN_VALUE (type));
                        else
                          diff = fold_build2 (MINUS_EXPR, type,
                                              TYPE_MAX_VALUE (type), init);

                        utype = unsigned_type_for (type);
                        estimation = fold_build2 (CEIL_DIV_EXPR, type, diff,
                                                  step);
                        record_estimate (loop,
                                         fold_convert (utype, estimation),
                                         boolean_true_node, stmt);
                      }
                  }

                break;
              }

            case CALL_EXPR:
              {
                tree args;

                for (args = TREE_OPERAND (stmt, 1); args;
                     args = TREE_CHAIN (args))
                  if (TREE_CODE (TREE_VALUE (args)) == ARRAY_REF
                      && !array_ref_contains_indirect_ref (TREE_VALUE (args)))
                    estimate_iters_using_array (stmt, TREE_VALUE (args));

                break;
              }

            default:
              break;
            }
        }
    }

  compute_estimated_nb_iterations (loop);

  free (bbs);
}
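/* For example, if a signed char induction variable evolves as {0, +, 1} in
   the loop, the code above computes diff = 127 - 0 = 127 and records the
   estimate 127: iterating further would overflow the variable, which is
   undefined behavior when -fwrapv is not given.  */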
/* Records estimates on numbers of iterations of LOOP.  */

static void
estimate_numbers_of_iterations_loop (struct loop *loop)
{
  edge *exits;
  tree niter, type;
  unsigned i, n_exits;
  struct tree_niter_desc niter_desc;

  /* Give up if we already have tried to compute an estimation.  */
  if (loop->estimated_nb_iterations == chrec_dont_know
      /* Or when we already have an estimation.  */
      || (loop->estimated_nb_iterations != NULL_TREE
          && TREE_CODE (loop->estimated_nb_iterations) == INTEGER_CST))
    return;
  else
    loop->estimated_nb_iterations = chrec_dont_know;

  exits = get_loop_exit_edges (loop, &n_exits);
  for (i = 0; i < n_exits; i++)
    {
      if (!number_of_iterations_exit (loop, exits[i], &niter_desc, false))
        continue;

      niter = niter_desc.niter;
      type = TREE_TYPE (niter);
      if (!zero_p (niter_desc.may_be_zero)
          && !nonzero_p (niter_desc.may_be_zero))
        niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
                        build_int_cst (type, 0),
                        niter);
      record_estimate (loop, niter,
                       niter_desc.additional_info,
                       last_stmt (exits[i]->src));
    }
  free (exits);

  if (chrec_contains_undetermined (loop->estimated_nb_iterations))
    infer_loop_bounds_from_undefined (loop);
}
/* Records estimates on numbers of iterations of LOOPS.  */

void
estimate_numbers_of_iterations (struct loops *loops)
{
  unsigned i;
  struct loop *loop;

  for (i = 1; i < loops->num; i++)
    {
      loop = loops->parray[i];
      if (loop)
        estimate_numbers_of_iterations_loop (loop);
    }
}
/* Returns true if statement S1 dominates statement S2.  */

static bool
stmt_dominates_stmt_p (tree s1, tree s2)
{
  basic_block bb1 = bb_for_stmt (s1), bb2 = bb_for_stmt (s2);

  if (!bb1
      || s1 == s2)
    return true;

  if (bb1 == bb2)
    {
      block_stmt_iterator bsi;

      for (bsi = bsi_start (bb1); bsi_stmt (bsi) != s2; bsi_next (&bsi))
        if (bsi_stmt (bsi) == s1)
          return true;

      return false;
    }

  return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
}
/* Returns true when we can prove that the number of executions of
   STMT in the loop is at most NITER, according to the fact
   that the statement NITER_BOUND->at_stmt is executed at most
   NITER_BOUND->bound times.  */

static bool
n_of_executions_at_most (tree stmt,
                         struct nb_iter_bound *niter_bound,
                         tree niter)
{
  tree cond;
  tree bound = niter_bound->bound;
  tree bound_type = TREE_TYPE (bound);
  tree nit_type = TREE_TYPE (niter);
  enum tree_code cmp;

  gcc_assert (TYPE_UNSIGNED (bound_type)
              && TYPE_UNSIGNED (nit_type)
              && is_gimple_min_invariant (bound));
  if (TYPE_PRECISION (nit_type) > TYPE_PRECISION (bound_type))
    bound = fold_convert (nit_type, bound);
  else
    niter = fold_convert (bound_type, niter);

  /* After the statement niter_bound->at_stmt we know that anything is
     executed at most BOUND times.  */
  if (stmt && stmt_dominates_stmt_p (niter_bound->at_stmt, stmt))
    cmp = GE_EXPR;
  /* Before the statement niter_bound->at_stmt we know that anything
     is executed at most BOUND + 1 times.  */
  else
    cmp = GT_EXPR;

  cond = fold_binary (cmp, boolean_type_node, niter, bound);
  return nonzero_p (cond);
}
/* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */

bool
nowrap_type_p (tree type)
{
  if (!flag_wrapv
      && INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type))
    return true;

  if (POINTER_TYPE_P (type))
    return true;

  return false;
}
/* Return false only when the induction variable BASE + STEP * I is
   known to not overflow: i.e. when the number of iterations is small
   enough with respect to the step and initial condition in order to
   keep the evolution confined in TYPE's bounds.  Return true when the
   iv is known to overflow or when the property is not computable.

   USE_OVERFLOW_SEMANTICS is true if this function should assume that
   the rules for overflow of the given language apply (e.g., that signed
   arithmetic in C does not overflow).  */
bool
scev_probably_wraps_p (tree base, tree step,
                       tree at_stmt, struct loop *loop,
                       bool use_overflow_semantics)
{
  struct nb_iter_bound *bound;
  tree delta, step_abs;
  tree unsigned_type, valid_niter;
  tree type = TREE_TYPE (step);

  /* FIXME: We really need something like
     http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.

     We used to test for the following situation that frequently appears
     during address arithmetic:

       D.1621_13 = (long unsigned intD.4) D.1620_12;
       D.1622_14 = D.1621_13 * 8;
       D.1623_15 = (doubleD.29 *) D.1622_14;

     And derived that the sequence corresponding to D_14
     can be proved to not wrap because it is used for computing a
     memory access; however, this is not really the case -- for example,
     if D_12 = (unsigned char) [254,+,1], then D_14 has values
     2032, 2040, 0, 8, ..., but the code is still legal.  */

  if (chrec_contains_undetermined (base)
      || chrec_contains_undetermined (step)
      || TREE_CODE (step) != INTEGER_CST)
    return true;

  if (zero_p (step))
    return false;

  /* If we can use the fact that signed and pointer arithmetic does not
     wrap, we are done.  */
  if (use_overflow_semantics && nowrap_type_p (type))
    return false;

  /* Otherwise, compute the number of iterations before we reach the
     bound of the type, and verify that the loop is exited before this
     occurs.  */
  unsigned_type = unsigned_type_for (type);
  base = fold_convert (unsigned_type, base);

  if (tree_int_cst_sign_bit (step))
    {
      tree extreme = fold_convert (unsigned_type,
                                   lower_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
      step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
                              fold_convert (unsigned_type, step));
    }
  else
    {
      tree extreme = fold_convert (unsigned_type,
                                   upper_bound_in_type (type, type));
      delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
      step_abs = fold_convert (unsigned_type, step);
    }

  valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);

  estimate_numbers_of_iterations_loop (loop);
  for (bound = loop->bounds; bound; bound = bound->next)
    if (n_of_executions_at_most (at_stmt, bound, valid_niter))
      return false;

  /* At this point we still don't have a proof that the iv does not
     overflow: give up.  */
  return true;
}
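/* For example, for a signed char iv with base 10 and step 5, the code above
   computes delta = 127 - 10 = 117 and step_abs = 5, so valid_niter = 23;
   if some recorded bound proves the statement runs at most 23 times, the
   iv cannot wrap and false is returned.  */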
/* Frees the information on upper bounds on numbers of iterations of LOOP.  */

void
free_numbers_of_iterations_estimates_loop (struct loop *loop)
{
  struct nb_iter_bound *bound, *next;

  loop->nb_iterations = NULL;
  loop->estimated_nb_iterations = NULL;
  for (bound = loop->bounds; bound; bound = next)
    {
      next = bound->next;
      free (bound);
    }

  loop->bounds = NULL;
}
/* Frees the information on upper bounds on numbers of iterations of LOOPS.  */

void
free_numbers_of_iterations_estimates (struct loops *loops)
{
  unsigned i;
  struct loop *loop;

  for (i = 1; i < loops->num; i++)
    {
      loop = loops->parray[i];
      if (loop)
        free_numbers_of_iterations_estimates_loop (loop);
    }
}
/* Substitute value VAL for ssa name NAME inside expressions held
   at LOOP.  */

void
substitute_in_loop_info (struct loop *loop, tree name, tree val)
{
  loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
  loop->estimated_nb_iterations
          = simplify_replace_tree (loop->estimated_nb_iterations, name, val);
}