/* Operations with affine combinations of trees.
   Copyright (C) 2005-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"
#include "value-query.h"
/* Extends CST as appropriate for the affine combination COMB.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}
/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}
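
/* Editorial note (not part of the original source): both overloads
   canonicalize constants by sign-extending from the precision of the
   combination's type, so for an 8-bit TYPE

     wide_int_ext_for_comb (widest_int (255), type)

   yields -1; 255 and -1 denote the same coefficient in an 8-bit
   combination.  */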
/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}
/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}
/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}
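
/* Editorial sketch (not in the original file): how the constructors above
   compose to build the combination 2*X + 3, assuming X is a tree of an
   integral TYPE; the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
example_build_2x_plus_3 (tree x, tree type, aff_tree *comb)
{
  aff_tree tmp;

  aff_combination_elt (comb, type, x);    /* comb = X  */
  aff_combination_scale (comb, 2);        /* comb = 2*X  */
  aff_combination_const (&tmp, type, 3);  /* tmp = 3  */
  aff_combination_add (comb, &tmp);       /* comb = 2*X + 3  */
}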
/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
        = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef == 0)
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  wide_int_to_tree (type, scale));
    }
}
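
/* Editorial example (not from the original source): scaling the
   combination { offset = 3, elts = { 2*X } } by 4 gives
   { offset = 12, elts = { 8*X } }; scaling by 0 zeroes the whole
   combination, and any COMB->rest is folded back multiplicatively as
   shown above.  */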
/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        widest_int new_coef
          = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
        if (new_coef != 0)
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = 1;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}
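
/* Editorial example (not from the original source): adding ELT with the
   negated coefficient of an existing element cancels it, e.g. adding
   X * -2 to { 3 + 2*X } leaves { 3 }.  Once all MAX_AFF_ELTS slots are
   taken, further elements do not fail; they are folded into COMB->rest
   as trees by the fold_build2 calls above.  */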
/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}
/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}
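
/* Editorial sketch (not part of the original file): a common client
   pattern computes the difference of two expressions by scaling one
   summand by -1 before adding, mirroring the MINUS_EXPR handling in
   expr_to_aff_combination below; the function name is hypothetical.  */

static void ATTRIBUTE_UNUSED
example_aff_difference (tree e1, tree e2, tree type, aff_tree *diff)
{
  aff_tree tmp;

  tree_to_aff_combination (e1, type, diff);   /* diff = E1  */
  tree_to_aff_combination (e2, type, &tmp);   /* tmp = E2  */
  aff_combination_scale (&tmp, -1);           /* tmp = -E2  */
  aff_combination_add (diff, &tmp);           /* diff = E1 - E2  */
}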
/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
        continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
/* Tries to handle OP0 CODE OP1 as affine combination of parts.  Returns
   true when that was successful and returns the combination in COMB.  */

static bool
expr_to_aff_combination (aff_tree *comb, tree_code code, tree type,
                         tree op0, tree op1 = NULL_TREE)
{
  aff_tree tmp;
  poly_int64 bitpos, bitsize, bytepos;

  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
        break;
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, wi::to_widest (op1));
      return true;

    case NEGATE_EXPR:
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      return true;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return true;

    CASE_CONVERT:
      {
        tree otype = type;
        tree inner = op0;
        tree itype = TREE_TYPE (inner);
        enum tree_code icode = TREE_CODE (inner);

        if (tree_nop_conversion_p (otype, itype))
          {
            tree_to_aff_combination (op0, type, comb);
            return true;
          }

        /* In principle this is a valid folding, but it isn't necessarily
           an optimization, so do it here and not in fold_unary.  */
        if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
            && TREE_CODE (itype) == INTEGER_TYPE
            && TREE_CODE (otype) == INTEGER_TYPE
            && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
          {
            tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

            /* If inner type has undefined overflow behavior, fold conversion
               for the two cases below:
                 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
                 (T1)(X + X)     -> (T1)X + (T1)X.  */
            if (TYPE_OVERFLOW_UNDEFINED (itype)
                && (TREE_CODE (op1) == INTEGER_CST
                    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
              {
                op0 = fold_convert (otype, op0);
                op1 = fold_convert (otype, op1);
                return expr_to_aff_combination (comb, icode, otype, op0, op1);
              }

            value_range vr;
            /* If inner type has wrapping overflow behavior, fold conversion
               for the case below:
                 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
               if X *+- CST doesn't overflow by range information.  */
            if (TYPE_UNSIGNED (itype)
                && TYPE_OVERFLOW_WRAPS (itype)
                && TREE_CODE (op1) == INTEGER_CST
                && get_range_query (cfun)->range_of_expr (vr, op0)
                && vr.kind () == VR_RANGE)
              {
                wide_int minv = vr.lower_bound ();
                wide_int maxv = vr.upper_bound ();
                wi::overflow_type overflow = wi::OVF_NONE;
                signop sign = UNSIGNED;
                if (icode == PLUS_EXPR)
                  wi::add (maxv, wi::to_wide (op1), sign, &overflow);
                else if (icode == MULT_EXPR)
                  wi::mul (maxv, wi::to_wide (op1), sign, &overflow);
                else
                  wi::sub (minv, wi::to_wide (op1), sign, &overflow);

                if (overflow == wi::OVF_NONE)
                  {
                    op0 = fold_convert (otype, op0);
                    op1 = fold_convert (otype, op1);
                    return expr_to_aff_combination (comb, icode, otype, op0,
                                                    op1);
                  }
              }
          }
      }
      break;

    default:
      break;
    }

  return false;
}
/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0),
                                   TREE_OPERAND (expr, 1)))
        return;
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0)))
        return;
      break;

    CASE_CONVERT:
      /* ??? TREE_TYPE (expr) should be equal to type here, but IVOPTS
         calls this with not showing an outer widening cast.  */
      if (expr_to_aff_combination (comb, code,
                                   TREE_TYPE (expr), TREE_OPERAND (expr, 0)))
        {
          aff_combination_convert (comb, type);
          return;
        }
      break;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &reversep,
                                  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
        break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
        {
          tree mem_offset = TREE_OPERAND (core, 1);
          aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
          core = TREE_OPERAND (core, 0);
        }
      else
        core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, 1);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    default:
      if (poly_int_tree_p (expr))
        {
          aff_combination_const (comb, type, wi::to_poly_widest (expr));
          return;
        }
      break;
    }

  aff_combination_elt (comb, type, expr);
}
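
/* Editorial example (not from the original source): splitting the address
   &a[i + 1] of an int array in sizetype yields, roughly,

     { offset = 4, elts = { 1 * &a, 4 * i } }

   (assuming 4-byte ints), which is the canonical form the ADDR_EXPR
   handling above produces for array references.  */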
/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
        return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}
/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
          && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
        {
          base = comb->elts[0].val;
          ++i;
        }
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}
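
/* Editorial note (not in the original file): aff_combination_to_tree is
   the inverse direction of tree_to_aff_combination, so a round trip

     aff_tree comb;
     tree_to_aff_combination (expr, type, &comb);
     tree back = aff_combination_to_tree (&comb);

   re-materializes an equivalent (re-folded, not necessarily identical)
   tree; pointer-typed combinations come back via
   fold_build_pointer_plus.  */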
/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}
/* Removes the M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
        /* Access coeffs[0] directly, for efficiency.  */
        aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
        {
          /* c->offset is polynomial, so multiply VAL rather than COEF
             by it.  */
          tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
          val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
          aff_combination_add_elt (r, val, coef);
        }
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}
/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}
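
/* Editorial example (not from the original source): multiplying
   { 1 + 2*X } by { 3 } distributes through aff_combination_add_product
   to give { 3 + 6*X }.  A product of two non-constant combinations keeps
   the nonlinear parts opaque: X * Y becomes a single element whose value
   is the tree X * Y with coefficient 1.  */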
/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static class aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}
/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

class name_expansion
{
public:
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};
/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   expansions of the ssa names, to avoid exponential time complexity for
   cases like a1 = a0 + a0, a2 = a1 + a1, a3 = a2 + a2, ...  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e;
  gimple *def;
  widest_int scale;
  class name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      name_expansion **slot = NULL;
      if (*cache)
        slot = (*cache)->get (name);
      exp = slot ? *slot : NULL;
      if (!exp)
        {
          /* Only bother to handle cases tree_to_aff_combination will.  */
          switch (code)
            {
            case POINTER_PLUS_EXPR:
            case PLUS_EXPR:
            case MINUS_EXPR:
            case MULT_EXPR:
              if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
                                            gimple_assign_rhs1 (def),
                                            gimple_assign_rhs2 (def)))
                continue;
              break;
            case NEGATE_EXPR:
            case BIT_NOT_EXPR:
              if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
                                            gimple_assign_rhs1 (def)))
                continue;
              break;
            CASE_CONVERT:
              if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
                                            gimple_assign_rhs1 (def)))
                /* This makes us always expand conversions which we did
                   in the past and makes gcc.dg/tree-ssa/ivopts-lt-2.c
                   PASS, eliminating one induction variable in IVOPTs.
                   ???  But it is really excessive and we should try
                   harder to do without it.  */
                aff_combination_elt (&current, TREE_TYPE (name),
                                     fold_convert (TREE_TYPE (name),
                                                   gimple_assign_rhs1 (def)));
              break;
            default:
              tree_to_aff_combination (gimple_assign_rhs1 (def),
                                       TREE_TYPE (name), &current);
            }
          exp = XNEW (class name_expansion);
          exp->in_progress = 1;
          if (!*cache)
            *cache = new hash_map<tree, name_expansion *>;
          (*cache)->put (name, exp);
          aff_combination_expand (&current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }
      if (!useless_type_conversion_p (comb->type, current.type))
        aff_combination_convert (&current, comb->type);

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}
/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like a1 = a0 + a0, a2 = a1 + a1, a3 = a2 + a2, ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}
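
/* Editorial sketch (not part of the original file): typical cache
   lifecycle for the expanding variant; the caller owns the cache and
   must release it with free_affine_expand_cache.  The function name is
   hypothetical.  */

static void ATTRIBUTE_UNUSED
example_expand_and_free (tree expr, tree type)
{
  aff_tree comb;
  hash_map<tree, name_expansion *> *cache = NULL;

  tree_to_aff_combination_expand (expr, type, &comb, &cache);
  /* ... inspect COMB here ...  */
  free_affine_expand_cache (&cache);
}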
/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}
/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete *cache;
  *cache = NULL;
}
/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
                              const poly_widest_int &div,
                              bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}
/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
                                     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      class aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                         &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}
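
/* Editorial example (not from the original source): with
   VAL = { 8 + 12*X } and DIV = { 2 + 3*X },
   aff_combination_constant_multiple_p stores 4 to *MULT and returns
   true; it fails as soon as any coefficient pair disagrees on the
   multiplier or either combination has a REST part.  */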
/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          print_dec (val->elts[i].coef, file, sgn);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}
/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}
/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}
/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
                           const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    /* The second object is before the first one, we succeed if the last
       element of the second object is before the start of the first one.  */
    return known_le (diff->offset + size2, 0);
  else
    /* We succeed if the second object starts after the first one ends.  */
    return known_le (size1, diff->offset);
}