/* Operations with affine combinations of trees.
   Copyright (C) 2005-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"
/* Extends CST as appropriate for the affine combinations COMB.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}
/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}
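
/* Both variants reduce a value modulo the precision of the combination's
   type and interpret the result as signed.  For example, in an 8-bit type
   the constant 255 becomes -1:

     wi::sext (widest_int (255), 8) == -1

   (an illustrative identity, not code used below), so coefficient
   arithmetic behaves like signed arithmetic in TYPE's precision.  */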
/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}
/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}
/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}
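
/* An illustrative sketch of how these constructors compose (not code used
   elsewhere in GCC; TYPE stands for an integer type node and X for an
   operand tree):

     aff_tree c;
     aff_combination_elt (&c, TYPE, x);   // c = x
     aff_combination_scale (&c, 4);       // c = 4*x (defined below)
     aff_combination_add_cst (&c, 6);     // c = 4*x + 6 (defined below)

   The combination then holds one element { coef 4, val X } plus the
   constant offset 6.  */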
/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
	= wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (new_coef == 0)
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  wide_int_to_tree (type, scale));
    }
}
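
/* For instance, scaling { 2*x + 3 } by -1 yields { -2*x - 3 }, and scaling
   by 0 collapses the combination to { 0 } via the early exit above.
   Dropping coefficients that wrap to zero keeps COMB canonical: no element
   ever carries a zero coefficient.  */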
/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
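
/* A consequence of the merging above (illustrative only):

     aff_combination_add_elt (&c, x, 2);    // c contains 2*x
     aff_combination_add_elt (&c, x, -2);   // element cancels, c = 0

   Cancelled elements are removed outright rather than kept with a zero
   coefficient, possibly pulling COMB->rest back into the element array.  */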
/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}
/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}
/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
	continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
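
/* E.g. narrowing { 4*x + 1 } from a 64-bit to a 32-bit type truncates the
   coefficients and offset to 32 bits and converts each element tree;
   widening instead rebuilds a tree via aff_combination_to_tree and
   re-parses it, as done above, because coefficients cannot be widened
   without knowing whether the arithmetic wrapped.  */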
/* Tries to handle OP0 CODE OP1 as affine combination of parts.  Returns
   true when that was successful and returns the combination in COMB.  */

static bool
expr_to_aff_combination (aff_tree *comb, tree_code code, tree type,
			 tree op0, tree op1 = NULL_TREE)
{
  aff_tree tmp;
  poly_int64 bitpos, bitsize, bytepos;

  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	break;
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, wi::to_widest (op1));
      return true;

    case NEGATE_EXPR:
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      return true;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return true;

    CASE_CONVERT:
      {
	tree otype = type;
	tree inner = op0;
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	/* STRIP_NOPS.  */
	if (tree_nop_conversion_p (otype, itype))
	  {
	    tree_to_aff_combination (op0, type, comb);
	    return true;
	  }

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If inner type has undefined overflow behavior, fold conversion
	       for below two cases:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		return expr_to_aff_combination (comb, icode, otype, op0, op1);
	      }
	    wide_int minv, maxv;
	    /* If inner type has wrapping overflow behavior, fold conversion
	       for below case:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
	       if X *+- CST doesn't overflow by range information.  */
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op1) == INTEGER_CST
		&& determine_value_range (op0, &minv, &maxv) == VR_RANGE)
	      {
		wi::overflow_type overflow = wi::OVF_NONE;
		signop sign = UNSIGNED;
		if (icode == PLUS_EXPR)
		  wi::add (maxv, wi::to_wide (op1), sign, &overflow);
		else if (icode == MULT_EXPR)
		  wi::mul (maxv, wi::to_wide (op1), sign, &overflow);
		else
		  wi::sub (minv, wi::to_wide (op1), sign, &overflow);

		if (overflow == wi::OVF_NONE)
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    return expr_to_aff_combination (comb, icode, otype, op0,
						    op1);
		  }
	      }
	  }
      }
      break;

    default:;
    }

  return false;
}
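
/* As an illustration of the CASE_CONVERT handling above: for unsigned char
   X known from range information to lie in [0, 50], the expression
   (int)(X + 10) can be rewritten as (int)X + 10 because the inner addition
   provably does not wrap; without usable range information the conversion
   is left opaque and this function returns false.  */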
/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0),
				   TREE_OPERAND (expr, 1)))
	return;
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0)))
	return;
      break;

    CASE_CONVERT:
      /* ???  TREE_TYPE (expr) should be equal to type here, but IVOPTS
	 calls this with not showing an outer widening cast.  */
      if (expr_to_aff_combination (comb, code,
				   TREE_TYPE (expr), TREE_OPERAND (expr, 0)))
	{
	  aff_combination_convert (comb, type);
	  return;
	}
      break;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  aff_combination_elt (comb, type, expr);
}
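
/* For example, the address &a[i + 1] of an int array decomposes (roughly)
   into the combination { 1*(&a) + 4*i + 4 }: the ADDR_EXPR case splits off
   the constant byte offset via get_inner_reference, and the variable part
   of the index contributes an element whose coefficient is the element
   size.  */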
/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
	return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
	return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}
/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }
  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}
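
/* tree_to_aff_combination and aff_combination_to_tree are designed to
   round-trip (an illustrative sketch):

     aff_tree c;
     tree_to_aff_combination (expr, type, &c);
     tree back = aff_combination_to_tree (&c);

   BACK is equivalent to EXPR but not necessarily structurally identical,
   since elements are re-associated and the offset is re-emitted last with
   its sign normalized.  */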
/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}
/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}
/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}
/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}
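
/* Multiplication distributes over the representation; e.g.
   { x + 1 } * { y + 2 } yields { 1*(x*y) + 1*y + 2*x + 2 }.  Products of
   two variable elements, such as x*y here, are built as explicit MULT_EXPR
   trees by aff_combination_add_product and enter the result as fresh
   elements.  */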
/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static class aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}
/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

class name_expansion
{
public:
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};
/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
			hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e;
  gimple *def;
  widest_int scale;
  class name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
	  && (TYPE_PRECISION (type)
	      >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
	name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
	continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
	  && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
	  && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
	      || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
	continue;

      /* We do not know whether the reference retains its value at the
	 place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
	continue;

      name_expansion **slot = NULL;
      if (*cache)
	slot = (*cache)->get (name);
      exp = slot ? *slot : NULL;
      if (!exp)
	{
	  /* Only bother to handle cases tree_to_aff_combination will.  */
	  switch (code)
	    {
	    case POINTER_PLUS_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case MULT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def),
					    gimple_assign_rhs2 (def)))
		continue;
	      break;
	    case NEGATE_EXPR:
	    case BIT_NOT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		continue;
	      break;
	    CASE_CONVERT:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		/* This makes us always expand conversions which we did
		   in the past and makes gcc.dg/tree-ssa/ivopts-lt-2.c
		   PASS, eliminating one induction variable in IVOPTs.
		   ???  But it is really excessive and we should try
		   harder to do without it.  */
		aff_combination_elt (&current, TREE_TYPE (name),
				     fold_convert (TREE_TYPE (name),
						   gimple_assign_rhs1 (def)));
	      break;
	    default:
	      tree_to_aff_combination (gimple_assign_rhs1 (def),
				       TREE_TYPE (name), &current);
	    }
	  exp = XNEW (class name_expansion);
	  exp->in_progress = 1;
	  if (!*cache)
	    *cache = new hash_map<tree, name_expansion *>;
	  (*cache)->put (name, exp);
	  aff_combination_expand (&current, cache);
	  exp->expansion = current;
	  exp->in_progress = 0;
	}
      else
	{
	  /* Since we follow the definitions in the SSA form, we should not
	     enter a cycle unless we pass through a phi node.  */
	  gcc_assert (!exp->in_progress);
	  current = exp->expansion;
	}
      if (!useless_type_conversion_p (comb->type, current.type))
	aff_combination_convert (&current, comb->type);

      /* Accumulate the new terms to TO_ADD, so that we do not modify
	 COMB while traversing it; include the term -coef * E, to remove
	 it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}
/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   | a_1 = a_0 + a_0;
   | a_2 = a_1 + a_1;
   | a_3 = a_2 + a_2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}
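
/* Typical usage from a client pass (an illustrative sketch; EXPR1 and
   EXPR2 stand for GENERIC trees of a compatible type):

     hash_map<tree, name_expansion *> *cache = NULL;
     aff_tree a1, a2;
     tree_to_aff_combination_expand (expr1, type, &a1, &cache);
     tree_to_aff_combination_expand (expr2, type, &a2, &cache);
     ...
     free_affine_expand_cache (&cache);

   Sharing CACHE between the calls is what keeps repeated expansion of the
   same SSA names linear instead of exponential.  */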
/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete *cache;
  *cache = NULL;
}
/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
			      const poly_widest_int &div,
			      bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
	return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}
/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      class aff_comb_elt *elt
	= aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}
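
/* For example, VAL = { 6*x + 9 } and DIV = { 2*x + 3 } succeed with
   *MULT = 3, whereas VAL = { 6*x + 8 } fails because the offset is not
   the same multiple of DIV's offset as the element coefficients.  */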
/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
	{
	  fprintf (file, "    [%d] = ", i);
	  print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

	  fprintf (file, " * ");
	  print_dec (val->elts[i].coef, file, sgn);
	  if (i != val->n - 1)
	    fprintf (file, ", \n");
	}
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}
/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}
/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}
/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
			   const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    /* The second object is before the first one, we succeed if the last
       element of the second object is before the start of the first one.  */
    return known_le (diff->offset + size2, 0);
  else
    /* We succeed if the second object starts after the first one ends.  */
    return known_le (size1, diff->offset);
}
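
/* E.g. for DIFF = { 8 }, SIZE1 = 8 and SIZE2 = 4 the regions are [0, 8)
   and [8, 12), which cannot overlap, so true is returned.  Any symbolic
   term in DIFF makes the answer conservatively false.  */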