/* Operations with affine combinations of trees.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"

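/* A quick orientation: an affine combination COMB represents the value

     COMB.offset + sum_i (COMB.elts[i].coef * COMB.elts[i].val)
		 + (COMB.rest ? 1 * COMB.rest : 0)

   with at most MAX_AFF_ELTS tracked elements.  For instance, the address
   computation p + 4 * i + 7 comes out roughly as { offset = 7, n = 2,
   elts = { { p, 1 }, { i, 4 } }, rest = NULL_TREE }.  */
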
/* Extends CST as appropriate for the affine combinations COMB.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;

  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
	= wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (new_coef == 0)
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  wide_int_to_tree (type, scale));
    }
}

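/* For instance, scaling { offset = 3, elts = { { i, 2 } } } by 4 yields
   { offset = 12, elts = { { i, 8 } } }; coefficients that wrap around to
   zero in the combination's precision drop their element entirely.  */
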
/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}

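/* E.g. adding ELT = i with SCALE = 3 to a combination already holding the
   element { i, -3 } cancels that element, while adding a new value once all
   MAX_AFF_ELTS slots are in use pushes the scaled term into COMB->rest as a
   plain tree.  */
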
/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
	continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Tries to handle OP0 CODE OP1 as affine combination of parts.  Returns
   true when that was successful and returns the combination in COMB.  */

static bool
expr_to_aff_combination (aff_tree *comb, tree_code code, tree type,
			 tree op0, tree op1 = NULL_TREE)
{
  aff_tree tmp;
  poly_int64 bitpos, bitsize, bytepos;

  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	break;
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, wi::to_widest (op1));
      return true;

    case NEGATE_EXPR:
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      return true;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return true;

    CASE_CONVERT:
      {
	tree otype = type;
	tree inner = op0;
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	if (tree_nop_conversion_p (otype, itype))
	  {
	    tree_to_aff_combination (op0, type, comb);
	    return true;
	  }

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If inner type has undefined overflow behavior, fold conversion
	       for below two cases:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		return expr_to_aff_combination (comb, icode, otype, op0, op1);
	      }

	    wide_int minv, maxv;
	    /* If inner type has wrapping overflow behavior, fold conversion
	       for below case:
		 (T1)(X - CST) -> (T1)X - (T1)CST
	       if X - CST doesn't overflow by range information.  Also handle
	       (T1)(X + CST) as (T1)(X - (-CST)).  */
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op0) == SSA_NAME
		&& TREE_CODE (op1) == INTEGER_CST
		&& icode != MULT_EXPR
		&& get_range_info (op0, &minv, &maxv) == VR_RANGE)
	      {
		if (icode == PLUS_EXPR)
		  op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
		if (wi::geu_p (minv, wi::to_wide (op1)))
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    return expr_to_aff_combination (comb, MINUS_EXPR, otype,
						    op0, op1);
		  }
	      }
	  }
      }
      break;

    default:
      break;
    }

  return false;
}

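/* As an illustration, OP0 PLUS_EXPR OP1 with op0 = i * 4 and op1 = 7 is
   combined into { offset = 7, elts = { { i, 4 } } }, whereas a MULT_EXPR by
   a non-constant op1 simply fails and leaves the caller to treat the whole
   expression as a single element.  */
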
/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0),
				   TREE_OPERAND (expr, 1)))
	return;
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0)))
	return;
      break;

    CASE_CONVERT:
      /* ??? TREE_TYPE (expr) should be equal to type here, but IVOPTS
	 calls this with not showing an outer widening cast.  */
      if (expr_to_aff_combination (comb, code,
				   TREE_TYPE (expr), TREE_OPERAND (expr, 0)))
	{
	  aff_combination_convert (comb, type);
	  return;
	}
      break;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    default:
      if (poly_int_tree_p (expr))
	{
	  aff_combination_const (comb, type, wi::to_poly_widest (expr));
	  return;
	}
      break;
    }

  aff_combination_elt (comb, type, expr);
}

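/* For example, &a[i] (an ADDR_EXPR over an ARRAY_REF) is decomposed via
   get_inner_reference into the constant byte offset of the access, the
   address of the base object added as an element with coefficient 1, and
   the variable part of the offset folded in through a recursive call.  */
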
/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
	return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
	return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}

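/* Converting back is deliberately careful with the constant part: for a
   combination with a negative offset the result is built as x - 1 rather
   than x + 0xff..f, and a pointer base recorded in elts[0] is re-attached
   with fold_build_pointer_plus.  */
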
/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;

  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}

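/* E.g. multiplying { offset = 1, elts = { { x, 1 } } } by
   { offset = 2, elts = { { y, 1 } } } produces offset = 2 and the elements
   2*x, 1*y and 1*(x * y), the last one with the product built as a tree by
   aff_combination_add_product.  */
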
/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static class aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

class name_expansion
{
public:
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   expansions of the ssa names, to avoid exponential time complexity for
   cases like a1 = a0 + a0; a2 = a1 + a1; ...  */

static void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
			hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e;
  gimple *def;
  widest_int scale;
  class name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
	  && (TYPE_PRECISION (type)
	      >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
	name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
	continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
	  && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
	  && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
	      || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
	continue;

      /* We do not know whether the reference retains its value at the
	 place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
	continue;

      name_expansion **slot = NULL;
      if (*cache)
	slot = (*cache)->get (name);
      exp = slot ? *slot : NULL;
      if (!exp)
	{
	  /* Only bother to handle cases tree_to_aff_combination will.  */
	  switch (code)
	    {
	    case POINTER_PLUS_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case MULT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def),
					    gimple_assign_rhs2 (def)))
		continue;
	      break;
	    case NEGATE_EXPR:
	    case BIT_NOT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		continue;
	      break;
	    CASE_CONVERT:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		/* This makes us always expand conversions which we did
		   in the past and makes gcc.dg/tree-ssa/ivopts-lt-2.c
		   PASS, eliminating one induction variable in IVOPTs.
		   ???  But it is really excessive and we should try
		   harder to do without it.  */
		aff_combination_elt (&current, TREE_TYPE (name),
				     fold_convert (TREE_TYPE (name),
						   gimple_assign_rhs1 (def)));
	      break;
	    default:
	      tree_to_aff_combination (gimple_assign_rhs1 (def),
				       TREE_TYPE (name), &current);
	    }
	  exp = XNEW (class name_expansion);
	  exp->in_progress = 1;
	  if (!*cache)
	    *cache = new hash_map<tree, name_expansion *>;
	  (*cache)->put (name, exp);
	  aff_combination_expand (&current, cache);
	  exp->expansion = current;
	  exp->in_progress = 0;
	}
      else
	{
	  /* Since we follow the definitions in the SSA form, we should not
	     enter a cycle unless we pass through a phi node.  */
	  gcc_assert (!exp->in_progress);
	  current = exp->expansion;
	}
      if (!useless_type_conversion_p (comb->type, current.type))
	aff_combination_convert (&current, comb->type);

      /* Accumulate the new terms to TO_ADD, so that we do not modify
	 COMB while traversing it; include the term -coef * E, to remove
	 it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

static bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete *cache;
  *cache = NULL;
}

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
			      const poly_widest_int &div,
			      bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
	return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      class aff_comb_elt *elt
	      = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}

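/* For instance, VAL = { offset = 4, elts = { { i, 8 } } } and
   DIV = { offset = 1, elts = { { i, 2 } } } gives MULT = 4; a mismatch in
   the set of elements, or any rest part on either side, makes the test
   fail.  */
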
/* Prints the affine VAL to the FILE. */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
	{
	  fprintf (file, "    [%d] = ", i);
	  print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

	  fprintf (file, " * ");
	  print_dec (val->elts[i].coef, file, sgn);
	  if (i != val->n - 1)
	    fprintf (file, ", \n");
	}
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}

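/* E.g. for REF = a.f with a 4-byte field F at byte offset 8, ADDR ends up
   describing &a + 8 in sizetype and *SIZE is 4; a variable offset returned
   by get_inner_reference is folded in through the usual affine machinery.  */
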
/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
			   const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    /* The second object is before the first one, we succeed if the last
       element of the second object is before the start of the first one.  */
    return known_le (diff->offset + size2, 0);
  else
    /* We succeed if the second object starts after the first one ends.  */
    return known_le (size1, diff->offset);
}

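/* E.g. with DIFF = { offset = 16 }, SIZE1 = 8 and SIZE2 = 8 the regions
   [0, 8) and [16, 24) cannot overlap; a symbolic difference (nonzero
   DIFF->n) conservatively returns false.  */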