/* Operations with affine combinations of trees.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"

/* Extends CST as appropriate for an affine combination of type TYPE.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
        = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef == 0)
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  wide_int_to_tree (type, scale));
    }
}

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        widest_int new_coef
          = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
        if (new_coef != 0)
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = 1;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}

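/* Example of composing the primitives above (an illustrative sketch only,
   where X is a tree of some integral type TYPE that the caller already
   has).  The combination 3*X + 5 can be built as

     aff_tree a, b;
     aff_combination_elt (&a, type, x);         now a represents x
     aff_combination_scale (&a, 3);             now a represents 3*x
     aff_combination_const (&b, type, 5);       now b represents 5
     aff_combination_add (&a, &b);              now a represents 3*x + 5  */
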
/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
        continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, wi::to_widest (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &reversep,
                                  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
        break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
        {
          tree mem_offset = TREE_OPERAND (core, 1);
          aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
          core = TREE_OPERAND (core, 0);
        }
      else
        core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, 1);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                       (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    CASE_CONVERT:
      {
        tree otype = TREE_TYPE (expr);
        tree inner = TREE_OPERAND (expr, 0);
        tree itype = TREE_TYPE (inner);
        enum tree_code icode = TREE_CODE (inner);

        /* In principle this is a valid folding, but it isn't necessarily
           an optimization, so do it here and not in fold_unary.  */
        if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
            && TREE_CODE (itype) == INTEGER_TYPE
            && TREE_CODE (otype) == INTEGER_TYPE
            && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
          {
            tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

            /* If inner type has undefined overflow behavior, fold conversion
               for below two cases:
                 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
                 (T1)(X + X)     -> (T1)X + (T1)X.  */
            if (TYPE_OVERFLOW_UNDEFINED (itype)
                && (TREE_CODE (op1) == INTEGER_CST
                    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
              {
                op0 = fold_convert (otype, op0);
                op1 = fold_convert (otype, op1);
                expr = fold_build2 (icode, otype, op0, op1);
                tree_to_aff_combination (expr, type, comb);
                return;
              }
            wide_int minv, maxv;
            /* If inner type has wrapping overflow behavior, fold conversion
               for below case:
               (T1)(X - CST) -> (T1)X - (T1)CST
               if X - CST doesn't overflow by range information.  Also handle
               (T1)(X + CST) as (T1)(X - (-CST)).  */
            if (TYPE_UNSIGNED (itype)
                && TYPE_OVERFLOW_WRAPS (itype)
                && TREE_CODE (op0) == SSA_NAME
                && TREE_CODE (op1) == INTEGER_CST
                && icode != MULT_EXPR
                && get_range_info (op0, &minv, &maxv) == VR_RANGE)
              {
                if (icode == PLUS_EXPR)
                  op1 = wide_int_to_tree (itype, -wi::to_wide (op1));
                if (wi::geu_p (minv, wi::to_wide (op1)))
                  {
                    op0 = fold_convert (otype, op0);
                    op1 = fold_convert (otype, op1);
                    expr = fold_build2 (MINUS_EXPR, otype, op0, op1);
                    tree_to_aff_combination (expr, type, comb);
                    return;
                  }
              }
          }
      }
      break;

    default:
      {
        if (poly_int_tree_p (expr))
          {
            aff_combination_const (comb, type, wi::to_poly_widest (expr));
            return;
          }
        break;
      }
    }

  aff_combination_elt (comb, type, expr);
}

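/* For instance, for EXPR = a + 4 * b + 7 of integral type the resulting
   combination has offset 7 and two elements, {val = a, coef = 1} and
   {val = b, coef = 4}, with rest == NULL_TREE.  An expression that does
   not decompose further (the default case above) becomes a single element
   with coefficient 1.  */
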
/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
        return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
        return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
          && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
        {
          base = comb->elts[0].val;
          ++i;
        }
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}

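/* Conversely to tree_to_aff_combination, a combination with offset 7 and
   elements {a, 1} and {b, 4} is folded back into a tree equivalent to
   a + 4 * b + 7.  Negative coefficients and offsets are emitted as
   subtractions, so an element x together with offset -1 becomes x - 1
   rather than x + 0xff...f.  */
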
/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
        /* Access coeffs[0] directly, for efficiency.  */
        aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
        {
          /* c->offset is polynomial, so multiply VAL rather than COEF
             by it.  */
          tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
          val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
          aff_combination_add_elt (r, val, coef);
        }
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}

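/* For example, multiplying C1 = 2*x + 3 by C2 = y + 1 produces
   R = 2*x*y + 3*y + 2*x + 3; the x*y term is represented by an element
   whose value is the tree x * y built inside aff_combination_add_product.  */
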
/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
			hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple *def;
  widest_int scale;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = new hash_map<tree, name_expansion *>;
      name_expansion **slot = &(*cache)->get_or_insert (e);
      exp = *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          rhs = gimple_assign_rhs_to_tree (def);
          if (e != name)
            rhs = fold_convert (type, rhs);

          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   with long chains of dependent definitions.  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

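/* As an illustration, if the IL contains a_2 = b_1 + 1 and c_3 = a_2 + 2,
   expanding a combination that mentions c_3 replaces the c_3 term with
   b_1 + 3, and the intermediate expansion of a_2 is remembered in CACHE
   so that it is not recomputed.  */
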
/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete *cache;
  *cache = NULL;
}

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
			      const poly_widest_int &div,
			      bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }

  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
	= aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}

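/* For example, VAL = 6*x + 12 is a constant multiple of DIV = 2*x + 4
   with X = 3; the offsets and every pair of matching coefficients must
   all yield the same multiplier for the test to succeed.  */
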
/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          print_dec (val->elts[i].coef, file, sgn);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
			   const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one.  */
      return known_le (diff->offset + size2, 0);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return known_le (size1, diff->offset);
    }
}

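/* For instance, with DIFF->offset = 16, SIZE1 = 8 and SIZE2 = 4 the regions
   [0, 8) and [16, 20) cannot overlap, so true is returned; if the difference
   still contains a symbolic term (DIFF->n != 0), nothing can be concluded and
   the function conservatively returns false.  */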