/* Operations with affine combinations of trees.
   Copyright (C) 2005-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "tree-pretty-print.h"
#include "fold-const.h"
#include "tree-affine.h"
#include "gimplify.h"
#include "dumpfile.h"
#include "cfgexpand.h"
#include "value-query.h"

/* Extends CST as appropriate for an affine combination of type TYPE.  */

static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Likewise for polynomial offsets.  */

static poly_widest_int
wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
{
  return wi::sext (cst, TYPE_PRECISION (type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  int i;
  comb->type = type;
  comb->offset = 0;
  comb->n = 0;
  for (i = 0; i < MAX_AFF_ELTS; i++)
    comb->elts[i].coef = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
  aff_combination_zero (comb, type);
  comb->offset = wide_int_ext_for_comb (cst, comb->type);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = 1;
}
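
/* Usage sketch (illustrative, not part of this file): the initializers
   above compose into larger combinations.  Assuming X is a tree of some
   integer type TYPE, building the combination 3*X + 7 could look like:

     aff_tree c, tmp;
     aff_combination_elt (&c, type, x);        // c = x
     aff_combination_scale (&c, 3);            // c = 3*x
     aff_combination_const (&tmp, type, 7);    // tmp = 7
     aff_combination_add (&c, &tmp);           // c = 3*x + 7

   (aff_combination_scale and aff_combination_add are defined below.)  */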

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
  unsigned i, j;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 1)
    return;

  if (scale == 0)
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb->type);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      widest_int new_coef
	= wide_int_ext_for_comb (scale * comb->elts[i].coef, comb->type);
      /* A coefficient may become zero due to overflow.  Remove the zero
	 elements.  */
      if (new_coef == 0)
	continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
	type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
	{
	  comb->elts[comb->n].coef = scale;
	  comb->elts[comb->n].val = comb->rest;
	  comb->rest = NULL_TREE;
	  comb->n++;
	}
      else
	comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
				  wide_int_to_tree (type, scale));
    }
}
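
/* Worked sketch (illustrative): scaling is done in the precision of
   COMB->type, so a coefficient can wrap to zero and drop its element,
   which is what the overflow check above handles.  Assuming T8 is an
   8-bit integer type and X a tree of that type:

     aff_tree c;
     aff_combination_elt (&c, t8, x);    // c = 1*x
     aff_combination_scale (&c, 64);     // c = 64*x
     aff_combination_scale (&c, 4);      // 256 wraps to 0; c becomes 0  */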

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
  unsigned i;
  tree type;

  widest_int scale = wide_int_ext_for_comb (scale_in, comb->type);
  if (scale == 0)
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
	widest_int new_coef
	  = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb->type);
	if (new_coef != 0)
	  {
	    comb->elts[i].coef = new_coef;
	    return;
	  }

	comb->n--;
	comb->elts[i] = comb->elts[comb->n];

	if (comb->rest)
	  {
	    gcc_assert (comb->n == MAX_AFF_ELTS - 1);
	    comb->elts[comb->n].coef = 1;
	    comb->elts[comb->n].val = comb->rest;
	    comb->rest = NULL_TREE;
	    comb->n++;
	  }
	return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale == 1)
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
		       fold_convert (type, elt),
		       wide_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
			      elt);
  else
    comb->rest = elt;
}
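
/* Usage sketch (illustrative): adding an element that is already present
   merges the coefficients, and a merged coefficient of zero removes the
   entry.  Assuming X and Y are trees of integer type TYPE:

     aff_tree c;
     aff_combination_elt (&c, type, x);        // c = x
     aff_combination_add_elt (&c, y, 2);       // c = x + 2*y
     aff_combination_add_elt (&c, x, -1);      // x cancels; c = 2*y  */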

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
  c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, 1);
}
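
/* Usage sketch (illustrative): a caller can form a difference by scaling
   the subtrahend by -1 and adding, mirroring the MINUS_EXPR handling
   further below.  Assuming EXPR_A and EXPR_B are trees sharing TYPE:

     aff_tree a, b;
     tree_to_aff_combination (expr_a, type, &a);
     tree_to_aff_combination (expr_b, type, &b);
     aff_combination_scale (&b, -1);
     aff_combination_add (&a, &b);    // a now represents expr_a - expr_b  */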

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = wide_int_ext_for_comb (comb->offset, comb->type);
  for (i = j = 0; i < comb->n; i++)
    {
      if (comb->elts[i].coef == 0)
	continue;
      comb->elts[j].coef = comb->elts[i].coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Tries to handle OP0 CODE OP1 as an affine combination of parts.
   Returns true on success and stores the combination in COMB.  */

static bool
expr_to_aff_combination (aff_tree *comb, tree_code code, tree type,
			 tree op0, tree op1 = NULL_TREE)
{
  aff_tree tmp;
  poly_int64 bitpos, bitsize, bytepos;

  switch (code)
    {
    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return true;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (op0, type, comb);
      tree_to_aff_combination (op1, type, &tmp);
      if (code == MINUS_EXPR)
	aff_combination_scale (&tmp, -1);
      aff_combination_add (comb, &tmp);
      return true;

    case MULT_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST)
	break;
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, wi::to_widest (op1));
      return true;

    case NEGATE_EXPR:
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      return true;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (op0, type, comb);
      aff_combination_scale (comb, -1);
      aff_combination_add_cst (comb, -1);
      return true;

    CASE_CONVERT:
      {
	tree otype = type;
	tree inner = op0;
	tree itype = TREE_TYPE (inner);
	enum tree_code icode = TREE_CODE (inner);

	/* STRIP_NOPS.  */
	if (tree_nop_conversion_p (otype, itype))
	  {
	    tree_to_aff_combination (op0, type, comb);
	    return true;
	  }

	/* In principle this is a valid folding, but it isn't necessarily
	   an optimization, so do it here and not in fold_unary.  */
	if ((icode == PLUS_EXPR || icode == MINUS_EXPR || icode == MULT_EXPR)
	    && TREE_CODE (itype) == INTEGER_TYPE
	    && TREE_CODE (otype) == INTEGER_TYPE
	    && TYPE_PRECISION (otype) > TYPE_PRECISION (itype))
	  {
	    tree op0 = TREE_OPERAND (inner, 0), op1 = TREE_OPERAND (inner, 1);

	    /* If the inner type has undefined overflow behavior, fold the
	       conversion for the two cases below:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
		 (T1)(X + X)     -> (T1)X + (T1)X.  */
	    if (TYPE_OVERFLOW_UNDEFINED (itype)
		&& (TREE_CODE (op1) == INTEGER_CST
		    || (icode == PLUS_EXPR && operand_equal_p (op0, op1, 0))))
	      {
		op0 = fold_convert (otype, op0);
		op1 = fold_convert (otype, op1);
		return expr_to_aff_combination (comb, icode, otype, op0, op1);
	      }

	    wide_int minv, maxv;
	    /* If the inner type has wrapping overflow behavior, fold the
	       conversion for the case below:
		 (T1)(X *+- CST) -> (T1)X *+- (T1)CST
	       if X *+- CST doesn't overflow according to range
	       information.  */
	    value_range vr;
	    if (TYPE_UNSIGNED (itype)
		&& TYPE_OVERFLOW_WRAPS (itype)
		&& TREE_CODE (op1) == INTEGER_CST
		&& get_range_query (cfun)->range_of_expr (vr, op0)
		&& !vr.varying_p ()
		&& !vr.undefined_p ())
	      {
		minv = vr.lower_bound ();
		maxv = vr.upper_bound ();
		wi::overflow_type overflow = wi::OVF_NONE;
		signop sign = UNSIGNED;
		if (icode == PLUS_EXPR)
		  wi::add (maxv, wi::to_wide (op1), sign, &overflow);
		else if (icode == MULT_EXPR)
		  wi::mul (maxv, wi::to_wide (op1), sign, &overflow);
		else
		  wi::sub (minv, wi::to_wide (op1), sign, &overflow);

		if (overflow == wi::OVF_NONE)
		  {
		    op0 = fold_convert (otype, op0);
		    op1 = fold_convert (otype, op1);
		    return expr_to_aff_combination (comb, icode, otype, op0,
						    op1);
		  }
	      }
	  }
      }
      break;

    default:;
    }

  return false;
}
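
/* Worked sketch (illustrative) for the wrapping-overflow case above:
   assume OP0 is X of unsigned 32-bit type with the derived range
   [0, 100] and the inner expression is X + 10.  Then MAXV is 100,
   100 + 10 does not overflow in 32 bits, so the widening conversion
   (uint64_t)(X + 10) is safely rewritten as (uint64_t)X + 10 and the
   function recurses on the PLUS_EXPR.  */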

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree core, toffset;
  poly_int64 bitpos, bitsize, bytepos;
  machine_mode mode;
  int unsignedp, reversep, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0),
				   TREE_OPERAND (expr, 1)))
	return;
      break;

    case NEGATE_EXPR:
    case BIT_NOT_EXPR:
      if (expr_to_aff_combination (comb, code, type, TREE_OPERAND (expr, 0)))
	return;
      break;

    CASE_CONVERT:
      /* ??? TREE_TYPE (expr) should be equal to type here, but IVOPTS
	 calls this with not showing an outer widening cast.  */
      if (expr_to_aff_combination (comb, code,
				   TREE_TYPE (expr), TREE_OPERAND (expr, 0)))
	{
	  aff_combination_convert (comb, type);
	  return;
	}
      break;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
	{
	  expr = TREE_OPERAND (expr, 0);
	  tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
	  tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
	  aff_combination_add (comb, &tmp);
	  return;
	}
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
				  &toffset, &mode, &unsignedp, &reversep,
				  &volatilep);
      if (!multiple_p (bitpos, BITS_PER_UNIT, &bytepos))
	break;
      aff_combination_const (comb, type, bytepos);
      if (TREE_CODE (core) == MEM_REF)
	{
	  tree mem_offset = TREE_OPERAND (core, 1);
	  aff_combination_add_cst (comb, wi::to_poly_widest (mem_offset));
	  core = TREE_OPERAND (core, 0);
	}
      else
	core = build_fold_addr_expr (core);

      if (TREE_CODE (core) == ADDR_EXPR)
	aff_combination_add_elt (comb, core, 1);
      else
	{
	  tree_to_aff_combination (core, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      if (toffset)
	{
	  tree_to_aff_combination (toffset, type, &tmp);
	  aff_combination_add (comb, &tmp);
	}
      return;

    default:
      {
	if (poly_int_tree_p (expr))
	  {
	    aff_combination_const (comb, type, wi::to_poly_widest (expr));
	    return;
	  }
	break;
      }
    }

  aff_combination_elt (comb, type, expr);
}
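
/* Worked sketch (illustrative): for EXPR = (a + 4) * 2 in integer TYPE,
   the MULT_EXPR case scales the combination of a + 4 by 2, so COMB ends
   up as { offset = 8, n = 1, elts[0] = { a, 2 } }, i.e. 2*a + 8:

     aff_tree c;
     tree_to_aff_combination (expr, type, &c);
     // c.offset == 8, c.n == 1, c.elts[0].val == a, c.elts[0].coef == 2  */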

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in)
{
  enum tree_code code;

  widest_int scale = wide_int_ext_for_comb (scale_in, type);

  elt = fold_convert (type, elt);
  if (scale == 1)
    {
      if (!expr)
	return elt;

      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale == -1)
    {
      if (!expr)
	return fold_build1 (NEGATE_EXPR, type, elt);

      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));

  if (wi::neg_p (scale))
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type, elt, wide_int_to_tree (type, scale));
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
  unsigned i;
  poly_widest_int off;
  int sgn;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  i = 0;
  if (POINTER_TYPE_P (type))
    {
      type = sizetype;
      if (comb->n > 0 && comb->elts[0].coef == 1
	  && POINTER_TYPE_P (TREE_TYPE (comb->elts[0].val)))
	{
	  base = comb->elts[0].val;
	  ++i;
	}
    }

  for (; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, 1);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (known_lt (comb->offset, 0))
    {
      off = -comb->offset;
      sgn = -1;
    }
  else
    {
      off = comb->offset;
      sgn = 1;
    }
  expr = add_elt_to_tree (expr, type, wide_int_to_tree (type, off), sgn);

  if (base)
    return fold_build_pointer_plus (base, expr);
  else
    return fold_convert (comb->type, expr);
}
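
/* Usage sketch (illustrative): decomposition and re-materialization form
   a round trip, which is how passes such as IVOPTs re-associate address
   expressions:

     aff_tree c;
     tree_to_aff_combination (expr, type, &c);
     // ... manipulate c: scale, add other combinations, etc. ...
     tree expr2 = aff_combination_to_tree (&c);  */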

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Removes the M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = 1;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
			     aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
	{
	  type = TREE_TYPE (aval);
	  aval = fold_build2 (MULT_EXPR, type, aval,
			      fold_convert (type, val));
	}

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    {
      if (c->offset.is_constant ())
	/* Access coeffs[0] directly, for efficiency.  */
	aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
      else
	{
	  /* c->offset is polynomial, so multiply VAL rather than COEF
	     by it.  */
	  tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
	  val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
	  aff_combination_add_elt (r, val, coef);
	}
    }
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, 1, c2->rest, r);
  if (c2->offset.is_constant ())
    /* Access coeffs[0] directly, for efficiency.  */
    aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
  else
    {
      /* c2->offset is polynomial, so do the multiplication in tree form.  */
      tree offset = wide_int_to_tree (c2->type, c2->offset);
      aff_combination_add_product (c1, 1, offset, r);
    }
}
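
/* Worked sketch (illustrative): multiplying C1 = 2*x + 3 by C2 = x + 1
   distributes term by term:

     (2*x + 3) * (x + 1) = 2*(x*x) + 2*x + 3*x + 3 = 2*(x*x) + 5*x + 3

   so R receives offset 3, the element { x, 5 }, and an element whose
   value is the freshly built tree x*x with coefficient 2.  */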

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static class aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
	if (idx)
	  *idx = i;

	return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

class name_expansion
{
public:
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
			hash_map<tree, name_expansion *> **cache)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e;
  gimple *def;
  widest_int scale;
  class name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (CONVERT_EXPR_P (e)
	  && (TYPE_PRECISION (type)
	      >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
	name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
	continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
	continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
	  && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
	  && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
	      || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
	continue;

      /* We do not know whether the reference retains its value at the
	 place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
	continue;

      name_expansion **slot = NULL;
      if (*cache)
	slot = (*cache)->get (name);
      exp = slot ? *slot : NULL;
      if (!exp)
	{
	  /* Only bother to handle cases tree_to_aff_combination will.  */
	  switch (code)
	    {
	    case POINTER_PLUS_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case MULT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def),
					    gimple_assign_rhs2 (def)))
		continue;
	      break;
	    case NEGATE_EXPR:
	    case BIT_NOT_EXPR:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		continue;
	      break;
	    CASE_CONVERT:
	      if (!expr_to_aff_combination (&current, code, TREE_TYPE (name),
					    gimple_assign_rhs1 (def)))
		/* This makes us always expand conversions which we did
		   in the past and makes gcc.dg/tree-ssa/ivopts-lt-2.c
		   PASS, eliminating one induction variable in IVOPTs.
		   ???  But it is really excessive and we should try
		   harder to do without it.  */
		aff_combination_elt (&current, TREE_TYPE (name),
				     fold_convert (TREE_TYPE (name),
						   gimple_assign_rhs1 (def)));
	      break;
	    case ADDR_EXPR:
	    case INTEGER_CST:
	    case POLY_INT_CST:
	      tree_to_aff_combination (gimple_assign_rhs1 (def),
				       TREE_TYPE (name), &current);
	      break;
	    default:
	      continue;
	    }
	  exp = XNEW (class name_expansion);
	  exp->in_progress = 1;
	  if (!*cache)
	    *cache = new hash_map<tree, name_expansion *>;
	  (*cache)->put (name, exp);
	  aff_combination_expand (&current, cache);
	  exp->expansion = current;
	  exp->in_progress = 0;
	}
      else
	{
	  /* Since we follow the definitions in the SSA form, we should not
	     enter a cycle unless we pass through a phi node.  */
	  gcc_assert (!exp->in_progress);
	  current = exp->expansion;
	}
      if (!useless_type_conversion_p (comb->type, current.type))
	aff_combination_convert (&current, comb->type);

      /* Accumulate the new terms to TO_ADD, so that we do not modify
	 COMB while traversing it; include the term -coef * E, to remove
	 it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   like

   a1 = a0 + a0;
   a2 = a1 + a1;
   a3 = a2 + a2;
   ...  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
				hash_map<tree, name_expansion *> **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   hash_map::traverse.  */

bool
free_name_expansion (tree const &, name_expansion **value, void *)
{
  free (*value);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (hash_map<tree, name_expansion *> **cache)
{
  if (!*cache)
    return;

  (*cache)->traverse<void *, free_name_expansion> (NULL);
  delete (*cache);
  *cache = NULL;
}
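
/* Usage sketch (illustrative): the expansion entry points are meant to be
   used with a caller-owned cache, created lazily on first use and freed
   once after the last expansion:

     hash_map<tree, name_expansion *> *cache = NULL;
     aff_tree c;
     tree_to_aff_combination_expand (expr, type, &c, &cache);
     // ... more expansions sharing (and filling) the same cache ...
     free_affine_expand_cache (&cache);  */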

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if *MULT_SET is true, additionally compares CST and MULT,
   and if they are different, returns false.  Finally, if neither of these
   two cases occur, true is returned, and CST is stored to MULT and MULT_SET
   is set to true.  */

static bool
wide_int_constant_multiple_p (const poly_widest_int &val,
			      const poly_widest_int &div,
			      bool *mult_set, poly_widest_int *mult)
{
  poly_widest_int rem, cst;

  if (known_eq (val, 0))
    {
      if (*mult_set && maybe_ne (*mult, 0))
	return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }

  if (maybe_eq (div, 0))
    return false;

  if (!multiple_p (val, div, &cst))
    return false;

  if (*mult_set && maybe_ne (*mult, cst))
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
				     poly_widest_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && known_eq (val->offset, 0))
    {
      *mult = 0;
      return true;
    }
  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!wide_int_constant_multiple_p (val->offset, div->offset,
				     &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      class aff_comb_elt *elt
	= aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
	return false;
      if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
					 &mult_set, mult))
	return false;
    }

  gcc_assert (mult_set);
  return true;
}
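
/* Worked sketch (illustrative): for VAL = 6*x + 12 and DIV = 2*x + 4,
   the offsets give the candidate multiple 12 / 4 = 3, and the element
   coefficients agree (6 = 3 * 2), so the function returns true with
   *MULT = 3.  For VAL = 6*x + 10 the offset 10 is not a multiple of 4,
   so it returns false.  */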

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  signop sgn = TYPE_SIGN (val->type);
  if (POINTER_TYPE_P (val->type))
    sgn = SIGNED;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  print_dec (val->offset, file, sgn);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
	{
	  fprintf (file, "    [%d] = ", i);
	  print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

	  fprintf (file, " * ");
	  print_dec (val->elts[i].coef, file, sgn);
	  if (i != val->n - 1)
	    fprintf (file, ", \n");
	}
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Computes address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  Returns the ultimate containing object to
   which REF refers.  */

tree
get_inner_reference_aff (tree ref, aff_tree *addr, poly_widest_int *size)
{
  poly_int64 bitsize, bitpos;
  tree toff;
  machine_mode mode;
  int uns, rev, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
				   &uns, &rev, &vol);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype, bits_to_bytes_round_down (bitpos));
  aff_combination_add (addr, &tmp);

  *size = bits_to_bytes_round_up (bitsize);

  return base;
}
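
/* Worked sketch (illustrative): for REF = a.f, where field F lies at byte
   offset 4 in A and occupies 8 bytes, ADDR becomes the combination
   &a + 4, *SIZE becomes 8, and the returned base object is A itself.  */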

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
			   const poly_widest_int &size2)
{
  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  if (!ordered_p (diff->offset, 0))
    return false;

  if (maybe_lt (diff->offset, 0))
    {
      /* The second object is before the first one, we succeed if the last
	 element of the second object is before the start of the first one.  */
      return known_le (diff->offset + size2, 0);
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return known_le (size1, diff->offset);
    }
}
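
/* Worked sketch (illustrative): for two accesses at P and P + 16 with
   sizes 8 and 8, DIFF is the constant combination { offset = 16, n = 0 }.
   Since 16 >= 0 and known_le (8, 16) holds, the regions [P, P+8) and
   [P+16, P+24) cannot overlap and the function returns true.  */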