/* Operations with affine combinations of trees.
   Copyright (C) 2005, 2007, 2008, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "tree-pretty-print.h"
#include "pointer-set.h"
#include "tree-affine.h"
#include "gimple.h"

/* Extends CST as appropriate for the affine combination COMB.  */

double_int
double_int_ext_for_comb (double_int cst, aff_tree *comb)
{
  return cst.sext (TYPE_PRECISION (comb->type));
}

/* Initializes affine combination COMB so that its value is zero in TYPE.  */

static void
aff_combination_zero (aff_tree *comb, tree type)
{
  comb->type = type;
  comb->offset = double_int_zero;
  comb->n = 0;
  comb->rest = NULL_TREE;
}

/* Sets COMB to CST.  */

void
aff_combination_const (aff_tree *comb, tree type, double_int cst)
{
  aff_combination_zero (comb, type);

  comb->offset = double_int_ext_for_comb (cst, comb);
}

/* Sets COMB to single element ELT.  */

void
aff_combination_elt (aff_tree *comb, tree type, tree elt)
{
  aff_combination_zero (comb, type);

  comb->n = 1;
  comb->elts[0].val = elt;
  comb->elts[0].coef = double_int_one;
}

/* Scales COMB by SCALE.  */

void
aff_combination_scale (aff_tree *comb, double_int scale)
{
  unsigned i, j;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_one ())
    return;

  if (scale.is_zero ())
    {
      aff_combination_zero (comb, comb->type);
      return;
    }

  comb->offset
    = double_int_ext_for_comb (scale * comb->offset, comb);
  for (i = 0, j = 0; i < comb->n; i++)
    {
      double_int new_coef;

      new_coef
        = double_int_ext_for_comb (scale * comb->elts[i].coef, comb);
      /* A coefficient may become zero due to overflow.  Remove the zero
         elements.  */
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = comb->elts[i].val;
      j++;
    }
  comb->n = j;

  if (comb->rest)
    {
      tree type = comb->type;
      if (POINTER_TYPE_P (type))
        type = sizetype;
      if (comb->n < MAX_AFF_ELTS)
        {
          comb->elts[comb->n].coef = scale;
          comb->elts[comb->n].val = comb->rest;
          comb->rest = NULL_TREE;
          comb->n++;
        }
      else
        comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
                                  double_int_to_tree (type, scale));
    }
}
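
/* Usage sketch (editorial illustration, not part of the original file):
   scaling multiplies the offset and every coefficient.  Assuming X is
   an existing tree of type sizetype:

       aff_tree a;
       aff_combination_const (&a, sizetype, double_int::from_shwi (3));
       aff_combination_add_elt (&a, x, double_int::from_shwi (2));
       aff_combination_scale (&a, double_int::from_shwi (5));

   afterwards a.offset is 15 and the coefficient of X is 10.  */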

/* Adds ELT * SCALE to COMB.  */

void
aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
{
  unsigned i;
  tree type;

  scale = double_int_ext_for_comb (scale, comb);
  if (scale.is_zero ())
    return;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, elt, 0))
      {
        double_int new_coef;

        new_coef = comb->elts[i].coef + scale;
        new_coef = double_int_ext_for_comb (new_coef, comb);
        if (!new_coef.is_zero ())
          {
            comb->elts[i].coef = new_coef;
            return;
          }

        comb->n--;
        comb->elts[i] = comb->elts[comb->n];

        if (comb->rest)
          {
            gcc_assert (comb->n == MAX_AFF_ELTS - 1);
            comb->elts[comb->n].coef = double_int_one;
            comb->elts[comb->n].val = comb->rest;
            comb->rest = NULL_TREE;
            comb->n++;
          }
        return;
      }
  if (comb->n < MAX_AFF_ELTS)
    {
      comb->elts[comb->n].coef = scale;
      comb->elts[comb->n].val = elt;
      comb->n++;
      return;
    }

  type = comb->type;
  if (POINTER_TYPE_P (type))
    type = sizetype;

  if (scale.is_one ())
    elt = fold_convert (type, elt);
  else
    elt = fold_build2 (MULT_EXPR, type,
                       fold_convert (type, elt),
                       double_int_to_tree (type, scale));

  if (comb->rest)
    comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, elt);
  else
    comb->rest = elt;
}

/* Adds CST to C.  */

static void
aff_combination_add_cst (aff_tree *c, double_int cst)
{
  c->offset = double_int_ext_for_comb (c->offset + cst, c);
}

/* Adds COMB2 to COMB1.  */

void
aff_combination_add (aff_tree *comb1, aff_tree *comb2)
{
  unsigned i;

  aff_combination_add_cst (comb1, comb2->offset);
  for (i = 0; i < comb2->n; i++)
    aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
  if (comb2->rest)
    aff_combination_add_elt (comb1, comb2->rest, double_int_one);
}

/* Converts affine combination COMB to TYPE.  */

void
aff_combination_convert (aff_tree *comb, tree type)
{
  unsigned i, j;
  tree comb_type = comb->type;

  if (TYPE_PRECISION (type) > TYPE_PRECISION (comb_type))
    {
      tree val = fold_convert (type, aff_combination_to_tree (comb));
      tree_to_aff_combination (val, type, comb);
      return;
    }

  comb->type = type;
  if (comb->rest && !POINTER_TYPE_P (type))
    comb->rest = fold_convert (type, comb->rest);

  if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
    return;

  comb->offset = double_int_ext_for_comb (comb->offset, comb);
  for (i = j = 0; i < comb->n; i++)
    {
      double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
      if (new_coef.is_zero ())
        continue;
      comb->elts[j].coef = new_coef;
      comb->elts[j].val = fold_convert (type, comb->elts[i].val);
      j++;
    }

  comb->n = j;
  if (comb->n < MAX_AFF_ELTS && comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Splits EXPR into an affine combination of parts.  */

void
tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
{
  aff_tree tmp;
  enum tree_code code;
  tree cst, core, toffset;
  HOST_WIDE_INT bitpos, bitsize;
  enum machine_mode mode;
  int unsignedp, volatilep;

  STRIP_NOPS (expr);

  code = TREE_CODE (expr);
  switch (code)
    {
    case INTEGER_CST:
      aff_combination_const (comb, type, tree_to_double_int (expr));
      return;

    case POINTER_PLUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    case PLUS_EXPR:
    case MINUS_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
      if (code == MINUS_EXPR)
        aff_combination_scale (&tmp, double_int_minus_one);
      aff_combination_add (comb, &tmp);
      return;

    case MULT_EXPR:
      cst = TREE_OPERAND (expr, 1);
      if (TREE_CODE (cst) != INTEGER_CST)
        break;
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, tree_to_double_int (cst));
      return;

    case NEGATE_EXPR:
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      return;

    case BIT_NOT_EXPR:
      /* ~x = -x - 1 */
      tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
      aff_combination_scale (comb, double_int_minus_one);
      aff_combination_add_cst (comb, double_int_minus_one);
      return;

    case ADDR_EXPR:
      /* Handle &MEM[ptr + CST] which is equivalent to POINTER_PLUS_EXPR.  */
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == MEM_REF)
        {
          expr = TREE_OPERAND (expr, 0);
          tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
          tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
          aff_combination_add (comb, &tmp);
          return;
        }
      core = get_inner_reference (TREE_OPERAND (expr, 0), &bitsize, &bitpos,
                                  &toffset, &mode, &unsignedp, &volatilep,
                                  false);
      if (bitpos % BITS_PER_UNIT != 0)
        break;
      aff_combination_const (comb, type,
                             double_int::from_uhwi (bitpos / BITS_PER_UNIT));
      core = build_fold_addr_expr (core);
      if (TREE_CODE (core) == ADDR_EXPR)
        aff_combination_add_elt (comb, core, double_int_one);
      else
        {
          tree_to_aff_combination (core, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      if (toffset)
        {
          tree_to_aff_combination (toffset, type, &tmp);
          aff_combination_add (comb, &tmp);
        }
      return;

    case MEM_REF:
      if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
        tree_to_aff_combination (TREE_OPERAND (TREE_OPERAND (expr, 0), 0),
                                 type, comb);
      else if (integer_zerop (TREE_OPERAND (expr, 1)))
        {
          aff_combination_elt (comb, type, expr);
          return;
        }
      else
        aff_combination_elt (comb, type,
                             build2 (MEM_REF, TREE_TYPE (expr),
                                     TREE_OPERAND (expr, 0),
                                     build_int_cst
                                       (TREE_TYPE (TREE_OPERAND (expr, 1)), 0)));
      tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
      aff_combination_add (comb, &tmp);
      return;

    default:
      break;
    }

  aff_combination_elt (comb, type, expr);
}
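
/* Usage sketch (editorial illustration, not part of the original file):
   for an address such as  &a[i + 3]  with 4-byte elements, the resulting
   combination is roughly  { offset 12; 1 * &a; 4 * i }:

       aff_tree comb;
       tree_to_aff_combination (addr, sizetype, &comb);  */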

/* Creates EXPR + ELT * SCALE in TYPE.  EXPR is taken from affine
   combination COMB.  */

static tree
add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
                 aff_tree *comb)
{
  enum tree_code code;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  scale = double_int_ext_for_comb (scale, comb);
  elt = fold_convert (type1, elt);

  if (scale.is_one ())
    {
      if (!expr)
        return fold_convert (type, elt);

      if (POINTER_TYPE_P (type))
        return fold_build_pointer_plus (expr, elt);
      return fold_build2 (PLUS_EXPR, type, expr, elt);
    }

  if (scale.is_minus_one ())
    {
      if (!expr)
        return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt));

      if (POINTER_TYPE_P (type))
        {
          elt = fold_build1 (NEGATE_EXPR, type1, elt);
          return fold_build_pointer_plus (expr, elt);
        }
      return fold_build2 (MINUS_EXPR, type, expr, elt);
    }

  if (!expr)
    return fold_convert (type,
                         fold_build2 (MULT_EXPR, type1, elt,
                                      double_int_to_tree (type1, scale)));

  if (scale.is_negative ())
    {
      code = MINUS_EXPR;
      scale = -scale;
    }
  else
    code = PLUS_EXPR;

  elt = fold_build2 (MULT_EXPR, type1, elt,
                     double_int_to_tree (type1, scale));
  if (POINTER_TYPE_P (type))
    {
      if (code == MINUS_EXPR)
        elt = fold_build1 (NEGATE_EXPR, type1, elt);
      return fold_build_pointer_plus (expr, elt);
    }
  return fold_build2 (code, type, expr, elt);
}

/* Makes tree from the affine combination COMB.  */

tree
aff_combination_to_tree (aff_tree *comb)
{
  tree type = comb->type;
  tree expr = NULL_TREE;
  unsigned i;
  double_int off, sgn;
  tree type1 = type;
  if (POINTER_TYPE_P (type))
    type1 = sizetype;

  gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);

  for (i = 0; i < comb->n; i++)
    expr = add_elt_to_tree (expr, type, comb->elts[i].val, comb->elts[i].coef,
                            comb);

  if (comb->rest)
    expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);

  /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
     unsigned.  */
  if (comb->offset.is_negative ())
    {
      off = -comb->offset;
      sgn = double_int_minus_one;
    }
  else
    {
      off = comb->offset;
      sgn = double_int_one;
    }
  return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn,
                          comb);
}
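
/* Usage sketch (editorial illustration, not part of the original file):
   a combination can be manipulated and then rebuilt as a GENERIC tree,
   e.g. to form EXPR + 1:

       aff_tree comb;
       tree_to_aff_combination (expr, type, &comb);
       aff_combination_add_cst (&comb, double_int_one);
       tree res = aff_combination_to_tree (&comb);  */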

/* Copies the tree elements of COMB to ensure that they are not shared.  */

void
unshare_aff_combination (aff_tree *comb)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    comb->elts[i].val = unshare_expr (comb->elts[i].val);
  if (comb->rest)
    comb->rest = unshare_expr (comb->rest);
}

/* Remove M-th element from COMB.  */

void
aff_combination_remove_elt (aff_tree *comb, unsigned m)
{
  comb->n--;
  if (m <= comb->n)
    comb->elts[m] = comb->elts[comb->n];
  if (comb->rest)
    {
      comb->elts[comb->n].coef = double_int_one;
      comb->elts[comb->n].val = comb->rest;
      comb->rest = NULL_TREE;
      comb->n++;
    }
}

/* Adds C * COEF * VAL to R.  VAL may be NULL, in that case only
   C * COEF is added to R.  */

static void
aff_combination_add_product (aff_tree *c, double_int coef, tree val,
                             aff_tree *r)
{
  unsigned i;
  tree aval, type;

  for (i = 0; i < c->n; i++)
    {
      aval = c->elts[i].val;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef * c->elts[i].coef);
    }

  if (c->rest)
    {
      aval = c->rest;
      if (val)
        {
          type = TREE_TYPE (aval);
          aval = fold_build2 (MULT_EXPR, type, aval,
                              fold_convert (type, val));
        }

      aff_combination_add_elt (r, aval, coef);
    }

  if (val)
    aff_combination_add_elt (r, val, coef * c->offset);
  else
    aff_combination_add_cst (r, coef * c->offset);
}

/* Multiplies C1 by C2, storing the result to R.  */

void
aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
{
  unsigned i;
  gcc_assert (TYPE_PRECISION (c1->type) == TYPE_PRECISION (c2->type));

  aff_combination_zero (r, c1->type);

  for (i = 0; i < c2->n; i++)
    aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
  if (c2->rest)
    aff_combination_add_product (c1, double_int_one, c2->rest, r);
  aff_combination_add_product (c1, c2->offset, NULL, r);
}
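
/* Worked example (editorial note, not part of the original file):
   the product is distributed term by term, so multiplying
   { offset 1; 2 * x }  by  { offset 3; 4 * y }  yields
   { offset 3; 6 * x; 4 * y; 8 * (x * y) }, where the x * y element is
   built with fold_build2 (MULT_EXPR, ...) on the two values.  */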

/* Returns the element of COMB whose value is VAL, or NULL if no such
   element exists.  If IDX is not NULL, it is set to the index of VAL in
   COMB.  */

static struct aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
  unsigned i;

  for (i = 0; i < comb->n; i++)
    if (operand_equal_p (comb->elts[i].val, val, 0))
      {
        if (idx)
          *idx = i;

        return &comb->elts[i];
      }

  return NULL;
}

/* Element of the cache that maps ssa name NAME to its expanded form
   as an affine expression EXPANSION.  */

struct name_expansion
{
  aff_tree expansion;

  /* True if the expansion for the name is just being generated.  */
  unsigned in_progress : 1;
};

/* Expands SSA names in COMB recursively.  CACHE is used to cache the
   results.  */

void
aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
                        struct pointer_map_t **cache ATTRIBUTE_UNUSED)
{
  unsigned i;
  aff_tree to_add, current, curre;
  tree e, rhs;
  gimple def;
  double_int scale;
  void **slot;
  struct name_expansion *exp;

  aff_combination_zero (&to_add, comb->type);
  for (i = 0; i < comb->n; i++)
    {
      tree type, name;
      enum tree_code code;

      e = comb->elts[i].val;
      type = TREE_TYPE (e);
      name = e;
      /* Look through some conversions.  */
      if (TREE_CODE (e) == NOP_EXPR
          && (TYPE_PRECISION (type)
              >= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (e, 0)))))
        name = TREE_OPERAND (e, 0);
      if (TREE_CODE (name) != SSA_NAME)
        continue;
      def = SSA_NAME_DEF_STMT (name);
      if (!is_gimple_assign (def) || gimple_assign_lhs (def) != name)
        continue;

      code = gimple_assign_rhs_code (def);
      if (code != SSA_NAME
          && !IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code))
          && (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS
              || !is_gimple_min_invariant (gimple_assign_rhs1 (def))))
        continue;

      /* We do not know whether the reference retains its value at the
         place where the expansion is used.  */
      if (TREE_CODE_CLASS (code) == tcc_reference)
        continue;

      if (!*cache)
        *cache = pointer_map_create ();
      slot = pointer_map_insert (*cache, e);
      exp = (struct name_expansion *) *slot;

      if (!exp)
        {
          exp = XNEW (struct name_expansion);
          exp->in_progress = 1;
          *slot = exp;
          /* In principle this is a generally valid folding, but
             it is not unconditionally an optimization, so do it
             here and not in fold_unary.  */
          /* Convert (T1)(X *+- CST) into (T1)X *+- (T1)CST if T1 is wider
             than the type of X and overflow for the type of X is
             undefined.  */
          if (e != name
              && INTEGRAL_TYPE_P (type)
              && INTEGRAL_TYPE_P (TREE_TYPE (name))
              && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (name))
              && (code == PLUS_EXPR || code == MINUS_EXPR || code == MULT_EXPR)
              && TREE_CODE (gimple_assign_rhs2 (def)) == INTEGER_CST)
            rhs = fold_build2 (code, type,
                               fold_convert (type, gimple_assign_rhs1 (def)),
                               fold_convert (type, gimple_assign_rhs2 (def)));
          else
            {
              rhs = gimple_assign_rhs_to_tree (def);
              if (e != name)
                rhs = fold_convert (type, rhs);
            }
          tree_to_aff_combination_expand (rhs, comb->type, &current, cache);
          exp->expansion = current;
          exp->in_progress = 0;
        }
      else
        {
          /* Since we follow the definitions in the SSA form, we should not
             enter a cycle unless we pass through a phi node.  */
          gcc_assert (!exp->in_progress);
          current = exp->expansion;
        }

      /* Accumulate the new terms to TO_ADD, so that we do not modify
         COMB while traversing it; include the term -coef * E, to remove
         it from COMB.  */
      scale = comb->elts[i].coef;
      aff_combination_zero (&curre, comb->type);
      aff_combination_add_elt (&curre, e, -scale);
      aff_combination_scale (&current, scale);
      aff_combination_add (&to_add, &current);
      aff_combination_add (&to_add, &curre);
    }
  aff_combination_add (comb, &to_add);
}

/* Similar to tree_to_aff_combination, but follows SSA name definitions
   and expands them recursively.  CACHE is used to cache the expansions
   of the ssa names, to avoid exponential time complexity for cases
   where the same name is reused many times.  */

void
tree_to_aff_combination_expand (tree expr, tree type, aff_tree *comb,
                                struct pointer_map_t **cache)
{
  tree_to_aff_combination (expr, type, comb);
  aff_combination_expand (comb, cache);
}
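
/* Usage sketch (editorial illustration, not part of the original file):
   callers typically keep one cache across many expansions and release
   it when done:

       struct pointer_map_t *cache = NULL;
       aff_tree comb;
       tree_to_aff_combination_expand (expr, type, &comb, &cache);
       ...
       free_affine_expand_cache (&cache);  */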

/* Frees memory occupied by struct name_expansion in *VALUE.  Callback for
   pointer_map_traverse.  */

static bool
free_name_expansion (const void *key ATTRIBUTE_UNUSED, void **value,
                     void *data ATTRIBUTE_UNUSED)
{
  struct name_expansion *const exp = (struct name_expansion *) *value;

  free (exp);
  return true;
}

/* Frees memory allocated for the CACHE used by
   tree_to_aff_combination_expand.  */

void
free_affine_expand_cache (struct pointer_map_t **cache)
{
  if (!*cache)
    return;

  pointer_map_traverse (*cache, free_name_expansion, NULL);
  pointer_map_destroy (*cache);
  *cache = NULL;
}

/* If VAL != CST * DIV for any constant CST, returns false.
   Otherwise, if VAL != 0 (and hence CST != 0), and *MULT_SET is true,
   additionally compares CST and MULT, and if they are different,
   returns false.  Finally, if neither of these two cases occur,
   true is returned, and if CST != 0, CST is stored to MULT and
   MULT_SET is set to true.  */

static bool
double_int_constant_multiple_p (double_int val, double_int div,
                                bool *mult_set, double_int *mult)
{
  double_int rem, cst;

  if (val.is_zero ())
    {
      if (*mult_set && !mult->is_zero ())
        return false;
      *mult_set = true;
      *mult = double_int_zero;
      return true;
    }

  if (div.is_zero ())
    return false;

  cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem);
  if (!rem.is_zero ())
    return false;

  if (*mult_set && *mult != cst)
    return false;

  *mult_set = true;
  *mult = cst;
  return true;
}
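
/* Worked example (editorial note, not part of the original file):
   with VAL = 12 and DIV = 4, sdivmod yields CST = 3 with remainder 0;
   if *MULT_SET was already true the function additionally requires
   *MULT == 3, otherwise it stores 3 in *MULT and sets *MULT_SET.
   VAL = 13, DIV = 4 fails because the remainder is nonzero.  */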

/* Returns true if VAL = X * DIV for some constant X.  If this is the case,
   X is stored to MULT.  */

bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
                                     double_int *mult)
{
  bool mult_set = false;
  unsigned i;

  if (val->n == 0 && val->offset.is_zero ())
    {
      *mult = double_int_zero;
      return true;
    }

  if (val->n != div->n)
    return false;

  if (val->rest || div->rest)
    return false;

  if (!double_int_constant_multiple_p (val->offset, div->offset,
                                       &mult_set, mult))
    return false;

  for (i = 0; i < div->n; i++)
    {
      struct aff_comb_elt *elt
        = aff_combination_find_elt (val, div->elts[i].val, NULL);
      if (!elt)
        return false;
      if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef,
                                           &mult_set, mult))
        return false;
    }

  gcc_assert (mult_set);
  return true;
}

/* Prints the affine VAL to the FILE.  */

static void
print_aff (FILE *file, aff_tree *val)
{
  unsigned i;
  bool uns = TYPE_UNSIGNED (val->type);
  if (POINTER_TYPE_P (val->type))
    uns = false;
  fprintf (file, "{\n  type = ");
  print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
  fprintf (file, "\n  offset = ");
  dump_double_int (file, val->offset, uns);
  if (val->n > 0)
    {
      fprintf (file, "\n  elements = {\n");
      for (i = 0; i < val->n; i++)
        {
          fprintf (file, "    [%d] = ", i);
          print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);

          fprintf (file, " * ");
          dump_double_int (file, val->elts[i].coef, uns);
          if (i != val->n - 1)
            fprintf (file, ", \n");
        }
      fprintf (file, "\n  }");
    }
  if (val->rest)
    {
      fprintf (file, "\n  rest = ");
      print_generic_expr (file, val->rest, TDF_VOPS|TDF_MEMSYMS);
    }
  fprintf (file, "\n}");
}

/* Prints the affine VAL to the standard error, used for debugging.  */

DEBUG_FUNCTION void
debug_aff (aff_tree *val)
{
  print_aff (stderr, val);
  fprintf (stderr, "\n");
}

/* Returns address of the reference REF in ADDR.  The size of the accessed
   location is stored to SIZE.  */

void
get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
{
  HOST_WIDE_INT bitsize, bitpos;
  tree toff;
  enum machine_mode mode;
  int uns, vol;
  aff_tree tmp;
  tree base = get_inner_reference (ref, &bitsize, &bitpos, &toff, &mode,
                                   &uns, &vol, false);
  tree base_addr = build_fold_addr_expr (base);

  /* ADDR = &BASE + TOFF + BITPOS / BITS_PER_UNIT.  */

  tree_to_aff_combination (base_addr, sizetype, addr);

  if (toff)
    {
      tree_to_aff_combination (toff, sizetype, &tmp);
      aff_combination_add (addr, &tmp);
    }

  aff_combination_const (&tmp, sizetype,
                         double_int::from_shwi (bitpos / BITS_PER_UNIT));
  aff_combination_add (addr, &tmp);

  *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
}
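
/* Worked example (editorial note, not part of the original file):
   for a 32-bit scalar field at byte offset 8 within *P, ADDR becomes
   the combination of  &*P  plus the constant 8, and *SIZE is
   (32 + BITS_PER_UNIT - 1) / BITS_PER_UNIT, i.e. 4 on 8-bit-byte
   targets.  */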

/* Returns true if a region of size SIZE1 at position 0 and a region of
   size SIZE2 at position DIFF cannot overlap.  */

bool
aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
{
  double_int d, bound;

  /* Unless the difference is a constant, we fail.  */
  if (diff->n != 0)
    return false;

  d = diff->offset;
  if (d.is_negative ())
    {
      /* The second object is before the first one, we succeed if the last
         element of the second object is before the start of the first one.  */
      bound = d + size2 + double_int_minus_one;
      return bound.is_negative ();
    }
  else
    {
      /* We succeed if the second object starts after the first one ends.  */
      return size1.sle (d);
    }
}
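
/* Worked example (editorial note, not part of the original file):
   with DIFF = { offset 16 }, SIZE1 = 8 and SIZE2 = 8, the non-negative
   branch checks size1.sle (d), i.e. 8 <= 16, so the regions [0, 8) and
   [16, 24) cannot overlap.  With DIFF = { offset -4 } the negative
   branch computes -4 + 8 - 1 = 3, which is not negative, so an overlap
   cannot be ruled out.  */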