/* Data structure for the modref pass.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* modref_tree represents a decision tree that can be used by the alias
   analysis oracle to determine whether a given memory access can be affected
   by a function call.  For every function we collect two trees, one for loads
   and the other for stores.  The tree consists of the following levels:

   1) Base: this level represents the base alias set of the access and refers
      to sons (ref nodes).  The flag all_refs means that all possible
      references are aliasing.

      Because for LTO streaming we need to stream types rather than alias
      sets, modref_base_node is implemented as a template.
   2) Ref: this level represents the ref alias set and links to accesses
      unless the all_refs flag is set.
      Again, ref is a template to allow LTO streaming.
   3) Access: this level represents info about individual accesses.  Presently
      we record whether the access is through a dereference of a function
      parameter and, if so, the access range.  */
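
/* For illustration, consider a hypothetical function such as

     void f (int *p) { p[1] = 42; }

   Its store tree would contain one base node and one ref node (the alias
   sets of int, or the type int when streaming for LTO) and a single
   modref_access_node with parm_index 0, parm_offset_known true,
   parm_offset 0 and, assuming a 32-bit int, offset 32, size 32 and
   max_size 32 (all in bits).  Exact alias sets and offsets depend on the
   target and on TBAA.  */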
#ifndef GCC_MODREF_TREE_H
#define GCC_MODREF_TREE_H

struct ipa_modref_summary;
struct GTY(()) modref_access_node
{
  /* Access range information (in bits).  */
  poly_int64 offset;
  poly_int64 size;
  poly_int64 max_size;

  /* Offset from parameter pointer to the base of the access (in bytes).  */
  poly_int64 parm_offset;

  /* Index of parameter which specifies the base of access. -1 if base is not
     a function parameter.  */
  int parm_index;
  bool parm_offset_known;
  /* Number of times interval was extended during dataflow.
     This has to be limited in order to keep dataflow finite.  */
  unsigned char adjustments;
  /* Return true if access node holds some useful info.  */
  bool useful_p () const
    {
      return parm_index != -1;
    }
  /* Return true if range info is useful.  */
  bool range_info_useful_p () const
    {
      return parm_index != -1 && parm_offset_known
	     && (known_size_p (size)
		 || known_size_p (max_size)
		 || known_ge (offset, 0));
    }
  /* Return true if both accesses are the same.  */
  bool operator == (modref_access_node &a) const
    {
      if (parm_index != a.parm_index)
	return false;
      if (parm_index >= 0)
	{
	  if (parm_offset_known != a.parm_offset_known)
	    return false;
	  if (parm_offset_known
	      && !known_eq (parm_offset, a.parm_offset))
	    return false;
	}
      if (range_info_useful_p () != a.range_info_useful_p ())
	return false;
      if (range_info_useful_p ()
	  && (!known_eq (a.offset, offset)
	      || !known_eq (a.size, size)
	      || !known_eq (a.max_size, max_size)))
	return false;
      return true;
    }
  /* Return true if A is a subaccess.  */
  bool contains (const modref_access_node &a) const
    {
      poly_int64 aoffset_adj = 0;
      if (parm_index >= 0)
	{
	  if (parm_index != a.parm_index)
	    return false;
	  if (parm_offset_known)
	    {
	      if (!a.parm_offset_known)
		return false;
	      /* Accesses are never below parm_offset, so look
		 for smaller offset.  */
	      if (!known_le (parm_offset, a.parm_offset))
		return false;
	      aoffset_adj = (a.parm_offset - parm_offset)
			    << LOG2_BITS_PER_UNIT;
	    }
	}
      if (range_info_useful_p ())
	{
	  if (!a.range_info_useful_p ())
	    return false;
	  /* Sizes of stores are used to check that object is big enough
	     to fit the store, so a smaller or unknown store is more general
	     than a large one.  */
	  if (known_size_p (size)
	      && (!known_size_p (a.size)
		  || !known_le (size, a.size)))
	    return false;
	  if (known_size_p (max_size))
	    return known_subrange_p (a.offset + aoffset_adj,
				     a.max_size, offset, max_size);
	  else
	    return known_le (offset, a.offset + aoffset_adj);
	}
      return true;
    }
  /* Update access range to new parameters.
     If RECORD_ADJUSTMENTS is true, record the number of changes in the access
     and if the threshold is exceeded, start dropping precision
     so only constantly many updates are possible.  This makes the dataflow
     converge.  */
  void update (poly_int64 parm_offset1,
	       poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
	       bool record_adjustments)
    {
      if (known_eq (offset, offset1)
	  && known_eq (size, size1)
	  && known_eq (max_size, max_size1))
	return;
      if (!record_adjustments
	  || (++adjustments) < param_modref_max_adjustments)
	{
	  parm_offset = parm_offset1;
	  offset = offset1;
	  size = size1;
	  max_size = max_size1;
	}
      else
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "--param param=modref-max-adjustments limit reached:");
	  if (!known_eq (parm_offset, parm_offset1))
	    {
	      if (dump_file)
		fprintf (dump_file, " parm_offset cleared");
	      parm_offset_known = false;
	    }
	  if (!known_eq (size, size1))
	    {
	      size = -1;
	      if (dump_file)
		fprintf (dump_file, " size cleared");
	    }
	  if (!known_eq (max_size, max_size1))
	    {
	      max_size = -1;
	      if (dump_file)
		fprintf (dump_file, " max_size cleared");
	    }
	  if (!known_eq (offset, offset1))
	    {
	      offset = 0;
	      if (dump_file)
		fprintf (dump_file, " offset cleared");
	    }
	  if (dump_file)
	    fprintf (dump_file, "\n");
	}
    }
  /* Merge in access A if it is possible to do without losing
     precision.  Return true if successful.
     If RECORD_ADJUSTMENTS is true, remember how many times the interval
     was prolonged and punt when there are too many.  */
  bool merge (const modref_access_node &a, bool record_adjustments)
    {
      poly_int64 offset1 = 0;
      poly_int64 aoffset1 = 0;
      poly_int64 new_parm_offset = 0;

      /* We assume that containment was tested earlier.  */
      gcc_checking_assert (!contains (a) && !a.contains (*this));

      if (parm_index != a.parm_index)
	return false;
      if (parm_offset_known)
	{
	  if (!a.parm_offset_known)
	    return false;
	  if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
	    return false;
	}

      /* See if we can merge ranges.  */
      if (range_info_useful_p ())
	{
	  /* In this case we have containment that should be
	     handled earlier.  */
	  gcc_checking_assert (a.range_info_useful_p ());

	  /* If a.size is less specified than size, merge only
	     if intervals are otherwise equivalent.  */
	  if (known_size_p (size)
	      && (!known_size_p (a.size) || known_lt (a.size, size)))
	    {
	      if (((known_size_p (max_size) || known_size_p (a.max_size))
		   && !known_eq (max_size, a.max_size))
		   || !known_eq (offset1, aoffset1))
		return false;
	      update (new_parm_offset, offset1, a.size, max_size,
		      record_adjustments);
	      return true;
	    }
	  /* If sizes are the same, we can extend the interval.  */
	  if ((known_size_p (size) || known_size_p (a.size))
	      && !known_eq (size, a.size))
	    return false;
	  if (known_le (offset1, aoffset1))
	    {
	      if (!known_size_p (max_size)
		  || known_ge (offset1 + max_size, aoffset1))
		{
		  update2 (new_parm_offset, offset1, size, max_size,
			   aoffset1, a.size, a.max_size,
			   record_adjustments);
		  return true;
		}
	    }
	  else if (known_le (aoffset1, offset1))
	    {
	      if (!known_size_p (a.max_size)
		  || known_ge (aoffset1 + a.max_size, offset1))
		{
		  update2 (new_parm_offset, offset1, size, max_size,
			   aoffset1, a.size, a.max_size,
			   record_adjustments);
		  return true;
		}
	    }
	  return false;
	}
      update (new_parm_offset, offset1,
	      size, max_size, record_adjustments);
      return true;
    }
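
  /* For example, two accesses to the same parameter with equal sizes and
     bit ranges [0, 64) and [32, 96) overlap, so merge () combines them
     losslessly into [0, 96) via update2; for disjoint ranges such as
     [0, 32) and [128, 160) it returns false and the caller must either
     keep both nodes or fall back to forced_merge.  */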
  /* Return true if A1 and B1 can be merged with lower information
     loss than A2 and B2.
     Assume that no containment or lossless merging is possible.  */
  static bool closer_pair_p (const modref_access_node &a1,
			     const modref_access_node &b1,
			     const modref_access_node &a2,
			     const modref_access_node &b2)
    {
      /* Merging different parm indexes comes to complete loss
	 of range info.  */
      if (a1.parm_index != b1.parm_index)
	return false;
      if (a2.parm_index != b2.parm_index)
	return true;
      /* If parm is known and parm indexes are the same we should
	 already have containment.  */
      gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
      gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

      /* First normalize offsets for parm offsets.  */
      poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
      if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
	  || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
	gcc_unreachable ();

      /* Now compute the distance of the intervals.  */
      poly_int64 dist1, dist2;
      if (known_le (offseta1, offsetb1))
	{
	  if (!known_size_p (a1.max_size))
	    dist1 = 0;
	  else
	    dist1 = offsetb1 - offseta1 - a1.max_size;
	}
      else
	{
	  if (!known_size_p (b1.max_size))
	    dist1 = 0;
	  else
	    dist1 = offseta1 - offsetb1 - b1.max_size;
	}
      if (known_le (offseta2, offsetb2))
	{
	  if (!known_size_p (a2.max_size))
	    dist2 = 0;
	  else
	    dist2 = offsetb2 - offseta2 - a2.max_size;
	}
      else
	{
	  if (!known_size_p (b2.max_size))
	    dist2 = 0;
	  else
	    dist2 = offseta2 - offsetb2 - b2.max_size;
	}
      /* It may happen that intervals overlap in case size
	 is different.  Prefer the overlap to non-overlap.  */
      if (known_lt (dist1, 0) && known_ge (dist2, 0))
	return true;
      if (known_lt (dist2, 0) && known_ge (dist1, 0))
	return false;
      if (known_lt (dist1, 0))
	/* If both overlap, minimize the overlap.  */
	return known_le (dist2, dist1);
      else
	/* If both are disjoint, look for the smaller distance.  */
	return known_le (dist1, dist2);
    }
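
  /* For example, with intervals A1 = [0, 32), B1 = [96, 128) and
     A2 = [0, 32), B2 = [40, 72) (bits, same parameter, known parm offsets),
     dist1 is 64 while dist2 is 8, so closer_pair_p returns false: merging
     A2 with B2 loses less information than merging A1 with B1.  */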
  /* Merge in access A while losing precision.  */
  void forced_merge (const modref_access_node &a, bool record_adjustments)
    {
      if (parm_index != a.parm_index)
	{
	  gcc_checking_assert (parm_index != -1);
	  parm_index = -1;
	  return;
	}

      /* We assume that containment and lossless merging
	 was tested earlier.  */
      gcc_checking_assert (!contains (a) && !a.contains (*this)
			   && !merge (a, record_adjustments));
      gcc_checking_assert (parm_offset_known && a.parm_offset_known);

      poly_int64 new_parm_offset, offset1, aoffset1;
      if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
	{
	  parm_offset_known = false;
	  return;
	}
      gcc_checking_assert (range_info_useful_p ()
			   && a.range_info_useful_p ());
      if (record_adjustments)
	adjustments += a.adjustments;
      update2 (new_parm_offset,
	       offset1, size, max_size,
	       aoffset1, a.size, a.max_size,
	       record_adjustments);
    }
  /* Merge two ranges both starting at parm_offset1 and update THIS
     with the result.  */
  void update2 (poly_int64 parm_offset1,
		poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
		poly_int64 offset2, poly_int64 size2, poly_int64 max_size2,
		bool record_adjustments)
    {
      poly_int64 new_size = size1;

      if (!known_size_p (size2)
	  || known_le (size2, size1))
	new_size = size2;
      else
	gcc_checking_assert (known_le (size1, size2));

      if (known_le (offset1, offset2))
	;
      else if (known_le (offset2, offset1))
	{
	  std::swap (offset1, offset2);
	  std::swap (max_size1, max_size2);
	}
      else
	gcc_unreachable ();

      poly_int64 new_max_size;

      if (!known_size_p (max_size1))
	new_max_size = max_size1;
      else if (!known_size_p (max_size2))
	new_max_size = max_size2;
      else
	{
	  max_size2 = max_size2 + offset2 - offset1;
	  if (known_le (max_size1, max_size2))
	    new_max_size = max_size2;
	  else if (known_le (max_size2, max_size1))
	    new_max_size = max_size1;
	  else
	    gcc_unreachable ();
	}

      update (parm_offset1, offset1,
	      new_size, new_max_size, record_adjustments);
    }
  /* Given access nodes THIS and A, return true if their ranges can be
     expressed with a common parm_offset.  In that case return the parm
     offset in NEW_PARM_OFFSET, the start of the range in THIS in NEW_OFFSET
     and the start of the range in A in NEW_AOFFSET.  */
  bool combined_offsets (const modref_access_node &a,
			 poly_int64 *new_parm_offset,
			 poly_int64 *new_offset,
			 poly_int64 *new_aoffset) const
    {
      gcc_checking_assert (parm_offset_known && a.parm_offset_known);
      if (known_le (a.parm_offset, parm_offset))
	{
	  *new_offset = offset
			+ ((parm_offset - a.parm_offset)
			   << LOG2_BITS_PER_UNIT);
	  *new_aoffset = a.offset;
	  *new_parm_offset = a.parm_offset;
	  return true;
	}
      else if (known_le (parm_offset, a.parm_offset))
	{
	  *new_aoffset = a.offset
			 + ((a.parm_offset - parm_offset)
			    << LOG2_BITS_PER_UNIT);
	  *new_offset = offset;
	  *new_parm_offset = parm_offset;
	  return true;
	}
      else
	return false;
    }
};
/* Access node specifying no useful info.  */
const modref_access_node unspecified_modref_access_node
  = {0, -1, -1, 0, -1, false, 0};
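
/* The initializer fills the fields in declaration order: offset 0, size -1,
   max_size -1, parm_offset 0, parm_index -1, parm_offset_known false and
   adjustments 0; i.e. an access that is not based on any parameter and
   carries no range info.  */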
template <typename T>
struct GTY((user)) modref_ref_node
{
  T ref;
  bool every_access;
  vec <modref_access_node, va_gc> *accesses;

  modref_ref_node (T ref):
    ref (ref),
    every_access (false),
    accesses (NULL)
  {}

  /* Collapse the tree.  */
  void collapse ()
  {
    vec_free (accesses);
    accesses = NULL;
    every_access = true;
  }

  /* Verify that list does not contain redundant accesses.  */
  void verify ()
  {
    size_t i, i2;
    modref_access_node *a, *a2;

    FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
      FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
	if (i != i2)
	  gcc_assert (!a->contains (*a2));
  }
  /* Insert access with OFFSET and SIZE.
     Collapse tree if it has more than MAX_ACCESSES entries.
     If RECORD_ADJUSTMENTS is true, avoid too many interval extensions.
     Return true if record was changed.  */
  bool insert_access (modref_access_node a, size_t max_accesses,
		      bool record_adjustments)
  {
    /* If this base->ref pair has no access information, bail out.  */
    if (every_access)
      return false;

    /* Otherwise, insert a node for the ref of the access under the base.  */
    size_t i, j;
    modref_access_node *a2;

    if (flag_checking)
      verify ();

    FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
      {
	if (a2->contains (a))
	  return false;
	if (a.contains (*a2))
	  {
	    a.adjustments = 0;
	    a2->parm_index = a.parm_index;
	    a2->parm_offset_known = a.parm_offset_known;
	    a2->update (a.parm_offset, a.offset, a.size, a.max_size,
			record_adjustments);
	    try_merge_with (i);
	    return true;
	  }
	if (a2->merge (a, record_adjustments))
	  {
	    try_merge_with (i);
	    return true;
	  }
	gcc_checking_assert (!(a == *a2));
      }

    /* If this base->ref pair has too many accesses stored, we will clear
       all accesses and bail out.  */
    if (accesses && accesses->length () >= max_accesses)
      {
	if (max_accesses < 2)
	  {
	    collapse ();
	    if (dump_file)
	      fprintf (dump_file,
		       "--param param=modref-max-accesses limit reached;"
		       " collapsing\n");
	    return true;
	  }
	/* Find least harmful merge and perform it.  */
	int best1 = -1, best2 = -1;
	FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
	  {
	    for (j = i + 1; j < accesses->length (); j++)
	      if (best1 < 0
		  || modref_access_node::closer_pair_p
			 (*a2, (*accesses)[j],
			  (*accesses)[best1],
			  best2 < 0 ? a : (*accesses)[best2]))
		{
		  best1 = (int)i;
		  best2 = (int)j;
		}
	    if (modref_access_node::closer_pair_p
		   (*a2, a,
		    (*accesses)[best1],
		    best2 < 0 ? a : (*accesses)[best2]))
	      {
		best1 = (int)i;
		best2 = -1;
	      }
	  }
	(*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
					 record_adjustments);
	/* Check that merging indeed merged ranges.  */
	if (!(*accesses)[best1].useful_p ())
	  {
	    collapse ();
	    if (dump_file)
	      fprintf (dump_file,
		       "--param param=modref-max-accesses limit reached;"
		       " collapsing\n");
	    return true;
	  }
	if (dump_file && best2 >= 0)
	  fprintf (dump_file,
		   "--param param=modref-max-accesses limit reached;"
		   " merging %i and %i\n", best1, best2);
	else if (dump_file)
	  fprintf (dump_file,
		   "--param param=modref-max-accesses limit reached;"
		   " merging with %i\n", best1);
	try_merge_with (best1);
	if (best2 >= 0)
	  insert_access (a, max_accesses, record_adjustments);
	return true;
      }
    vec_safe_push (accesses, a);
    return true;
  }
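
  /* For example, with max_accesses 2 and accesses [0, 32) and [96, 128)
     already recorded for a parameter, inserting [40, 72) takes the branch
     above: closer_pair_p selects [0, 32) and the new access as the closest
     pair and forced_merge widens the first node to [0, 72).  The recursive
     re-insertion only happens when two already-recorded accesses are merged
     instead.  */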
  /* Try to optimize the access list after entry INDEX was modified.  */
  void
  try_merge_with (size_t index)
  {
    size_t i;

    for (i = 0; i < accesses->length ();)
      if (i != index)
	{
	  bool found = false, restart = false;
	  modref_access_node *a = &(*accesses)[i];
	  modref_access_node *n = &(*accesses)[index];

	  if (n->contains (*a))
	    found = true;
	  if (!found && n->merge (*a, false))
	    found = restart = true;
	  if (found)
	    {
	      accesses->unordered_remove (i);
	      if (index == accesses->length ())
		index = i;
	      if (restart)
		i = 0;
	    }
	  else
	    i++;
	}
      else
	i++;
  }
};
/* Base of an access.  */
template <typename T>
struct GTY((user)) modref_base_node
{
  T base;
  vec <modref_ref_node <T> *, va_gc> *refs;
  bool every_ref;

  modref_base_node (T base):
    base (base),
    refs (NULL),
    every_ref (false) {}

  /* Search REF; return NULL if failed.  */
  modref_ref_node <T> *search (T ref)
  {
    size_t i;
    modref_ref_node <T> *n;
    FOR_EACH_VEC_SAFE_ELT (refs, i, n)
      if (n->ref == ref)
	return n;
    return NULL;
  }

  /* Insert REF; collapse tree if there are more than MAX_REFS.
     Return inserted ref and if CHANGED is non-null set it to true if
     something changed.  */
  modref_ref_node <T> *insert_ref (T ref, size_t max_refs,
				   bool *changed = NULL)
  {
    modref_ref_node <T> *ref_node;

    /* If the node is collapsed, don't do anything.  */
    if (every_ref)
      return NULL;

    /* Otherwise, insert a node for the ref of the access under the base.  */
    ref_node = search (ref);
    if (ref_node)
      return ref_node;

    /* We always allow inserting ref 0.  For non-0 refs there is an upper
       limit on the number of entries and if it is exceeded,
       drop ref conservatively to 0.  */
    if (ref && refs && refs->length () >= max_refs)
      {
	if (dump_file)
	  fprintf (dump_file, "--param param=modref-max-refs limit reached;"
		   " using 0\n");
	ref = 0;
	ref_node = search (ref);
	if (ref_node)
	  return ref_node;
      }

    if (changed)
      *changed = true;

    ref_node = new (ggc_alloc <modref_ref_node <T> > ())modref_ref_node <T>
								 (ref);
    vec_safe_push (refs, ref_node);
    return ref_node;
  }

  void collapse ()
  {
    size_t i;
    modref_ref_node <T> *r;

    if (refs)
      {
	FOR_EACH_VEC_SAFE_ELT (refs, i, r)
	  {
	    r->collapse ();
	    ggc_free (r);
	  }
	vec_free (refs);
	refs = NULL;
      }
    every_ref = true;
  }
};
/* Map translating parameters across function call.  */

struct modref_parm_map
{
  /* Index of parameter we translate to.
     -1 indicates that the parameter is unknown
     -2 indicates that the parameter points to local memory and the access
	can be discarded.  */
  int parm_index;
  bool parm_offset_known;
  poly_int64 parm_offset;
};
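
/* For example, when merging the summary of a callee called as foo (q + 4),
   where q is parameter 2 of the caller, the map entry for the callee's
   parameter 0 would have parm_index 2, parm_offset 4 and parm_offset_known
   true, so the callee's accesses are re-expressed in the caller's terms.  */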
/* Access tree for a single function.  */
template <typename T>
struct GTY((user)) modref_tree
{
  vec <modref_base_node <T> *, va_gc> *bases;
  size_t max_bases;
  size_t max_refs;
  size_t max_accesses;
  bool every_base;

  modref_tree (size_t max_bases, size_t max_refs, size_t max_accesses):
    bases (NULL),
    max_bases (max_bases),
    max_refs (max_refs),
    max_accesses (max_accesses),
    every_base (false) {}
  /* Insert BASE; collapse tree if there are more than MAX_REFS.
     Return inserted base and if CHANGED is non-null set it to true if
     something changed.
     If the table gets full, try to insert REF instead.  */

  modref_base_node <T> *insert_base (T base, T ref, bool *changed = NULL)
  {
    modref_base_node <T> *base_node;

    /* If the node is collapsed, don't do anything.  */
    if (every_base)
      return NULL;

    /* Otherwise, insert a node for the base of the access into the tree.  */
    base_node = search (base);
    if (base_node)
      return base_node;

    /* We always allow inserting base 0.  For a non-0 base there is an upper
       limit on the number of entries and if it is exceeded,
       drop base conservatively to ref and if it still does not fit, to 0.  */
    if (base && bases && bases->length () >= max_bases)
      {
	base_node = search (ref);
	if (base_node)
	  {
	    if (dump_file)
	      fprintf (dump_file, "--param param=modref-max-bases"
		       " limit reached; using ref\n");
	    return base_node;
	  }
	if (dump_file)
	  fprintf (dump_file, "--param param=modref-max-bases"
		   " limit reached; using 0\n");
	base = 0;
	base_node = search (base);
	if (base_node)
	  return base_node;
      }

    if (changed)
      *changed = true;

    base_node = new (ggc_alloc <modref_base_node <T> > ())
			 modref_base_node <T> (base);
    vec_safe_push (bases, base_node);
    return base_node;
  }
  /* Insert memory access to the tree.
     Return true if something changed.  */
  bool insert (T base, T ref, modref_access_node a,
	       bool record_adjustments)
  {
    if (every_base)
      return false;

    bool changed = false;

    /* No useful information tracked; collapse everything.  */
    if (!base && !ref && !a.useful_p ())
      {
	collapse ();
	return true;
      }

    modref_base_node <T> *base_node = insert_base (base, ref, &changed);
    base = base_node->base;
    /* If the table got full we may end up with a useless base.  */
    if (!base && !ref && !a.useful_p ())
      {
	collapse ();
	return true;
      }
    if (base_node->every_ref)
      return changed;
    gcc_checking_assert (search (base) != NULL);

    /* No useful ref info tracked; collapse base.  */
    if (!ref && !a.useful_p ())
      {
	base_node->collapse ();
	return true;
      }

    modref_ref_node <T> *ref_node = base_node->insert_ref (ref, max_refs,
							   &changed);
    ref = ref_node->ref;

    if (ref_node->every_access)
      return changed;
    changed |= ref_node->insert_access (a, max_accesses,
					record_adjustments);
    /* See if we failed to add useful access.  */
    if (ref_node->every_access)
      {
	/* Collapse everything if there is no useful base and ref.  */
	if (!base && !ref)
	  {
	    collapse ();
	    gcc_checking_assert (changed);
	  }
	/* Collapse base if there is no useful ref.  */
	else if (!ref)
	  {
	    base_node->collapse ();
	    gcc_checking_assert (changed);
	  }
      }
    return changed;
  }
  /* Remove tree branches that are not useful (i.e. they will always pass).  */

  void cleanup ()
  {
    size_t i, j;
    modref_base_node <T> *base_node;
    modref_ref_node <T> *ref_node;

    if (!bases)
      return;

    for (i = 0; vec_safe_iterate (bases, i, &base_node);)
      {
	if (base_node->base)
	  for (j = 0; vec_safe_iterate (base_node->refs, j, &ref_node);)
	    {
	      if (!ref_node->every_access
		  && (!ref_node->accesses
		      || !ref_node->accesses->length ()))
		{
		  base_node->refs->unordered_remove (j);
		  vec_free (ref_node->accesses);
		  ggc_delete (ref_node);
		}
	      else
		j++;
	    }
	if (!base_node->every_ref
	    && (!base_node->refs
		|| !base_node->refs->length ()))
	  {
	    bases->unordered_remove (i);
	    vec_free (base_node->refs);
	    ggc_delete (base_node);
	  }
	else
	  i++;
      }
    if (bases && !bases->length ())
      {
	vec_free (bases);
	bases = NULL;
      }
  }
  /* Merge OTHER into the tree.
     PARM_MAP, if non-NULL, maps parm indexes of callee to caller.  -2 is used
     to signal that the parameter is local and does not need to be tracked.
     Return true if something has changed.  */
  bool merge (modref_tree <T> *other, vec <modref_parm_map> *parm_map,
	      bool record_accesses)
  {
    if (!other || every_base)
      return false;
    if (other->every_base)
      {
	collapse ();
	return true;
      }

    bool changed = false;
    size_t i, j, k;
    modref_base_node <T> *base_node, *my_base_node;
    modref_ref_node <T> *ref_node;
    modref_access_node *access_node;
    bool release = false;

    /* For self-recursive functions we may end up merging summary into itself;
       produce copy first so we do not modify summary under our own hands.  */
    if (other == this)
      {
	release = true;
	other = modref_tree <T>::create_ggc (max_bases, max_refs, max_accesses);
	other->copy_from (this);
      }

    FOR_EACH_VEC_SAFE_ELT (other->bases, i, base_node)
      {
	if (base_node->every_ref)
	  {
	    my_base_node = insert_base (base_node->base, 0, &changed);
	    if (my_base_node && !my_base_node->every_ref)
	      {
		my_base_node->collapse ();
		cleanup ();
		changed = true;
	      }
	  }
	else
	  FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
	    {
	      if (ref_node->every_access)
		{
		  changed |= insert (base_node->base,
				     ref_node->ref,
				     unspecified_modref_access_node,
				     record_accesses);
		}
	      else
		FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
		  {
		    modref_access_node a = *access_node;

		    if (a.parm_index != -1 && parm_map)
		      {
			if (a.parm_index >= (int)parm_map->length ())
			  a.parm_index = -1;
			else if ((*parm_map) [a.parm_index].parm_index == -2)
			  continue;
			else
			  {
			    a.parm_offset
			      += (*parm_map) [a.parm_index].parm_offset;
			    a.parm_offset_known
			      &= (*parm_map)
				 [a.parm_index].parm_offset_known;
			    a.parm_index
			      = (*parm_map) [a.parm_index].parm_index;
			  }
		      }
		    changed |= insert (base_node->base, ref_node->ref, a,
				       record_accesses);
		  }
	    }
      }
    if (release)
      ggc_delete (other);
    return changed;
  }
  /* Copy OTHER to THIS.  */
  void copy_from (modref_tree <T> *other)
  {
    merge (other, NULL, false);
  }
  /* Search BASE in tree; return NULL if failed.  */
  modref_base_node <T> *search (T base)
  {
    size_t i;
    modref_base_node <T> *n;
    FOR_EACH_VEC_SAFE_ELT (bases, i, n)
      if (n->base == base)
	return n;
    return NULL;
  }
  /* Return ggc allocated instance.  We explicitly call destructors via
     ggc_delete and do not want finalizers to be registered and
     called at the garbage collection time.  */
  static modref_tree<T> *create_ggc (size_t max_bases, size_t max_refs,
				     size_t max_accesses)
  {
    return new (ggc_alloc_no_dtor<modref_tree<T>> ())
	   modref_tree<T> (max_bases, max_refs, max_accesses);
  }
  /* Remove all records and mark tree to alias with everything.  */
  void collapse ()
  {
    size_t i;
    modref_base_node <T> *n;

    if (bases)
      {
	FOR_EACH_VEC_SAFE_ELT (bases, i, n)
	  {
	    n->collapse ();
	    ggc_free (n);
	  }
	vec_free (bases);
	bases = NULL;
      }
    every_base = true;
  }

  /* Release memory.  */
  void release ()
  {
    collapse ();
    ggc_free (this);
  }
  /* Update parameter indexes in the tree according to MAP.  */
  void
  remap_params (vec <int> *map)
  {
    size_t i;
    modref_base_node <T> *base_node;
    FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
      {
	size_t j;
	modref_ref_node <T> *ref_node;
	FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
	  {
	    size_t k;
	    modref_access_node *access_node;
	    FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
	      if (access_node->parm_index >= 0)
		{
		  if (access_node->parm_index < (int)map->length ())
		    access_node->parm_index = (*map)[access_node->parm_index];
		  else
		    access_node->parm_index = -1;
		}
	  }
      }
  }
};
void modref_c_tests ();

void gt_ggc_mx (modref_tree <int>* const&);
void gt_ggc_mx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&);
void gt_pch_nx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&, gt_pointer_operator op, void *cookie);
void gt_pch_nx (modref_tree <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_base_node <int>*);
void gt_ggc_mx (modref_base_node <tree_node*>* &);
void gt_pch_nx (modref_base_node <int>* const&);
void gt_pch_nx (modref_base_node <tree_node*>* const&);
void gt_pch_nx (modref_base_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_base_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_ref_node <int>*);
void gt_ggc_mx (modref_ref_node <tree_node*>* &);
void gt_pch_nx (modref_ref_node <int>* const&);
void gt_pch_nx (modref_ref_node <tree_node*>* const&);
void gt_pch_nx (modref_ref_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_ref_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

#endif