/* Data structure for the modref pass.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* modref_tree represents a decision tree that can be used by the alias
   analysis oracle to determine whether a given memory access can be affected
   by a function call.  For every function we collect two trees, one for loads
   and one for stores.  The tree consists of the following levels:

   1) Base: this level represents the base alias set of the access and refers
      to sons (ref nodes).  The flag all_refs means that all possible
      references are aliasing.

      Because for LTO streaming we need to stream types rather than alias sets
      modref_base_node is implemented as a template.
   2) Ref: this level represents the ref alias set and links to accesses
      unless the all_refs flag is set.
      Again, ref is a template to allow LTO streaming.
   3) Access: this level represents info about individual accesses.  Presently
      we record whether the access is through a dereference of a function
      parameter and, if so, the access range.  */
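
/* For instance (a sketch; the alias sets and the parameter numbering are
   hypothetical), a store such as "*(int *)((char *)parm1 + 4) = v" could be
   recorded in the store tree as:

     base node (base alias set of the store)
       ref node (ref alias set of the store)
	 access node: parm_index 1, parm_offset 4 (bytes),
		      offset 0, size 32, max_size 32 (bits)  */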

#ifndef GCC_MODREF_TREE_H
#define GCC_MODREF_TREE_H

struct ipa_modref_summary;

/* Memory access.  */
struct GTY(()) modref_access_node
{
  /* Access range information (in bits).  */
  poly_int64 offset;
  poly_int64 size;
  poly_int64 max_size;

  /* Offset from parameter pointer to the base of the access (in bytes).  */
  poly_int64 parm_offset;

  /* Index of the parameter which specifies the base of the access.  -1 if
     the base is not a function parameter.  */
  int parm_index;
  bool parm_offset_known;
  /* Number of times the interval was extended during dataflow.
     This has to be limited in order to keep dataflow finite.  */
  unsigned char adjustments;

  /* Return true if the access node holds some useful info.  */
  bool useful_p () const
  {
    return parm_index != -1;
  }
  /* Return true if range info is useful.  */
  bool range_info_useful_p () const
  {
    return parm_index != -1 && parm_offset_known
	   && (known_size_p (size)
	       || known_size_p (max_size)
	       || known_ge (offset, 0));
  }
  /* Return true if both accesses are the same.  */
  bool operator == (modref_access_node &a) const
  {
    if (parm_index != a.parm_index)
      return false;
    if (parm_index >= 0)
      {
	if (parm_offset_known != a.parm_offset_known)
	  return false;
	if (parm_offset_known
	    && !known_eq (parm_offset, a.parm_offset))
	  return false;
      }
    if (range_info_useful_p () != a.range_info_useful_p ())
      return false;
    if (range_info_useful_p ()
	&& (!known_eq (a.offset, offset)
	    || !known_eq (a.size, size)
	    || !known_eq (a.max_size, max_size)))
      return false;
    return true;
  }
  /* Return true if A is a subaccess of THIS.  */
  bool contains (const modref_access_node &a) const
  {
    poly_int64 aoffset_adj = 0;
    if (parm_index >= 0)
      {
	if (parm_index != a.parm_index)
	  return false;
	if (parm_offset_known)
	  {
	    if (!a.parm_offset_known)
	      return false;
	    /* Accesses are never below parm_offset, so look
	       for smaller offset.  */
	    if (!known_le (parm_offset, a.parm_offset))
	      return false;
	    aoffset_adj = (a.parm_offset - parm_offset)
			  << LOG2_BITS_PER_UNIT;
	  }
      }
    if (range_info_useful_p ())
      {
	if (!a.range_info_useful_p ())
	  return false;
	/* Sizes of stores are used to check that the object is big enough
	   to fit the store; a smaller or unknown store is more general
	   than a larger store.  */
	if (known_size_p (size)
	    && (!known_size_p (a.size)
		|| !known_le (size, a.size)))
	  return false;
	if (known_size_p (max_size))
	  return known_subrange_p (a.offset + aoffset_adj,
				   a.max_size, offset, max_size);
	else
	  return known_le (offset, a.offset + aoffset_adj);
      }
    return true;
  }
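
  /* Containment example (a sketch with made-up numbers, sizes in bits):
     an access with parm_offset 0, offset 0, size 32 and max_size 64
     contains an access to the same parameter with parm_offset 0, offset 0,
     size 32 and max_size 32, since the smaller range is a known subrange
     of the larger one and the store-size requirement is no stricter.  */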

  /* Update access range to the new parameters.
     If RECORD_ADJUSTMENTS is true, record the number of changes in the access
     and if the threshold is exceeded start dropping precision, so only
     constantly many updates are possible.  This makes the dataflow
     converge.  */
  void update (poly_int64 parm_offset1,
	       poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
	       bool record_adjustments)
  {
    if (known_eq (offset, offset1)
	&& known_eq (size, size1)
	&& known_eq (max_size, max_size1))
      return;
    if (!record_adjustments
	|| (++adjustments) < param_modref_max_adjustments)
      {
	parm_offset = parm_offset1;
	offset = offset1;
	size = size1;
	max_size = max_size1;
      }
    else
      {
	if (dump_file)
	  fprintf (dump_file,
		   "--param param=modref-max-adjustments limit reached:");
	if (!known_eq (parm_offset, parm_offset1))
	  {
	    if (dump_file)
	      fprintf (dump_file, " parm_offset cleared");
	    parm_offset_known = false;
	  }
	if (!known_eq (size, size1))
	  {
	    size = -1;
	    if (dump_file)
	      fprintf (dump_file, " size cleared");
	  }
	if (!known_eq (max_size, max_size1))
	  {
	    max_size = -1;
	    if (dump_file)
	      fprintf (dump_file, " max_size cleared");
	  }
	if (!known_eq (offset, offset1))
	  {
	    offset = 0;
	    if (dump_file)
	      fprintf (dump_file, " offset cleared");
	  }
	if (dump_file)
	  fprintf (dump_file, "\n");
      }
  }

  /* Merge in access A if it is possible to do so without losing
     precision.  Return true if successful.
     If RECORD_ADJUSTMENTS is true, remember how many times the interval
     was prolonged and punt when there are too many.  */
  bool merge (const modref_access_node &a, bool record_adjustments)
  {
    poly_int64 offset1 = 0;
    poly_int64 aoffset1 = 0;
    poly_int64 new_parm_offset = 0;

    /* We assume that containment was tested earlier.  */
    gcc_checking_assert (!contains (a) && !a.contains (*this));
    if (parm_index >= 0)
      {
	if (parm_index != a.parm_index)
	  return false;
	if (parm_offset_known)
	  {
	    if (!a.parm_offset_known)
	      return false;
	    if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
	      return false;
	  }
      }
    /* See if we can merge the ranges.  */
    if (range_info_useful_p ())
      {
	/* In this case we have containment that should be
	   handled earlier.  */
	gcc_checking_assert (a.range_info_useful_p ());

	/* If a.size is less well specified than size, merge only
	   if the intervals are otherwise equivalent.  */
	if (known_size_p (size)
	    && (!known_size_p (a.size) || known_lt (a.size, size)))
	  {
	    if (((known_size_p (max_size) || known_size_p (a.max_size))
		 && !known_eq (max_size, a.max_size))
		|| !known_eq (offset1, aoffset1))
	      return false;
	    update (new_parm_offset, offset1, a.size, max_size,
		    record_adjustments);
	    return true;
	  }
	/* If the sizes are the same, we can extend the interval.  */
	if ((known_size_p (size) || known_size_p (a.size))
	    && !known_eq (size, a.size))
	  return false;
	if (known_le (offset1, aoffset1))
	  {
	    if (!known_size_p (max_size)
		|| known_ge (offset1 + max_size, aoffset1))
	      {
		update2 (new_parm_offset, offset1, size, max_size,
			 aoffset1, a.size, a.max_size,
			 record_adjustments);
		return true;
	      }
	  }
	else if (known_le (aoffset1, offset1))
	  {
	    if (!known_size_p (a.max_size)
		|| known_ge (aoffset1 + a.max_size, offset1))
	      {
		update2 (new_parm_offset, offset1, size, max_size,
			 aoffset1, a.size, a.max_size,
			 record_adjustments);
		return true;
	      }
	  }
	return false;
      }
    update (new_parm_offset, offset1,
	    size, max_size, record_adjustments);
    return true;
  }

  /* Return true if A1 and B1 can be merged with lower information loss
     than A2 and B2.
     Assume that no containment or lossless merging is possible.  */
  static bool closer_pair_p (const modref_access_node &a1,
			     const modref_access_node &b1,
			     const modref_access_node &a2,
			     const modref_access_node &b2)
  {
    /* Merging different parm indexes comes to a complete loss
       of range info.  */
    if (a1.parm_index != b1.parm_index)
      return false;
    if (a2.parm_index != b2.parm_index)
      return true;
    /* If parm is known and parm indexes are the same we should
       already have containment.  */
    gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
    gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

    /* First normalize offsets for parm offsets.  */
    poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
    if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
	|| !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
      gcc_unreachable ();

    /* Now compute the distance of the intervals.  */
    poly_int64 dist1, dist2;
    if (known_le (offseta1, offsetb1))
      {
	if (!known_size_p (a1.max_size))
	  dist1 = 0;
	else
	  dist1 = offsetb1 - offseta1 - a1.max_size;
      }
    else
      {
	if (!known_size_p (b1.max_size))
	  dist1 = 0;
	else
	  dist1 = offseta1 - offsetb1 - b1.max_size;
      }
    if (known_le (offseta2, offsetb2))
      {
	if (!known_size_p (a2.max_size))
	  dist2 = 0;
	else
	  dist2 = offsetb2 - offseta2 - a2.max_size;
      }
    else
      {
	if (!known_size_p (b2.max_size))
	  dist2 = 0;
	else
	  dist2 = offseta2 - offsetb2 - b2.max_size;
      }
    /* It may happen that intervals overlap in case the sizes
       are different.  Prefer the overlap to non-overlap.  */
    if (known_lt (dist1, 0) && known_ge (dist2, 0))
      return true;
    if (known_lt (dist2, 0) && known_ge (dist1, 0))
      return false;
    if (known_lt (dist1, 0))
      /* If both overlap, minimize the overlap.  */
      return known_le (dist2, dist1);
    else
      /* If both are disjoint, look for the smaller distance.  */
      return known_le (dist1, dist2);
  }
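
  /* Distance example (made-up numbers, all in bits): for the disjoint
     intervals [0, 32) and [96, 128) the distance is 96 - 0 - 32 = 64,
     while for [0, 32) and [40, 72) it is 40 - 0 - 32 = 8, so the second
     pair is the closer one and hence the cheaper forced merge.  */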

  /* Merge in access A while losing precision.  */
  void forced_merge (const modref_access_node &a, bool record_adjustments)
  {
    if (parm_index != a.parm_index)
      {
	gcc_checking_assert (parm_index != -1);
	parm_index = -1;
	return;
      }

    /* We assume that containment and lossless merging
       was tested earlier.  */
    gcc_checking_assert (!contains (a) && !a.contains (*this)
			 && !merge (a, record_adjustments));
    gcc_checking_assert (parm_offset_known && a.parm_offset_known);

    poly_int64 new_parm_offset, offset1, aoffset1;
    if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
      {
	parm_offset_known = false;
	return;
      }
    gcc_checking_assert (range_info_useful_p ()
			 && a.range_info_useful_p ());
    if (record_adjustments)
      adjustments += a.adjustments;
    update2 (new_parm_offset,
	     offset1, size, max_size,
	     aoffset1, a.size, a.max_size,
	     record_adjustments);
  }

private:
  /* Merge two ranges both starting at parm_offset1 and update THIS
     with the result.  */
  void update2 (poly_int64 parm_offset1,
		poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
		poly_int64 offset2, poly_int64 size2, poly_int64 max_size2,
		bool record_adjustments)
  {
    poly_int64 new_size = size1;

    if (!known_size_p (size2)
	|| known_le (size2, size1))
      new_size = size2;
    else
      gcc_checking_assert (known_le (size1, size2));

    if (known_le (offset1, offset2))
      ;
    else if (known_le (offset2, offset1))
      {
	std::swap (offset1, offset2);
	std::swap (max_size1, max_size2);
      }
    else
      gcc_unreachable ();

    poly_int64 new_max_size;

    if (!known_size_p (max_size1))
      new_max_size = max_size1;
    else if (!known_size_p (max_size2))
      new_max_size = max_size2;
    else
      {
	max_size2 = max_size2 + offset2 - offset1;
	if (known_le (max_size1, max_size2))
	  new_max_size = max_size2;
	else if (known_le (max_size2, max_size1))
	  new_max_size = max_size1;
	else
	  gcc_unreachable ();
      }

    update (parm_offset1, offset1,
	    new_size, new_max_size, record_adjustments);
  }
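
  /* For example (made-up numbers, offsets and sizes in bits), merging the
     range {offset 0, size 32, max_size 32} with the adjacent range
     {offset 32, size 32, max_size 32} yields {offset 0, size 32,
     max_size 64}: the sizes agree, and the second max_size re-based to
     offset 0 extends the interval to 64 bits.  */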

  /* Given access nodes THIS and A, return true if their ranges can be
     expressed relative to a common parm_offset.  In that case return the
     common parm offset in NEW_PARM_OFFSET, the start of the range in THIS
     in NEW_OFFSET and the start of the range in A in NEW_AOFFSET.  */
  bool combined_offsets (const modref_access_node &a,
			 poly_int64 *new_parm_offset,
			 poly_int64 *new_offset,
			 poly_int64 *new_aoffset) const
  {
    gcc_checking_assert (parm_offset_known && a.parm_offset_known);
    if (known_le (a.parm_offset, parm_offset))
      {
	*new_offset = offset
		      + ((parm_offset - a.parm_offset)
			 << LOG2_BITS_PER_UNIT);
	*new_aoffset = a.offset;
	*new_parm_offset = a.parm_offset;
	return true;
      }
    else if (known_le (parm_offset, a.parm_offset))
      {
	*new_aoffset = a.offset
		       + ((a.parm_offset - parm_offset)
			  << LOG2_BITS_PER_UNIT);
	*new_offset = offset;
	*new_parm_offset = parm_offset;
	return true;
      }
    else
      return false;
  }
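
  /* Offset normalization example (made-up numbers): for two accesses to the
     same parameter with parm_offset 4 and parm_offset 8 (bytes), both with
     offset 0, the common base is parm_offset 4; the first access keeps
     offset 0 while the second one becomes
     (8 - 4) << LOG2_BITS_PER_UNIT = 32 bits.  */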
};

/* Access node specifying no useful info.  */
const modref_access_node unspecified_modref_access_node
  = {0, -1, -1, 0, -1, false, 0};
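
/* The initializers above set, in declaration order: offset 0, size -1,
   max_size -1, parm_offset 0, parm_index -1, parm_offset_known false and
   adjustments 0.  */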

template <typename T>
struct GTY((user)) modref_ref_node
{
  T ref;
  bool every_access;
  vec <modref_access_node, va_gc> *accesses;

  modref_ref_node (T ref):
    ref (ref),
    every_access (false),
    accesses (NULL)
  {}

  /* Collapse the tree.  */
  void collapse ()
  {
    vec_free (accesses);
    accesses = NULL;
    every_access = true;
  }

  /* Verify that the list does not contain redundant accesses.  */
  void verify ()
  {
    size_t i, i2;
    modref_access_node *a, *a2;

    FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
      FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
	if (i != i2)
	  gcc_assert (!a->contains (*a2));
  }

  /* Insert access A.
     Collapse the tree if it has more than MAX_ACCESSES entries.
     If RECORD_ADJUSTMENTS is true, avoid too many interval extensions.
     Return true if the record was changed.  */
  bool insert_access (modref_access_node a, size_t max_accesses,
		      bool record_adjustments)
  {
    /* If this base->ref pair has no access information, bail out.  */
    if (every_access)
      return false;

    /* Otherwise, insert the access, merging with existing ones
       where possible.  */
    size_t i, j;
    modref_access_node *a2;

    if (flag_checking)
      verify ();

    if (!a.useful_p ())
      {
	if (!every_access)
	  {
	    collapse ();
	    return true;
	  }
	return false;
      }

    FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
      {
	if (a2->contains (a))
	  return false;
	if (a.contains (*a2))
	  {
	    a.adjustments = 0;
	    a2->parm_index = a.parm_index;
	    a2->parm_offset_known = a.parm_offset_known;
	    a2->update (a.parm_offset, a.offset, a.size, a.max_size,
			record_adjustments);
	    try_merge_with (i);
	    return true;
	  }
	if (a2->merge (a, record_adjustments))
	  {
	    try_merge_with (i);
	    return true;
	  }
	gcc_checking_assert (!(a == *a2));
      }

    /* If this base->ref pair has too many accesses stored, we will clear
       all accesses and bail out.  */
    if (accesses && accesses->length () >= max_accesses)
      {
	if (max_accesses < 2)
	  {
	    collapse ();
	    if (dump_file)
	      fprintf (dump_file,
		       "--param param=modref-max-accesses limit reached;"
		       " collapsing\n");
	    return true;
	  }
	/* Find the least harmful merge and perform it.  */
	int best1 = -1, best2 = -1;
	FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
	  {
	    for (j = i + 1; j < accesses->length (); j++)
	      if (best1 < 0
		  || modref_access_node::closer_pair_p
		       (*a2, (*accesses)[j],
			(*accesses)[best1],
			best2 < 0 ? a : (*accesses)[best2]))
		{
		  best1 = i;
		  best2 = j;
		}
	    if (modref_access_node::closer_pair_p
		  (*a2, a,
		   (*accesses)[best1],
		   best2 < 0 ? a : (*accesses)[best2]))
	      {
		best1 = i;
		best2 = -1;
	      }
	  }
	(*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
					 record_adjustments);
	if (!(*accesses)[best1].useful_p ())
	  {
	    collapse ();
	    if (dump_file)
	      fprintf (dump_file,
		       "--param param=modref-max-accesses limit reached;"
		       " collapsing\n");
	    return true;
	  }
	if (dump_file && best2 >= 0)
	  fprintf (dump_file,
		   "--param param=modref-max-accesses limit reached;"
		   " merging %i and %i\n", best1, best2);
	else if (dump_file)
	  fprintf (dump_file,
		   "--param param=modref-max-accesses limit reached;"
		   " merging with %i\n", best1);
	try_merge_with (best1);
	if (best2 >= 0)
	  insert_access (a, max_accesses, record_adjustments);
	return true;
      }
    a.adjustments = 0;
    vec_safe_push (accesses, a);
    return true;
  }

private:
  /* Try to optimize the access list after entry INDEX was modified.  */
  void
  try_merge_with (size_t index)
  {
    size_t i;

    for (i = 0; i < accesses->length ();)
      if (i != index)
	{
	  bool found = false, restart = false;
	  modref_access_node *a = &(*accesses)[i];
	  modref_access_node *n = &(*accesses)[index];

	  if (n->contains (*a))
	    found = true;
	  if (!found && n->merge (*a, false))
	    found = restart = true;
	  if (found)
	    {
	      accesses->unordered_remove (i);
	      if (index == accesses->length ())
		{
		  index = i;
		  i++;
		}
	      if (restart)
		i = 0;
	    }
	  else
	    i++;
	}
      else
	i++;
  }
};

/* Base of an access.  */
template <typename T>
struct GTY((user)) modref_base_node
{
  T base;
  vec <modref_ref_node <T> *, va_gc> *refs;
  bool every_ref;

  modref_base_node (T base):
    base (base),
    refs (NULL),
    every_ref (false) {}

  /* Search REF; return NULL if failed.  */
  modref_ref_node <T> *search (T ref)
  {
    size_t i;
    modref_ref_node <T> *n;
    FOR_EACH_VEC_SAFE_ELT (refs, i, n)
      if (n->ref == ref)
	return n;
    return NULL;
  }

  /* Insert REF; collapse the tree if there are more than MAX_REFS.
     Return the inserted ref and, if CHANGED is non-null, set it to true if
     something changed.  */
  modref_ref_node <T> *insert_ref (T ref, size_t max_refs,
				   bool *changed = NULL)
  {
    modref_ref_node <T> *ref_node;

    /* If the node is collapsed, don't do anything.  */
    if (every_ref)
      return NULL;

    /* Otherwise, insert a node for the ref of the access under the base.  */
    ref_node = search (ref);
    if (ref_node)
      return ref_node;

    /* We always allow inserting ref 0.  For non-0 refs there is an upper
       limit on the number of entries; if it is exceeded, drop the ref
       conservatively to 0.  */
    if (ref && refs && refs->length () >= max_refs)
      {
	if (dump_file)
	  fprintf (dump_file, "--param param=modref-max-refs limit reached;"
		   " using 0\n");
	ref = 0;
	ref_node = search (ref);
	if (ref_node)
	  return ref_node;
      }

    if (changed)
      *changed = true;

    ref_node = new (ggc_alloc <modref_ref_node <T> > ())
	       modref_ref_node <T> (ref);
    vec_safe_push (refs, ref_node);
    return ref_node;
  }

  void collapse ()
  {
    size_t i;
    modref_ref_node <T> *r;

    if (refs)
      {
	FOR_EACH_VEC_SAFE_ELT (refs, i, r)
	  {
	    r->collapse ();
	    ggc_free (r);
	  }
	vec_free (refs);
      }
    refs = NULL;
    every_ref = true;
  }
};

/* Map translating parameters across a function call.  */

struct modref_parm_map
{
  /* Index of the parameter we translate to.
     -1 indicates that the parameter is unknown.
     -2 indicates that the parameter points to local memory and the access
	can be discarded.  */
  int parm_index;
  bool parm_offset_known;
  poly_int64 parm_offset;
};
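
/* For instance (a sketch with hypothetical indexes): if a callee access
   dereferences its parameter 0 at parm_offset 4 and the call site passes
   "caller parameter 2 + 8 bytes", the map entry {2, true, 8} translates
   the access to the caller's parameter 2 at parm_offset 12.  */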

/* Access tree for a single function.  */
template <typename T>
struct GTY((user)) modref_tree
{
  vec <modref_base_node <T> *, va_gc> *bases;
  size_t max_bases;
  size_t max_refs;
  size_t max_accesses;
  bool every_base;

  modref_tree (size_t max_bases, size_t max_refs, size_t max_accesses):
    bases (NULL),
    max_bases (max_bases),
    max_refs (max_refs),
    max_accesses (max_accesses),
    every_base (false) {}

  /* Insert BASE; collapse the tree if there are more than MAX_BASES.
     Return the inserted base and, if CHANGED is non-null, set it to true if
     something changed.
     If the table gets full, try to insert REF instead.  */

  modref_base_node <T> *insert_base (T base, T ref, bool *changed = NULL)
  {
    modref_base_node <T> *base_node;

    /* If the node is collapsed, don't do anything.  */
    if (every_base)
      return NULL;

    /* Otherwise, insert a node for the base of the access into the tree.  */
    base_node = search (base);
    if (base_node)
      return base_node;

    /* We always allow inserting base 0.  For a non-0 base there is an upper
       limit on the number of entries; if it is exceeded, drop the base
       conservatively to ref, and if it still does not fit, to 0.  */
    if (base && bases && bases->length () >= max_bases)
      {
	base_node = search (ref);
	if (base_node)
	  {
	    if (dump_file)
	      fprintf (dump_file, "--param param=modref-max-bases"
		       " limit reached; using ref\n");
	    return base_node;
	  }
	if (dump_file)
	  fprintf (dump_file, "--param param=modref-max-bases"
		   " limit reached; using 0\n");
	base = 0;
	base_node = search (base);
	if (base_node)
	  return base_node;
      }

    if (changed)
      *changed = true;

    base_node = new (ggc_alloc <modref_base_node <T> > ())
		modref_base_node <T> (base);
    vec_safe_push (bases, base_node);
    return base_node;
  }

  /* Insert a memory access into the tree.
     Return true if something changed.  */
  bool insert (T base, T ref, modref_access_node a,
	       bool record_adjustments)
  {
    if (every_base)
      return false;

    bool changed = false;

    /* No useful information tracked; collapse everything.  */
    if (!base && !ref && !a.useful_p ())
      {
	collapse ();
	return true;
      }

    modref_base_node <T> *base_node = insert_base (base, ref, &changed);
    base = base_node->base;
    /* If the table got full we may end up with a useless base.  */
    if (!base && !ref && !a.useful_p ())
      {
	collapse ();
	return true;
      }
    if (base_node->every_ref)
      return changed;
    gcc_checking_assert (search (base) != NULL);

    /* No useful ref info tracked; collapse the base.  */
    if (!ref && !a.useful_p ())
      {
	base_node->collapse ();
	return true;
      }

    modref_ref_node <T> *ref_node = base_node->insert_ref (ref, max_refs,
							   &changed);
    ref = ref_node->ref;

    if (ref_node->every_access)
      return changed;
    changed |= ref_node->insert_access (a, max_accesses,
					record_adjustments);
    /* See if we failed to add a useful access.  */
    if (ref_node->every_access)
      {
	/* Collapse everything if there is no useful base and ref.  */
	if (!base && !ref)
	  {
	    collapse ();
	    gcc_checking_assert (changed);
	  }
	/* Collapse the base if there is no useful ref.  */
	else if (!ref)
	  {
	    base_node->collapse ();
	    gcc_checking_assert (changed);
	  }
      }
    return changed;
  }
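
  /* A typical use when recording one statement's access (a sketch following
     ipa-modref.c; REF is an ao_ref describing the access and A a
     modref_access_node built for it):

       tt->insert (ao_ref_base_alias_set (&ref), ao_ref_alias_set (&ref),
		   a, false);  */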

  /* Remove tree branches that are not useful (i.e. they will always pass).  */

  void cleanup ()
  {
    size_t i, j;
    modref_base_node <T> *base_node;
    modref_ref_node <T> *ref_node;

    if (!bases)
      return;

    for (i = 0; vec_safe_iterate (bases, i, &base_node);)
      {
	if (base_node->refs)
	  for (j = 0; vec_safe_iterate (base_node->refs, j, &ref_node);)
	    {
	      if (!ref_node->every_access
		  && (!ref_node->accesses
		      || !ref_node->accesses->length ()))
		{
		  base_node->refs->unordered_remove (j);
		  vec_free (ref_node->accesses);
		  ggc_delete (ref_node);
		}
	      else
		j++;
	    }
	if (!base_node->every_ref
	    && (!base_node->refs || !base_node->refs->length ()))
	  {
	    bases->unordered_remove (i);
	    vec_free (base_node->refs);
	    ggc_delete (base_node);
	  }
	else
	  i++;
      }
    if (bases && !bases->length ())
      {
	vec_free (bases);
	bases = NULL;
      }
  }

  /* Merge OTHER into the tree.
     PARM_MAP, if non-NULL, maps parm indexes of the callee to the caller.
     -2 is used to signal that the parameter is local and the access can be
     discarded.  Return true if something has changed.  */
  bool merge (modref_tree <T> *other, vec <modref_parm_map> *parm_map,
	      bool record_accesses)
  {
    if (!other || every_base)
      return false;
    if (other->every_base)
      {
	collapse ();
	return true;
      }

    bool changed = false;
    size_t i, j, k;
    modref_base_node <T> *base_node, *my_base_node;
    modref_ref_node <T> *ref_node;
    modref_access_node *access_node;
    bool release = false;

    /* For self-recursive functions we may end up merging a summary into
       itself; produce a copy first so we do not modify the summary under
       our own hands.  */
    if (other == this)
      {
	release = true;
	other = modref_tree<T>::create_ggc (max_bases, max_refs,
					    max_accesses);
	other->copy_from (this);
      }

    FOR_EACH_VEC_SAFE_ELT (other->bases, i, base_node)
      {
	if (base_node->every_ref)
	  {
	    my_base_node = insert_base (base_node->base, 0, &changed);
	    if (my_base_node && !my_base_node->every_ref)
	      {
		my_base_node->collapse ();
		cleanup ();
		changed = true;
	      }
	  }
	else
	  FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
	    {
	      if (ref_node->every_access)
		{
		  changed |= insert (base_node->base,
				     ref_node->ref,
				     unspecified_modref_access_node,
				     record_accesses);
		}
	      else
		FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
		  {
		    modref_access_node a = *access_node;

		    if (a.parm_index != -1 && parm_map)
		      {
			if (a.parm_index >= (int)parm_map->length ())
			  a.parm_index = -1;
			else if ((*parm_map) [a.parm_index].parm_index == -2)
			  continue;
			else
			  {
			    a.parm_offset
			      += (*parm_map) [a.parm_index].parm_offset;
			    a.parm_offset_known
			      &= (*parm_map)
				 [a.parm_index].parm_offset_known;
			    a.parm_index
			      = (*parm_map) [a.parm_index].parm_index;
			  }
		      }
		    changed |= insert (base_node->base, ref_node->ref, a,
				       record_accesses);
		  }
	    }
      }
    if (release)
      ggc_delete (other);
    return changed;
  }
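
  /* A typical use when propagating a callee summary into the caller (a
     sketch following ipa-modref.c; CUR_SUMMARY, CALLEE_SUMMARY and the
     per-call-site PARM_MAP are hypothetical names here):

       changed |= cur_summary->loads->merge (callee_summary->loads,
					     &parm_map, true);
       changed |= cur_summary->stores->merge (callee_summary->stores,
					      &parm_map, true);  */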

  /* Copy OTHER to THIS.  */
  void copy_from (modref_tree <T> *other)
  {
    merge (other, NULL, false);
  }

  /* Search BASE in the tree; return NULL if failed.  */
  modref_base_node <T> *search (T base)
  {
    size_t i;
    modref_base_node <T> *n;
    FOR_EACH_VEC_SAFE_ELT (bases, i, n)
      if (n->base == base)
	return n;
    return NULL;
  }

  /* Return a ggc-allocated instance.  We explicitly call destructors via
     ggc_delete and do not want finalizers to be registered and
     called at garbage collection time.  */
  static modref_tree<T> *create_ggc (size_t max_bases, size_t max_refs,
				     size_t max_accesses)
  {
    return new (ggc_alloc_no_dtor<modref_tree<T>> ())
	   modref_tree<T> (max_bases, max_refs, max_accesses);
  }

  /* Remove all records and mark the tree to alias with everything.  */
  void collapse ()
  {
    size_t i;
    modref_base_node <T> *n;

    if (bases)
      {
	FOR_EACH_VEC_SAFE_ELT (bases, i, n)
	  {
	    n->collapse ();
	    ggc_free (n);
	  }
	vec_free (bases);
      }
    bases = NULL;
    every_base = true;
  }

  /* Release memory.  */
  ~modref_tree ()
  {
    collapse ();
  }

  /* Update parameter indexes in the tree according to MAP.  */
  void
  remap_params (vec <int> *map)
  {
    size_t i;
    modref_base_node <T> *base_node;
    FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
      {
	size_t j;
	modref_ref_node <T> *ref_node;
	FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
	  {
	    size_t k;
	    modref_access_node *access_node;
	    FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
	      if (access_node->parm_index >= 0)
		{
		  if (access_node->parm_index < (int)map->length ())
		    access_node->parm_index = (*map)[access_node->parm_index];
		  else
		    access_node->parm_index = -1;
		}
	  }
      }
  }
};

void modref_c_tests ();

void gt_ggc_mx (modref_tree <int>* const&);
void gt_ggc_mx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&);
void gt_pch_nx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&, gt_pointer_operator op, void *cookie);
void gt_pch_nx (modref_tree <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_base_node <int>*);
void gt_ggc_mx (modref_base_node <tree_node*>* &);
void gt_pch_nx (modref_base_node <int>* const&);
void gt_pch_nx (modref_base_node <tree_node*>* const&);
void gt_pch_nx (modref_base_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_base_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_ref_node <int>*);
void gt_ggc_mx (modref_ref_node <tree_node*>* &);
void gt_pch_nx (modref_ref_node <int>* const&);
void gt_pch_nx (modref_ref_node <tree_node*>* const&);
void gt_pch_nx (modref_ref_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_ref_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

#endif