/* Data structure for the modref pass.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* modref_tree represents a decision tree that can be used by the alias
   analysis oracle to determine whether a given memory access can be affected
   by a function call.  For every function we collect two trees, one for loads
   and one for stores.  The tree consists of the following levels:

   1) Base: this level represents the base alias set of the access and refers
      to sons (ref nodes).  The flag all_refs means that all possible
      references are aliasing.

      Because for LTO streaming we need to stream types rather than alias sets
      modref_base_node is implemented as a template.
   2) Ref: this level represents the ref alias set and links to accesses
      unless the all_refs flag is set.
      Again ref is a template to allow LTO streaming.
   3) Access: this level represents info about individual accesses.  Presently
      we record whether the access is through a dereference of a function
      parameter and, if so, the access range.  */
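
/* As an illustration (a sketch, not code used by GCC itself): a function
   whose only side effect is a 4-byte store to *(p + 4), where p is its
   first parameter, would have a store tree with one base node, one ref
   node under it, and one access node recording parm_index 0, parm_offset
   4 and the bit range [0, 32).  Access offsets and sizes are kept in
   bits, while parm_offset is in bytes; see modref_access_node below.  */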

#ifndef GCC_MODREF_TREE_H
#define GCC_MODREF_TREE_H

struct ipa_modref_summary;

/* Memory access.  */
struct GTY(()) modref_access_node
{
  /* Access range information (in bits).  */
  poly_int64 offset;
  poly_int64 size;
  poly_int64 max_size;

  /* Offset from parameter pointer to the base of the access (in bytes).  */
  poly_int64 parm_offset;

  /* Index of parameter which specifies the base of access.  -1 if base is not
     a function parameter.  */
  int parm_index;
  bool parm_offset_known;
  /* Number of times interval was extended during dataflow.
     This has to be limited in order to keep dataflow finite.  */
  unsigned char adjustments;

  /* Return true if access node holds some useful info.  */
  bool useful_p () const
    {
      return parm_index != -1;
    }
  /* Return true if range info is useful.  */
  bool range_info_useful_p () const
    {
      return parm_index != -1 && parm_offset_known
             && (known_size_p (size)
                 || known_size_p (max_size)
                 || known_ge (offset, 0));
    }
  /* Return true if both accesses are the same.  */
  bool operator == (modref_access_node &a) const
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_index >= 0)
        {
          if (parm_offset_known != a.parm_offset_known)
            return false;
          if (parm_offset_known
              && !known_eq (parm_offset, a.parm_offset))
            return false;
        }
      if (range_info_useful_p () != a.range_info_useful_p ())
        return false;
      if (range_info_useful_p ()
          && (!known_eq (a.offset, offset)
              || !known_eq (a.size, size)
              || !known_eq (a.max_size, max_size)))
        return false;
      return true;
    }
  /* Return true if A is a subaccess.  */
  bool contains (const modref_access_node &a) const
    {
      poly_int64 aoffset_adj = 0;
      if (parm_index >= 0)
        {
          if (parm_index != a.parm_index)
            return false;
          if (parm_offset_known)
            {
              if (!a.parm_offset_known)
                return false;
              /* Accesses are never below parm_offset, so look
                 for smaller offset.
                 If access ranges are known still allow merging
                 when bit offsets comparison passes.  */
              if (!known_le (parm_offset, a.parm_offset)
                  && !range_info_useful_p ())
                return false;
              aoffset_adj = (a.parm_offset - parm_offset)
                            << LOG2_BITS_PER_UNIT;
            }
        }
      if (range_info_useful_p ())
        {
          if (!a.range_info_useful_p ())
            return false;
          /* Sizes of stores are used to check that object is big enough
             to fit the store, so a smaller or unknown store is more general
             than a larger store.  */
          if (known_size_p (size)
              && (!known_size_p (a.size)
                  || !known_le (size, a.size)))
            return false;
          if (known_size_p (max_size))
            return known_subrange_p (a.offset + aoffset_adj,
                                     a.max_size, offset, max_size);
          else
            return known_le (offset, a.offset + aoffset_adj);
        }
      return true;
    }
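  /* Worked example (illustrative only): a node for parameter 0 with
     parm_offset 0 and bit range offset 0, size 32, max_size 64 contains
     an access to the same parameter and parm_offset with offset 32,
     size 32, max_size 32: the store size 32 fits and [32, 64) is a
     subrange of [0, 64).  */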
  /* Update access range to new parameters.
     If RECORD_ADJUSTMENTS is true, record number of changes in the access
     and if threshold is exceeded start dropping precision
     so only constantly many updates are possible.  This makes the dataflow
     converge.  */
  void update (poly_int64 parm_offset1,
               poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
               bool record_adjustments)
    {
      if (known_eq (parm_offset, parm_offset1)
          && known_eq (offset, offset1)
          && known_eq (size, size1)
          && known_eq (max_size, max_size1))
        return;
      if (!record_adjustments
          || (++adjustments) < param_modref_max_adjustments)
        {
          parm_offset = parm_offset1;
          offset = offset1;
          size = size1;
          max_size = max_size1;
        }
      else
        {
          if (dump_file)
            fprintf (dump_file,
                     "--param param=modref-max-adjustments limit reached:");
          if (!known_eq (parm_offset, parm_offset1))
            {
              if (dump_file)
                fprintf (dump_file, " parm_offset cleared");
              parm_offset_known = false;
            }
          if (!known_eq (size, size1))
            {
              size = -1;
              if (dump_file)
                fprintf (dump_file, " size cleared");
            }
          if (!known_eq (max_size, max_size1))
            {
              max_size = -1;
              if (dump_file)
                fprintf (dump_file, " max_size cleared");
            }
          if (!known_eq (offset, offset1))
            {
              offset = 0;
              if (dump_file)
                fprintf (dump_file, " offset cleared");
            }
          if (dump_file)
            fprintf (dump_file, "\n");
        }
    }
  /* Merge in access A if it is possible to do so without losing
     precision.  Return true if successful.
     If RECORD_ADJUSTMENTS is true, remember how many times the interval
     was prolonged and punt when there are too many.  */
  bool merge (const modref_access_node &a, bool record_adjustments)
    {
      poly_int64 offset1 = 0;
      poly_int64 aoffset1 = 0;
      poly_int64 new_parm_offset = 0;

      /* We assume that containment was tested earlier.  */
      gcc_checking_assert (!contains (a) && !a.contains (*this));
      if (parm_index >= 0)
        {
          if (parm_index != a.parm_index)
            return false;
          if (parm_offset_known)
            {
              if (!a.parm_offset_known)
                return false;
              if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
                return false;
            }
        }
      /* See if we can merge ranges.  */
      if (range_info_useful_p ())
        {
          /* In this case we have containment that should be
             handled earlier.  */
          gcc_checking_assert (a.range_info_useful_p ());

          /* If a.size is less specified than size, merge only
             if intervals are otherwise equivalent.  */
          if (known_size_p (size)
              && (!known_size_p (a.size) || known_lt (a.size, size)))
            {
              if (((known_size_p (max_size) || known_size_p (a.max_size))
                   && !known_eq (max_size, a.max_size))
                  || !known_eq (offset1, aoffset1))
                return false;
              update (new_parm_offset, offset1, a.size, max_size,
                      record_adjustments);
              return true;
            }
          /* If sizes are the same, we can extend the interval.  */
          if ((known_size_p (size) || known_size_p (a.size))
              && !known_eq (size, a.size))
            return false;
          if (known_le (offset1, aoffset1))
            {
              if (!known_size_p (max_size)
                  || known_ge (offset1 + max_size, aoffset1))
                {
                  update2 (new_parm_offset, offset1, size, max_size,
                           aoffset1, a.size, a.max_size,
                           record_adjustments);
                  return true;
                }
            }
          else if (known_le (aoffset1, offset1))
            {
              if (!known_size_p (a.max_size)
                  || known_ge (aoffset1 + a.max_size, offset1))
                {
                  update2 (new_parm_offset, offset1, size, max_size,
                           aoffset1, a.size, a.max_size,
                           record_adjustments);
                  return true;
                }
            }
          return false;
        }
      update (new_parm_offset, offset1,
              size, max_size, record_adjustments);
      return true;
    }
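  /* Worked example (illustrative only): two accesses to the same
     parameter with equal parm_offset, both of size 32, one with offset 0
     and max_size 32 and the other with offset 32 and max_size 32, merge
     losslessly (via update2) into a single access with offset 0, size 32
     and max_size 64.  */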
  /* Return true if A1 and B1 can be merged with lower information
     loss than A2 and B2.
     Assume that no containment or lossless merging is possible.  */
  static bool closer_pair_p (const modref_access_node &a1,
                             const modref_access_node &b1,
                             const modref_access_node &a2,
                             const modref_access_node &b2)
    {
      /* Merging different parm indexes comes to complete loss
         of range info.  */
      if (a1.parm_index != b1.parm_index)
        return false;
      if (a2.parm_index != b2.parm_index)
        return true;
      /* If parm is known and parm indexes are the same we should
         already have containment.  */
      gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
      gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

      /* First normalize offsets for parm offsets.  */
      poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
      if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
          || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
        gcc_unreachable ();

      /* Now compute distance of the intervals.  */
      poly_int64 dist1, dist2;
      if (known_le (offseta1, offsetb1))
        {
          if (!known_size_p (a1.max_size))
            dist1 = 0;
          else
            dist1 = offsetb1 - offseta1 - a1.max_size;
        }
      else
        {
          if (!known_size_p (b1.max_size))
            dist1 = 0;
          else
            dist1 = offseta1 - offsetb1 - b1.max_size;
        }
      if (known_le (offseta2, offsetb2))
        {
          if (!known_size_p (a2.max_size))
            dist2 = 0;
          else
            dist2 = offsetb2 - offseta2 - a2.max_size;
        }
      else
        {
          if (!known_size_p (b2.max_size))
            dist2 = 0;
          else
            dist2 = offseta2 - offsetb2 - b2.max_size;
        }
      /* It may happen that intervals overlap in case size
         is different.  Prefer the overlap to non-overlap.  */
      if (known_lt (dist1, 0) && known_ge (dist2, 0))
        return true;
      if (known_lt (dist2, 0) && known_ge (dist1, 0))
        return false;
      if (known_lt (dist1, 0))
        /* If both overlap, minimize the overlap.  */
        return known_le (dist2, dist1);
      else
        /* If both are disjoint, look for the smaller distance.  */
        return known_le (dist1, dist2);
    }
  /* Merge in access A while losing precision.  */
  void forced_merge (const modref_access_node &a, bool record_adjustments)
    {
      if (parm_index != a.parm_index)
        {
          gcc_checking_assert (parm_index != -1);
          parm_index = -1;
          return;
        }

      /* We assume that containment and lossless merging
         were tested earlier.  */
      gcc_checking_assert (!contains (a) && !a.contains (*this)
                           && !merge (a, record_adjustments));
      gcc_checking_assert (parm_offset_known && a.parm_offset_known);

      poly_int64 new_parm_offset, offset1, aoffset1;
      if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
        {
          parm_offset_known = false;
          return;
        }
      gcc_checking_assert (range_info_useful_p ()
                           && a.range_info_useful_p ());
      if (record_adjustments)
        adjustments += a.adjustments;
      update2 (new_parm_offset,
               offset1, size, max_size,
               aoffset1, a.size, a.max_size,
               record_adjustments);
    }
private:
  /* Merge two ranges both starting at parm_offset1 and update THIS
     with the result.  */
  void update2 (poly_int64 parm_offset1,
                poly_int64 offset1, poly_int64 size1, poly_int64 max_size1,
                poly_int64 offset2, poly_int64 size2, poly_int64 max_size2,
                bool record_adjustments)
    {
      poly_int64 new_size = size1;

      if (!known_size_p (size2)
          || known_le (size2, size1))
        new_size = size2;
      else
        gcc_checking_assert (known_le (size1, size2));

      if (known_le (offset1, offset2))
        ;
      else if (known_le (offset2, offset1))
        {
          std::swap (offset1, offset2);
          std::swap (max_size1, max_size2);
        }
      else
        gcc_unreachable ();

      poly_int64 new_max_size;

      if (!known_size_p (max_size1))
        new_max_size = max_size1;
      else if (!known_size_p (max_size2))
        new_max_size = max_size2;
      else
        {
          new_max_size = max_size2 + offset2 - offset1;
          if (known_le (new_max_size, max_size1))
            new_max_size = max_size1;
        }

      update (parm_offset1, offset1,
              new_size, new_max_size, record_adjustments);
    }
  /* Given access nodes THIS and A, return true if they
     can be expressed with a common parm_offset.  In that case
     return the common parm offset in NEW_PARM_OFFSET, the start
     of the range in THIS in NEW_OFFSET and the start of the range
     in A in NEW_AOFFSET.  */
  bool combined_offsets (const modref_access_node &a,
                         poly_int64 *new_parm_offset,
                         poly_int64 *new_offset,
                         poly_int64 *new_aoffset) const
    {
      gcc_checking_assert (parm_offset_known && a.parm_offset_known);
      if (known_le (a.parm_offset, parm_offset))
        {
          *new_offset = offset
                        + ((parm_offset - a.parm_offset)
                           << LOG2_BITS_PER_UNIT);
          *new_aoffset = a.offset;
          *new_parm_offset = a.parm_offset;
          return true;
        }
      else if (known_le (parm_offset, a.parm_offset))
        {
          *new_aoffset = a.offset
                         + ((a.parm_offset - parm_offset)
                            << LOG2_BITS_PER_UNIT);
          *new_offset = offset;
          *new_parm_offset = parm_offset;
          return true;
        }
      else
        return false;
    }
};

/* Access node specifying no useful info.  */
const modref_access_node unspecified_modref_access_node
  = {0, -1, -1, 0, -1, false, 0};
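
/* For instance (an illustration, not code used by GCC): a known 4-byte
   access at byte offset 8 from the pointer passed in the first parameter
   could be written, following the field order above, as

     modref_access_node a = {0, 32, 32, 8, 0, true, 0};

   i.e. bit offset 0, size 32, max_size 32, parm_offset 8, parm_index 0,
   parm_offset_known true and no adjustments yet.  */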

template <typename T>
struct GTY((user)) modref_ref_node
{
  T ref;
  bool every_access;
  vec <modref_access_node, va_gc> *accesses;

  modref_ref_node (T ref):
    ref (ref),
    every_access (false),
    accesses (NULL)
  {}

  /* Collapse the tree.  */
  void collapse ()
    {
      vec_free (accesses);
      accesses = NULL;
      every_access = true;
    }

  /* Verify that list does not contain redundant accesses.  */
  void verify ()
    {
      size_t i, i2;
      modref_access_node *a, *a2;

      FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
        {
          FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
            if (i != i2)
              gcc_assert (!a->contains (*a2));
        }
    }

  /* Insert access A.
     Collapse tree if it has more than MAX_ACCESSES entries.
     If RECORD_ADJUSTMENTS is true, avoid too many interval extensions.
     Return true if the record was changed.  */
  bool insert_access (modref_access_node a, size_t max_accesses,
                      bool record_adjustments)
    {
      /* If this base->ref pair has no access information, bail out.  */
      if (every_access)
        return false;

      /* Otherwise, insert a node for the ref of the access under the base.  */
      size_t i, j;
      modref_access_node *a2;

      if (flag_checking)
        verify ();

      if (!a.useful_p ())
        {
          if (!every_access)
            {
              collapse ();
              return true;
            }
          return false;
        }

      FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
        {
          if (a2->contains (a))
            return false;
          if (a.contains (*a2))
            {
              a.adjustments = 0;
              a2->parm_index = a.parm_index;
              a2->parm_offset_known = a.parm_offset_known;
              a2->update (a.parm_offset, a.offset, a.size, a.max_size,
                          record_adjustments);
              try_merge_with (i);
              return true;
            }
          if (a2->merge (a, record_adjustments))
            {
              try_merge_with (i);
              return true;
            }
          gcc_checking_assert (!(a == *a2));
        }

      /* If this base->ref pair has too many accesses stored, we will clear
         all accesses and bail out.  */
      if (accesses && accesses->length () >= max_accesses)
        {
          if (max_accesses < 2)
            {
              collapse ();
              if (dump_file)
                fprintf (dump_file,
                         "--param param=modref-max-accesses limit reached;"
                         " collapsing\n");
              return true;
            }
          /* Find the least harmful merge and perform it.  */
          int best1 = -1, best2 = -1;
          FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
            {
              for (j = i + 1; j < accesses->length (); j++)
                if (best1 < 0
                    || modref_access_node::closer_pair_p
                         (*a2, (*accesses)[j],
                          (*accesses)[best1],
                          best2 < 0 ? a : (*accesses)[best2]))
                  {
                    best1 = i;
                    best2 = j;
                  }
              if (modref_access_node::closer_pair_p
                    (*a2, a,
                     (*accesses)[best1],
                     best2 < 0 ? a : (*accesses)[best2]))
                {
                  best1 = i;
                  best2 = -1;
                }
            }
          (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
                                           record_adjustments);
          /* Check that merging indeed merged ranges.  */
          gcc_checking_assert ((*accesses)[best1].contains
                                 (best2 < 0 ? a : (*accesses)[best2]));
          if (!(*accesses)[best1].useful_p ())
            {
              collapse ();
              if (dump_file)
                fprintf (dump_file,
                         "--param param=modref-max-accesses limit reached;"
                         " collapsing\n");
              return true;
            }
          if (dump_file && best2 >= 0)
            fprintf (dump_file,
                     "--param param=modref-max-accesses limit reached;"
                     " merging %i and %i\n", best1, best2);
          else if (dump_file)
            fprintf (dump_file,
                     "--param param=modref-max-accesses limit reached;"
                     " merging with %i\n", best1);
          try_merge_with (best1);
          if (best2 >= 0)
            insert_access (a, max_accesses, record_adjustments);
          return true;
        }
      a.adjustments = 0;
      vec_safe_push (accesses, a);
      return true;
    }

private:
  /* Try to optimize the access list after entry INDEX was modified.  */
  void
  try_merge_with (size_t index)
    {
      size_t i;

      for (i = 0; i < accesses->length ();)
        if (i != index)
          {
            bool found = false, restart = false;
            modref_access_node *a = &(*accesses)[i];
            modref_access_node *n = &(*accesses)[index];

            if (n->contains (*a))
              found = true;
            if (!found && n->merge (*a, false))
              found = restart = true;
            gcc_checking_assert (found || !a->merge (*n, false));
            if (found)
              {
                accesses->unordered_remove (i);
                if (index == accesses->length ())
                  {
                    index = i;
                    i++;
                  }
                if (restart)
                  i = 0;
              }
            else
              i++;
          }
        else
          i++;
    }
};

/* Base of an access.  */
template <typename T>
struct GTY((user)) modref_base_node
{
  T base;
  vec <modref_ref_node <T> *, va_gc> *refs;
  bool every_ref;

  modref_base_node (T base):
    base (base),
    refs (NULL),
    every_ref (false) {}

  /* Search REF; return NULL if failed.  */
  modref_ref_node <T> *search (T ref)
    {
      size_t i;
      modref_ref_node <T> *n;
      FOR_EACH_VEC_SAFE_ELT (refs, i, n)
        if (n->ref == ref)
          return n;
      return NULL;
    }

  /* Insert REF; collapse tree if there are more than MAX_REFS.
     Return the inserted ref and if CHANGED is non-NULL set it to true if
     something changed.  */
  modref_ref_node <T> *insert_ref (T ref, size_t max_refs,
                                   bool *changed = NULL)
    {
      modref_ref_node <T> *ref_node;

      /* If the node is collapsed, don't do anything.  */
      if (every_ref)
        return NULL;

      /* Otherwise, insert a node for the ref of the access under the base.  */
      ref_node = search (ref);
      if (ref_node)
        return ref_node;

      /* We always allow inserting ref 0.  For non-0 refs there is an upper
         limit on the number of entries and if it is exceeded,
         drop the ref conservatively to 0.  */
      if (ref && refs && refs->length () >= max_refs)
        {
          if (dump_file)
            fprintf (dump_file, "--param param=modref-max-refs limit reached;"
                     " using 0\n");
          ref = 0;
          ref_node = search (ref);
          if (ref_node)
            return ref_node;
        }

      if (changed)
        *changed = true;

      ref_node = new (ggc_alloc <modref_ref_node <T> > ()) modref_ref_node <T>
                                                             (ref);
      vec_safe_push (refs, ref_node);
      return ref_node;
    }

  void collapse ()
    {
      size_t i;
      modref_ref_node <T> *r;

      if (refs)
        {
          FOR_EACH_VEC_SAFE_ELT (refs, i, r)
            {
              r->collapse ();
              ggc_free (r);
            }
          vec_free (refs);
        }
      refs = NULL;
      every_ref = true;
    }
};

/* Map translating parameters across function call.  */

struct modref_parm_map
{
  /* Index of parameter we translate to.
     -1 indicates that the parameter is unknown;
     -2 indicates that the parameter points to local memory and the access
        can be discarded.  */
  int parm_index;
  bool parm_offset_known;
  poly_int64 parm_offset;
};
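
/* For example (illustrative only): when a callee access is based on one of
   the callee's parameters and the caller passes its own parameter 0 plus
   16 bytes in that position, the corresponding entry would be
   {0, true, 16}, i.e. parm_index 0, parm_offset_known true, parm_offset
   16; modref_tree::merge below then adds 16 to the access's parm_offset.  */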

/* Access tree for a single function.  */
template <typename T>
struct GTY((user)) modref_tree
{
  vec <modref_base_node <T> *, va_gc> *bases;
  size_t max_bases;
  size_t max_refs;
  size_t max_accesses;
  bool every_base;

  modref_tree (size_t max_bases, size_t max_refs, size_t max_accesses):
    bases (NULL),
    max_bases (max_bases),
    max_refs (max_refs),
    max_accesses (max_accesses),
    every_base (false) {}

  /* Insert BASE; if the table is full, fall back to REF and then to 0.
     Return the inserted base and if CHANGED is non-NULL set it to true if
     something changed.  */

  modref_base_node <T> *insert_base (T base, T ref, bool *changed = NULL)
    {
      modref_base_node <T> *base_node;

      /* If the node is collapsed, don't do anything.  */
      if (every_base)
        return NULL;

      /* Otherwise, insert a node for the base of the access into the tree.  */
      base_node = search (base);
      if (base_node)
        return base_node;

      /* We always allow inserting base 0.  For non-0 bases there is an upper
         limit on the number of entries and if it is exceeded,
         drop the base conservatively to ref and if it still does not fit
         to 0.  */
      if (base && bases && bases->length () >= max_bases)
        {
          base_node = search (ref);
          if (base_node)
            {
              if (dump_file)
                fprintf (dump_file, "--param param=modref-max-bases"
                         " limit reached; using ref\n");
              return base_node;
            }
          if (dump_file)
            fprintf (dump_file, "--param param=modref-max-bases"
                     " limit reached; using 0\n");
          base = 0;
          base_node = search (base);
          if (base_node)
            return base_node;
        }

      if (changed)
        *changed = true;

      base_node = new (ggc_alloc <modref_base_node <T> > ())
                    modref_base_node <T> (base);
      vec_safe_push (bases, base_node);
      return base_node;
    }

  /* Insert memory access to the tree.
     Return true if something changed.  */
  bool insert (T base, T ref, modref_access_node a,
               bool record_adjustments)
    {
      if (every_base)
        return false;

      bool changed = false;

      /* No useful information tracked; collapse everything.  */
      if (!base && !ref && !a.useful_p ())
        {
          collapse ();
          return true;
        }

      modref_base_node <T> *base_node = insert_base (base, ref, &changed);
      base = base_node->base;
      /* If the table got full we may end up with a useless base.  */
      if (!base && !ref && !a.useful_p ())
        {
          collapse ();
          return true;
        }
      if (base_node->every_ref)
        return changed;
      gcc_checking_assert (search (base) != NULL);

      /* No useful ref info tracked; collapse base.  */
      if (!ref && !a.useful_p ())
        {
          base_node->collapse ();
          return true;
        }

      modref_ref_node <T> *ref_node = base_node->insert_ref (ref, max_refs,
                                                             &changed);
      ref = ref_node->ref;

      if (ref_node->every_access)
        return changed;
      changed |= ref_node->insert_access (a, max_accesses,
                                          record_adjustments);
      /* See if we failed to add a useful access.  */
      if (ref_node->every_access)
        {
          /* Collapse everything if there is no useful base and ref.  */
          if (!base && !ref)
            {
              collapse ();
              gcc_checking_assert (changed);
            }
          /* Collapse base if there is no useful ref.  */
          else if (!ref)
            {
              base_node->collapse ();
              gcc_checking_assert (changed);
            }
        }
      return changed;
    }

  /* Remove tree branches that are not useful (i.e. they will always pass).  */

  void cleanup ()
    {
      size_t i, j;
      modref_base_node <T> *base_node;
      modref_ref_node <T> *ref_node;

      if (!bases)
        return;

      for (i = 0; vec_safe_iterate (bases, i, &base_node);)
        {
          if (base_node->refs)
            for (j = 0; vec_safe_iterate (base_node->refs, j, &ref_node);)
              {
                if (!ref_node->every_access
                    && (!ref_node->accesses
                        || !ref_node->accesses->length ()))
                  {
                    base_node->refs->unordered_remove (j);
                    vec_free (ref_node->accesses);
                    ggc_delete (ref_node);
                  }
                else
                  j++;
              }
          if (!base_node->every_ref
              && (!base_node->refs || !base_node->refs->length ()))
            {
              bases->unordered_remove (i);
              vec_free (base_node->refs);
              ggc_delete (base_node);
            }
          else
            i++;
        }
      if (bases && !bases->length ())
        {
          vec_free (bases);
          bases = NULL;
        }
    }

  /* Merge OTHER into the tree.
     PARM_MAP, if non-NULL, maps parm indexes of callee to caller.  -2 is used
     to signal that the parameter is local and does not need to be tracked.
     Return true if something has changed.  */
  bool merge (modref_tree <T> *other, vec <modref_parm_map> *parm_map,
              bool record_accesses)
    {
      if (!other || every_base)
        return false;
      if (other->every_base)
        {
          collapse ();
          return true;
        }

      bool changed = false;
      size_t i, j, k;
      modref_base_node <T> *base_node, *my_base_node;
      modref_ref_node <T> *ref_node;
      modref_access_node *access_node;
      bool release = false;

      /* For self-recursive functions we may end up merging summary into
         itself; produce a copy first so we do not modify the summary under
         our own hands.  */
      if (other == this)
        {
          release = true;
          other = modref_tree<T>::create_ggc (max_bases, max_refs,
                                              max_accesses);
          other->copy_from (this);
        }

      FOR_EACH_VEC_SAFE_ELT (other->bases, i, base_node)
        {
          if (base_node->every_ref)
            {
              my_base_node = insert_base (base_node->base, 0, &changed);
              if (my_base_node && !my_base_node->every_ref)
                {
                  my_base_node->collapse ();
                  cleanup ();
                  changed = true;
                }
            }
          else
            FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
              {
                if (ref_node->every_access)
                  {
                    changed |= insert (base_node->base,
                                       ref_node->ref,
                                       unspecified_modref_access_node,
                                       record_accesses);
                  }
                else
                  FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
                    {
                      modref_access_node a = *access_node;

                      if (a.parm_index != -1 && parm_map)
                        {
                          if (a.parm_index >= (int)parm_map->length ())
                            a.parm_index = -1;
                          else if ((*parm_map) [a.parm_index].parm_index == -2)
                            continue;
                          else
                            {
                              a.parm_offset
                                += (*parm_map) [a.parm_index].parm_offset;
                              a.parm_offset_known
                                &= (*parm_map)
                                   [a.parm_index].parm_offset_known;
                              a.parm_index
                                = (*parm_map) [a.parm_index].parm_index;
                            }
                        }
                      changed |= insert (base_node->base, ref_node->ref, a,
                                         record_accesses);
                    }
              }
        }
      if (release)
        ggc_delete (other);
      return changed;
    }

  /* Copy OTHER to THIS.  */
  void copy_from (modref_tree <T> *other)
    {
      merge (other, NULL, false);
    }

  /* Search BASE in tree; return NULL if failed.  */
  modref_base_node <T> *search (T base)
    {
      size_t i;
      modref_base_node <T> *n;
      FOR_EACH_VEC_SAFE_ELT (bases, i, n)
        if (n->base == base)
          return n;
      return NULL;
    }

  /* Return true if tree contains access to global memory.  */
  bool global_access_p ()
    {
      size_t i, j, k;
      modref_base_node <T> *base_node;
      modref_ref_node <T> *ref_node;
      modref_access_node *access_node;
      if (every_base)
        return true;
      FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
        {
          if (base_node->every_ref)
            return true;
          FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
            {
              if (ref_node->every_access)
                return true;
              FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
                if (access_node->parm_index < 0)
                  return true;
            }
        }
      return false;
    }

  /* Return a ggc-allocated instance.  We explicitly call destructors via
     ggc_delete and do not want finalizers to be registered and
     called at garbage collection time.  */
  static modref_tree<T> *create_ggc (size_t max_bases, size_t max_refs,
                                     size_t max_accesses)
    {
      return new (ggc_alloc_no_dtor<modref_tree<T>> ())
               modref_tree<T> (max_bases, max_refs, max_accesses);
    }

  /* Remove all records and mark tree to alias with everything.  */
  void collapse ()
    {
      size_t i;
      modref_base_node <T> *n;

      if (bases)
        {
          FOR_EACH_VEC_SAFE_ELT (bases, i, n)
            {
              n->collapse ();
              ggc_free (n);
            }
          vec_free (bases);
        }
      bases = NULL;
      every_base = true;
    }

  /* Release memory.  */
  ~modref_tree ()
    {
      collapse ();
    }

  /* Update parameter indexes in the tree according to MAP.  */
  void
  remap_params (vec <int> *map)
    {
      size_t i;
      modref_base_node <T> *base_node;
      FOR_EACH_VEC_SAFE_ELT (bases, i, base_node)
        {
          size_t j;
          modref_ref_node <T> *ref_node;
          FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node)
            {
              size_t k;
              modref_access_node *access_node;
              FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node)
                if (access_node->parm_index >= 0)
                  {
                    if (access_node->parm_index < (int)map->length ())
                      access_node->parm_index
                        = (*map)[access_node->parm_index];
                    else
                      access_node->parm_index = -1;
                  }
            }
        }
    }
};
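
/* A minimal usage sketch (illustrative only; the real callers live in the
   modref pass itself):

     modref_tree<int> *t = modref_tree<int>::create_ggc (8, 16, 32);
     modref_access_node a = {0, 32, 32, 0, 0, true, 0};
     t->insert (base_alias_set, ref_alias_set, a, false);
     if (t->global_access_p ())
       ...

   where base_alias_set and ref_alias_set stand for hypothetical alias-set
   numbers supplied by the caller.  */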

void modref_c_tests ();

void gt_ggc_mx (modref_tree <int>* const&);
void gt_ggc_mx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&);
void gt_pch_nx (modref_tree <tree_node*>* const&);
void gt_pch_nx (modref_tree <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_tree <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_base_node <int>*);
void gt_ggc_mx (modref_base_node <tree_node*>* &);
void gt_pch_nx (modref_base_node <int>* const&);
void gt_pch_nx (modref_base_node <tree_node*>* const&);
void gt_pch_nx (modref_base_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_base_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

void gt_ggc_mx (modref_ref_node <int>*);
void gt_ggc_mx (modref_ref_node <tree_node*>* &);
void gt_pch_nx (modref_ref_node <int>* const&);
void gt_pch_nx (modref_ref_node <tree_node*>* const&);
void gt_pch_nx (modref_ref_node <int>* const&, gt_pointer_operator op,
		void *cookie);
void gt_pch_nx (modref_ref_node <tree_node*>* const&, gt_pointer_operator op,
		void *cookie);

#endif