/* Data structure for the modref pass.
   Copyright (C) 2020-2022 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "tree.h"
26 #include "ipa-modref-tree.h"
27 #include "selftest.h"
28 #include "tree-ssa-alias.h"
29 #include "gimple.h"
30 #include "cgraph.h"
31 #include "tree-streamer.h"
/* Return true if both accesses are the same.  */
bool
modref_access_node::operator == (modref_access_node &a) const
{
  if (parm_index != a.parm_index)
    return false;
  if (parm_index != MODREF_UNKNOWN_PARM
      && parm_index != MODREF_GLOBAL_MEMORY_PARM)
    {
      if (parm_offset_known != a.parm_offset_known)
        return false;
      if (parm_offset_known
          && !known_eq (parm_offset, a.parm_offset))
        return false;
    }
  if (range_info_useful_p () != a.range_info_useful_p ())
    return false;
  if (range_info_useful_p ()
      && (!known_eq (a.offset, offset)
          || !known_eq (a.size, size)
          || !known_eq (a.max_size, max_size)))
    return false;
  return true;
}

/* Return true if A is a subaccess.  */
bool
modref_access_node::contains (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_offset_known)
        {
          if (!a.parm_offset_known)
            return false;
          /* Accesses are never below parm_offset, so look
             for smaller offset.
             If access ranges are known still allow merging
             when bit offsets comparison passes.  */
          if (!known_le (parm_offset, a.parm_offset)
              && !range_info_useful_p ())
            return false;
          /* We allow negative aoffset_adj here in case
             there is a useful range.  This is because adding
             a.offset may result in non-negative offset again.
             Ubsan fails on val << LOG_BITS_PER_UNIT where val
             is negative.  */
          aoffset_adj = (a.parm_offset - parm_offset)
                        * BITS_PER_UNIT;
        }
    }
  if (range_info_useful_p ())
    {
      if (!a.range_info_useful_p ())
        return false;
      /* Sizes of stores are used to check that object is big enough
         to fit the store, so smaller or unknown store is more general
         than large store.  */
      if (known_size_p (size)
          && (!known_size_p (a.size)
              || !known_le (size, a.size)))
        return false;
      if (known_size_p (max_size))
        return known_subrange_p (a.offset + aoffset_adj,
                                 a.max_size, offset, max_size);
      else
        return known_le (offset, a.offset + aoffset_adj);
    }
  return true;
}

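/* Editor's note (illustrative, not part of the original sources): parm_offset
   is measured in bytes while offset, size and max_size are in bits, hence the
   BITS_PER_UNIT scaling above.  For example, an access with parm_offset 0,
   offset 0, size 16 and max_size 64 contains an access with parm_offset 2,
   offset 0, size 16 and max_size 16: the latter starts 2 * 8 = 16 bits into
   the former range [0, 64) and fits entirely within it.  */
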
/* Update access range to new parameters.
   If RECORD_ADJUSTMENTS is true, record number of changes in the access
   and if threshold is exceeded start dropping precision
   so only constantly many updates are possible.  This makes the dataflow
   converge.  */
void
modref_access_node::update (poly_int64 parm_offset1,
                            poly_int64 offset1, poly_int64 size1,
                            poly_int64 max_size1, bool record_adjustments)
{
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, size1)
      && known_eq (max_size, max_size1))
    return;
  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      size = size1;
      max_size = max_size1;
    }
  else
    {
      if (dump_file)
        fprintf (dump_file, "--param modref-max-adjustments limit reached:");
      if (!known_eq (parm_offset, parm_offset1))
        {
          if (dump_file)
            fprintf (dump_file, " parm_offset cleared");
          parm_offset_known = false;
        }
      if (!known_eq (size, size1))
        {
          size = -1;
          if (dump_file)
            fprintf (dump_file, " size cleared");
        }
      if (!known_eq (max_size, max_size1))
        {
          max_size = -1;
          if (dump_file)
            fprintf (dump_file, " max_size cleared");
        }
      if (!known_eq (offset, offset1))
        {
          offset = 0;
          if (dump_file)
            fprintf (dump_file, " offset cleared");
        }
      if (dump_file)
        fprintf (dump_file, "\n");
    }
}

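/* Editor's note (illustrative, not part of the original sources): once an
   access has been adjusted param_modref_max_adjustments times, further
   differing updates stop refining the range; whichever fields still differ
   are instead widened to "unknown" (parm_offset_known cleared, size and
   max_size set to -1, offset set to 0), which bounds the number of state
   changes and lets the IPA dataflow terminate.  */
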
/* Merge in access A if it is possible to do without losing
   precision.  Return true if successful.
   If RECORD_ADJUSTMENTS is true, remember how many times the interval
   was prolonged and punt when there are too many.  */
bool
modref_access_node::merge (const modref_access_node &a,
                           bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this));
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_offset_known)
        {
          if (!a.parm_offset_known)
            return false;
          if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
            return false;
        }
    }
  /* See if we can merge ranges.  */
  if (range_info_useful_p ())
    {
      /* In this case we have containment that should be
         handled earlier.  */
      gcc_checking_assert (a.range_info_useful_p ());

      /* If a.size is less specified than size, merge only
         if intervals are otherwise equivalent.  */
      if (known_size_p (size)
          && (!known_size_p (a.size) || known_lt (a.size, size)))
        {
          if (((known_size_p (max_size) || known_size_p (a.max_size))
               && !known_eq (max_size, a.max_size))
              || !known_eq (offset1, aoffset1))
            return false;
          update (new_parm_offset, offset1, a.size, max_size,
                  record_adjustments);
          return true;
        }
      /* If sizes are same, we can extend the interval.  */
      if ((known_size_p (size) || known_size_p (a.size))
          && !known_eq (size, a.size))
        return false;
      if (known_le (offset1, aoffset1))
        {
          if (!known_size_p (max_size)
              || known_ge (offset1 + max_size, aoffset1))
            {
              update2 (new_parm_offset, offset1, size, max_size,
                       aoffset1, a.size, a.max_size,
                       record_adjustments);
              return true;
            }
        }
      else if (known_le (aoffset1, offset1))
        {
          if (!known_size_p (a.max_size)
              || known_ge (aoffset1 + a.max_size, offset1))
            {
              update2 (new_parm_offset, offset1, size, max_size,
                       aoffset1, a.size, a.max_size,
                       record_adjustments);
              return true;
            }
        }
      return false;
    }
  update (new_parm_offset, offset1,
          size, max_size, record_adjustments);
  return true;
}

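/* Editor's note (illustrative, not part of the original sources): two
   accesses of equal known size whose ranges touch or overlap are combined
   into one interval.  For example, bit ranges [0, 32) and [32, 64), both of
   size 32, merge into [0, 64); ranges [0, 32) and [64, 96) are rejected
   here because merging them would have to invent the gap in between.  */
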
/* Return true if A1 and B1 can be merged with lower information
   loss than A2 and B2.
   Assume that no containment or lossless merging is possible.  */
bool
modref_access_node::closer_pair_p (const modref_access_node &a1,
                                   const modref_access_node &b1,
                                   const modref_access_node &a2,
                                   const modref_access_node &b2)
{
  /* Merging different parm indexes comes to complete loss
     of range info.  */
  if (a1.parm_index != b1.parm_index)
    return false;
  if (a2.parm_index != b2.parm_index)
    return true;
  /* If parm is known and parm indexes are the same we should
     already have containment.  */
  gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
  gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

  /* First normalize offsets for parm offsets.  */
  poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
  if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
      || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
    gcc_unreachable ();

  /* Now compute distance of the intervals.  */
  poly_int64 dist1, dist2;
  if (known_le (offseta1, offsetb1))
    {
      if (!known_size_p (a1.max_size))
        dist1 = 0;
      else
        dist1 = offsetb1 - offseta1 - a1.max_size;
    }
  else
    {
      if (!known_size_p (b1.max_size))
        dist1 = 0;
      else
        dist1 = offseta1 - offsetb1 - b1.max_size;
    }
  if (known_le (offseta2, offsetb2))
    {
      if (!known_size_p (a2.max_size))
        dist2 = 0;
      else
        dist2 = offsetb2 - offseta2 - a2.max_size;
    }
  else
    {
      if (!known_size_p (b2.max_size))
        dist2 = 0;
      else
        dist2 = offseta2 - offsetb2 - b2.max_size;
    }
  /* It may happen that intervals overlap in case size
     is different.  Prefer the overlap to non-overlap.  */
  if (known_lt (dist1, 0) && known_ge (dist2, 0))
    return true;
  if (known_lt (dist2, 0) && known_ge (dist1, 0))
    return false;
  if (known_lt (dist1, 0))
    /* If both overlap, minimize the overlap.  */
    return known_le (dist2, dist1);
  else
    /* If both are disjoint, look for the smaller distance.  */
    return known_le (dist1, dist2);
}

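/* Editor's note (illustrative, not part of the original sources): the
   "distance" computed above is the size of the gap between two disjoint
   intervals after offset normalization.  For bit ranges [0, 32) and
   [96, 128) it is 96 - 0 - 32 = 64; for [0, 32) and [40, 72) it is 8, so
   the latter pair is considered closer and is the cheaper forced merge.  */
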
/* Merge in access A while losing precision.  */
void
modref_access_node::forced_merge (const modref_access_node &a,
                                  bool record_adjustments)
{
  if (parm_index != a.parm_index)
    {
      gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
      parm_index = MODREF_UNKNOWN_PARM;
      return;
    }

  /* We assume that containment and lossless merging
     was tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this)
                       && !merge (a, record_adjustments));
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);

  poly_int64 new_parm_offset, offset1, aoffset1;
  if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    {
      parm_offset_known = false;
      return;
    }
  gcc_checking_assert (range_info_useful_p ()
                       && a.range_info_useful_p ());
  if (record_adjustments)
    adjustments += a.adjustments;
  update2 (new_parm_offset,
           offset1, size, max_size,
           aoffset1, a.size, a.max_size,
           record_adjustments);
}

/* Merge two ranges both starting at parm_offset1 and update THIS
   with result.  */
void
modref_access_node::update2 (poly_int64 parm_offset1,
                             poly_int64 offset1, poly_int64 size1,
                             poly_int64 max_size1,
                             poly_int64 offset2, poly_int64 size2,
                             poly_int64 max_size2,
                             bool record_adjustments)
{
  poly_int64 new_size = size1;

  if (!known_size_p (size2)
      || known_le (size2, size1))
    new_size = size2;
  else
    gcc_checking_assert (known_le (size1, size2));

  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size;

  if (!known_size_p (max_size1))
    new_max_size = max_size1;
  else if (!known_size_p (max_size2))
    new_max_size = max_size2;
  else
    {
      new_max_size = max_size2 + offset2 - offset1;
      if (known_le (new_max_size, max_size1))
        new_max_size = max_size1;
    }

  update (parm_offset1, offset1,
          new_size, new_max_size, record_adjustments);
}

/* Given access nodes THIS and A, return true if they can be described
   using a common parm_offset.  In that case store the common parm offset
   in NEW_PARM_OFFSET, the start of the range in THIS in NEW_OFFSET and
   the start of the range in A in NEW_AOFFSET.  */
bool
modref_access_node::combined_offsets (const modref_access_node &a,
                                      poly_int64 *new_parm_offset,
                                      poly_int64 *new_offset,
                                      poly_int64 *new_aoffset) const
{
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  if (known_le (a.parm_offset, parm_offset))
    {
      *new_offset = offset
                    + ((parm_offset - a.parm_offset)
                       << LOG2_BITS_PER_UNIT);
      *new_aoffset = a.offset;
      *new_parm_offset = a.parm_offset;
      return true;
    }
  else if (known_le (parm_offset, a.parm_offset))
    {
      *new_aoffset = a.offset
                     + ((a.parm_offset - parm_offset)
                        << LOG2_BITS_PER_UNIT);
      *new_offset = offset;
      *new_parm_offset = parm_offset;
      return true;
    }
  else
    return false;
}

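/* Editor's note (illustrative, not part of the original sources): the
   smaller byte parm_offset becomes the common base and the other range is
   shifted into it.  E.g. THIS with parm_offset 8, offset 0 and A with
   parm_offset 4, offset 0 normalize to parm_offset 4, NEW_OFFSET
   = 0 + (8 - 4) * 8 = 32 bits and NEW_AOFFSET = 0.  Both comparisons can
   fail only for polynomial offsets whose ordering is not known at compile
   time, in which case no common base is returned.  */
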
/* Try to optimize the access ACCESSES list after entry INDEX was modified.  */
void
modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
                                    size_t index)
{
  size_t i;

  for (i = 0; i < accesses->length ();)
    if (i != index)
      {
        bool found = false, restart = false;
        modref_access_node *a = &(*accesses)[i];
        modref_access_node *n = &(*accesses)[index];

        if (n->contains (*a))
          found = true;
        if (!found && n->merge (*a, false))
          found = restart = true;
        gcc_checking_assert (found || !a->merge (*n, false));
        if (found)
          {
            accesses->unordered_remove (i);
            if (index == accesses->length ())
              {
                index = i;
                i++;
              }
            if (restart)
              i = 0;
          }
        else
          i++;
      }
    else
      i++;
}

/* Stream out to OB.  */

void
modref_access_node::stream_out (struct output_block *ob) const
{
  streamer_write_hwi (ob, parm_index);
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      streamer_write_uhwi (ob, parm_offset_known);
      if (parm_offset_known)
        {
          streamer_write_poly_int64 (ob, parm_offset);
          streamer_write_poly_int64 (ob, offset);
          streamer_write_poly_int64 (ob, size);
          streamer_write_poly_int64 (ob, max_size);
        }
    }
}

/* Stream in from IB.  */

modref_access_node
modref_access_node::stream_in (struct lto_input_block *ib)
{
  int parm_index = streamer_read_hwi (ib);
  bool parm_offset_known = false;
  poly_int64 parm_offset = 0;
  poly_int64 offset = 0;
  poly_int64 size = -1;
  poly_int64 max_size = -1;

  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      parm_offset_known = streamer_read_uhwi (ib);
      if (parm_offset_known)
        {
          parm_offset = streamer_read_poly_int64 (ib);
          offset = streamer_read_poly_int64 (ib);
          size = streamer_read_poly_int64 (ib);
          max_size = streamer_read_poly_int64 (ib);
        }
    }
  return {offset, size, max_size, parm_offset, parm_index,
          parm_offset_known, false};
}

/* Insert access A with OFFSET and SIZE into ACCESSES.
   Collapse the tree if it has more than MAX_ACCESSES entries.
   If RECORD_ADJUSTMENTS is true avoid too many interval extensions.

   Return 0 if nothing changed, 1 if insert was successful and -1
   if entries should be collapsed.  */
int
modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
                            modref_access_node a, size_t max_accesses,
                            bool record_adjustments)
{
  size_t i, j;
  modref_access_node *a2;

  /* Verify that list does not contain redundant accesses.  */
  if (flag_checking)
    {
      size_t i, i2;
      modref_access_node *a, *a2;

      FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
        {
          FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
            if (i != i2)
              gcc_assert (!a->contains (*a2));
        }
    }

  FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
    {
      if (a2->contains (a))
        return 0;
      if (a.contains (*a2))
        {
          a.adjustments = 0;
          a2->parm_index = a.parm_index;
          a2->parm_offset_known = a.parm_offset_known;
          a2->update (a.parm_offset, a.offset, a.size, a.max_size,
                      record_adjustments);
          modref_access_node::try_merge_with (accesses, i);
          return 1;
        }
      if (a2->merge (a, record_adjustments))
        {
          modref_access_node::try_merge_with (accesses, i);
          return 1;
        }
      gcc_checking_assert (!(a == *a2));
    }

  /* If this base->ref pair has too many accesses stored, we will clear
     all accesses and bail out.  */
  if (accesses && accesses->length () >= max_accesses)
    {
      if (max_accesses < 2)
        return -1;
      /* Find least harmful merge and perform it.  */
      int best1 = -1, best2 = -1;
      FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
        {
          for (j = i + 1; j < accesses->length (); j++)
            if (best1 < 0
                || modref_access_node::closer_pair_p
                     (*a2, (*accesses)[j],
                      (*accesses)[best1],
                      best2 < 0 ? a : (*accesses)[best2]))
              {
                best1 = i;
                best2 = j;
              }
          if (modref_access_node::closer_pair_p
                (*a2, a,
                 (*accesses)[best1],
                 best2 < 0 ? a : (*accesses)[best2]))
            {
              best1 = i;
              best2 = -1;
            }
        }
      (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
                                       record_adjustments);
      /* Check that merging indeed merged ranges.  */
      gcc_checking_assert ((*accesses)[best1].contains
                             (best2 < 0 ? a : (*accesses)[best2]));
      if (!(*accesses)[best1].useful_p ())
        return -1;
      if (dump_file && best2 >= 0)
        fprintf (dump_file,
                 "--param modref-max-accesses limit reached;"
                 " merging %i and %i\n", best1, best2);
      else if (dump_file)
        fprintf (dump_file,
                 "--param modref-max-accesses limit reached;"
                 " merging with %i\n", best1);
      modref_access_node::try_merge_with (accesses, best1);
      if (best2 >= 0)
        insert (accesses, a, max_accesses, record_adjustments);
      return 1;
    }
  a.adjustments = 0;
  vec_safe_push (accesses, a);
  return 1;
}

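/* Editor's note (not part of the original sources): per the comment above,
   the result is read as 0 = the access was already covered, 1 = the list
   changed, and -1 = the list cannot usefully stay within
   --param modref-max-accesses (max_accesses below 2, or the forced merge
   produced a non-useful access), so the caller should collapse the
   enclosing ref node instead of tracking individual accesses.  */
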
/* Return true if range info is useful.  */
bool
modref_access_node::range_info_useful_p () const
{
  return parm_index != MODREF_UNKNOWN_PARM
         && parm_index != MODREF_GLOBAL_MEMORY_PARM
         && parm_offset_known
         && (known_size_p (size)
             || known_size_p (max_size)
             || known_ge (offset, 0));
}

/* Dump range to debug OUT.  */
void
modref_access_node::dump (FILE *out)
{
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index == MODREF_GLOBAL_MEMORY_PARM)
        fprintf (out, " Base in global memory");
      else if (parm_index >= 0)
        fprintf (out, " Parm %i", parm_index);
      else if (parm_index == MODREF_STATIC_CHAIN_PARM)
        fprintf (out, " Static chain");
      else
        gcc_unreachable ();
      if (parm_offset_known)
        {
          fprintf (out, " param offset:");
          print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
        }
    }
  if (range_info_useful_p ())
    {
      fprintf (out, " offset:");
      print_dec ((poly_int64_pod)offset, out, SIGNED);
      fprintf (out, " size:");
      print_dec ((poly_int64_pod)size, out, SIGNED);
      fprintf (out, " max_size:");
      print_dec ((poly_int64_pod)max_size, out, SIGNED);
      if (adjustments)
        fprintf (out, " adjusted %i times", adjustments);
    }
  fprintf (out, "\n");
}

/* Return tree corresponding to parameter of the range in STMT.  */
tree
modref_access_node::get_call_arg (const gcall *stmt) const
{
  if (parm_index == MODREF_UNKNOWN_PARM
      || parm_index == MODREF_GLOBAL_MEMORY_PARM)
    return NULL;
  if (parm_index == MODREF_STATIC_CHAIN_PARM)
    return gimple_call_chain (stmt);
  /* MODREF_RETSLOT_PARM should not happen in access trees since the store
     is seen explicitly in the caller.  */
  gcc_checking_assert (parm_index >= 0);
  if (parm_index >= (int)gimple_call_num_args (stmt))
    return NULL;
  return gimple_call_arg (stmt, parm_index);
}

/* Initialize REF to the memory range accessed by this node at call STMT.
   Return true on success, false if the range cannot be represented.  */
bool
modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
{
  tree arg;

  if (!parm_offset_known || !(arg = get_call_arg (stmt)))
    return false;
  poly_offset_int off = (poly_offset_int)offset
        + ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
  poly_int64 off2;
  if (!off.to_shwi (&off2))
    return false;
  ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
  return true;
}

/* Return true if A is a subkill.  */
bool
modref_access_node::contains_for_kills (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;

  gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
                       && a.parm_index != MODREF_UNKNOWN_PARM);
  if (parm_index != a.parm_index)
    return false;
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  aoffset_adj = (a.parm_offset - parm_offset)
                * BITS_PER_UNIT;
  gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
  return known_subrange_p (a.offset + aoffset_adj,
                           a.max_size, offset, max_size);
}

/* Merge two ranges both starting at parm_offset1 and update THIS
   with result.  */
bool
modref_access_node::update_for_kills (poly_int64 parm_offset1,
                                      poly_int64 offset1,
                                      poly_int64 max_size1,
                                      poly_int64 offset2,
                                      poly_int64 max_size2,
                                      bool record_adjustments)
{
  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size = max_size2 + offset2 - offset1;
  if (known_le (new_max_size, max_size1))
    new_max_size = max_size1;
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, new_max_size)
      && known_eq (max_size, new_max_size))
    return false;

  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      max_size = new_max_size;
      size = new_max_size;
      gcc_checking_assert (useful_for_kill_p ());
      return true;
    }
  return false;
}

/* Merge in access A if it is possible to do without losing
   precision.  Return true if successful.
   Unlike merge, assume that both accesses are always executed
   and merge size the same way as max_size.  */
bool
modref_access_node::merge_for_kills (const modref_access_node &a,
                                     bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
                       && useful_for_kill_p () && a.useful_for_kill_p ());

  if (parm_index != a.parm_index
      || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    return false;

  if (known_le (offset1, aoffset1))
    {
      if (!known_size_p (max_size)
          || known_ge (offset1 + max_size, aoffset1))
        return update_for_kills (new_parm_offset, offset1, max_size,
                                 aoffset1, a.max_size, record_adjustments);
    }
  else if (known_le (aoffset1, offset1))
    {
      if (!known_size_p (a.max_size)
          || known_ge (aoffset1 + a.max_size, offset1))
        return update_for_kills (new_parm_offset, offset1, max_size,
                                 aoffset1, a.max_size, record_adjustments);
    }
  return false;
}

/* Insert new kill A into KILLS.  If RECORD_ADJUSTMENTS is true limit number
   of changes to each entry.  Return true if something changed.  */

bool
modref_access_node::insert_kill (vec<modref_access_node> &kills,
                                 modref_access_node &a,
                                 bool record_adjustments)
{
  size_t index;
  modref_access_node *a2;
  bool merge = false;

  gcc_checking_assert (a.useful_for_kill_p ());

  /* See if we have corresponding entry already or we can merge with
     neighboring entry.  */
  FOR_EACH_VEC_ELT (kills, index, a2)
    {
      if (a2->contains_for_kills (a))
        return false;
      if (a.contains_for_kills (*a2))
        {
          a.adjustments = 0;
          *a2 = a;
          merge = true;
          break;
        }
      if (a2->merge_for_kills (a, record_adjustments))
        {
          merge = true;
          break;
        }
    }
  /* If entry was not found, insert it.  */
  if (!merge)
    {
      if ((int)kills.length () >= param_modref_max_accesses)
        {
          if (dump_file)
            fprintf (dump_file, "--param modref-max-accesses limit reached:");
          return false;
        }
      a.adjustments = 0;
      kills.safe_push (a);
      return true;
    }
  /* Extending range in an entry may make it possible to merge it with
     other entries.  */
  size_t i;

  for (i = 0; i < kills.length ();)
    if (i != index)
      {
        bool found = false, restart = false;
        modref_access_node *a = &kills[i];
        modref_access_node *n = &kills[index];

        if (n->contains_for_kills (*a))
          found = true;
        if (!found && n->merge_for_kills (*a, false))
          found = restart = true;
        gcc_checking_assert (found || !a->merge_for_kills (*n, false));
        if (found)
          {
            kills.unordered_remove (i);
            if (index == kills.length ())
              {
                index = i;
                i++;
              }
            if (restart)
              i = 0;
          }
        else
          i++;
      }
    else
      i++;
  return true;
}

#if CHECKING_P

namespace selftest {

static void
test_insert_search_collapse ()
{
  modref_base_node<alias_set_type> *base_node;
  modref_ref_node<alias_set_type> *ref_node;
  modref_access_node a = unspecified_modref_access_node;

  modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
  ASSERT_FALSE (t->every_base);

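  /* Editor's note (not part of the original sources): in these selftests the
     first three insert arguments are the limits on the number of bases, refs
     and accesses kept before the corresponding list is collapsed; the next
     two are the base and ref alias sets being recorded.  */
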
  /* Insert into an empty tree.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_FALSE (t->every_base);
  ASSERT_EQ (t->search (2), NULL);

  base_node = t->search (1);
  ASSERT_NE (base_node, NULL);
  ASSERT_EQ (base_node->base, 1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 1);
  ASSERT_EQ (base_node->search (1), NULL);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);
  ASSERT_EQ (ref_node->ref, 2);

  /* Insert when base exists but ref does not.  */
  t->insert (1, 2, 2, 1, 3, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (t->search (2), NULL);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 2);

  ref_node = base_node->search (3);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist, but access is not dominated by nor
     dominates other accesses.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist and access is dominated.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->search (2), ref_node);

  /* Insert ref to trigger ref list collapse for base 1.  */
  t->insert (1, 2, 2, 1, 4, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Further inserts to collapsed ref list are ignored.  */
  t->insert (1, 2, 2, 1, 5, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Insert base to trigger base list collapse.  */
  t->insert (1, 2, 2, 5, 0, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  /* Further inserts to collapsed base list are ignored.  */
  t->insert (1, 2, 2, 7, 8, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  delete t;
}

static void
test_merge ()
{
  modref_tree<alias_set_type> *t1, *t2;
  modref_base_node<alias_set_type> *base_node;
  modref_access_node a = unspecified_modref_access_node;

  t1 = new modref_tree<alias_set_type>();
  t1->insert (3, 4, 1, 1, 1, a, false);
  t1->insert (3, 4, 1, 1, 2, a, false);
  t1->insert (3, 4, 1, 1, 3, a, false);
  t1->insert (3, 4, 1, 2, 1, a, false);
  t1->insert (3, 4, 1, 3, 1, a, false);

  t2 = new modref_tree<alias_set_type>();
  t2->insert (10, 10, 10, 1, 2, a, false);
  t2->insert (10, 10, 10, 1, 3, a, false);
  t2->insert (10, 10, 10, 1, 4, a, false);
  t2->insert (10, 10, 10, 3, 2, a, false);
  t2->insert (10, 10, 10, 3, 3, a, false);
  t2->insert (10, 10, 10, 3, 4, a, false);
  t2->insert (10, 10, 10, 3, 5, a, false);

  t1->merge (3, 4, 1, t2, NULL, NULL, false);

  ASSERT_FALSE (t1->every_base);
  ASSERT_NE (t1->bases, NULL);
  ASSERT_EQ (t1->bases->length (), 3);

  base_node = t1->search (1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 4);

  base_node = t1->search (2);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 1);

  base_node = t1->search (3);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_TRUE (base_node->every_ref);

  delete t1;
  delete t2;
}

void
ipa_modref_tree_cc_tests ()
{
  test_insert_search_collapse ();
  test_merge ();
}

} // namespace selftest

#endif

void
gt_ggc_mx (modref_tree < int >*const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void
gt_ggc_mx (modref_tree < tree_node * >*const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void gt_pch_nx (modref_tree<int>* const&) {}
void gt_pch_nx (modref_tree<tree_node*>* const&) {}
void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_base_node<int>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
}

void gt_ggc_mx (modref_base_node<tree_node*>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
  if (b->base)
    gt_ggc_mx (b->base);
}

void gt_pch_nx (modref_base_node<int>*) {}
void gt_pch_nx (modref_base_node<tree_node*>*) {}
void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_ref_node<int>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
}

void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
  if (r->ref)
    gt_ggc_mx (r->ref);
}

void gt_pch_nx (modref_ref_node<int>*) {}
void gt_pch_nx (modref_ref_node<tree_node*>*) {}
void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_access_node &)
{
}