/* Data structure for the modref pass.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "ipa-modref-tree.h"
#include "selftest.h"
#include "tree-ssa-alias.h"
#include "gimple.h"
#include "cgraph.h"
#include "tree-streamer.h"

/* Return true if both accesses are the same.  */
bool
modref_access_node::operator == (modref_access_node &a) const
{
  if (parm_index != a.parm_index)
    return false;
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_offset_known != a.parm_offset_known)
	return false;
      if (parm_offset_known
	  && !known_eq (parm_offset, a.parm_offset))
	return false;
    }
  if (range_info_useful_p () != a.range_info_useful_p ())
    return false;
  if (range_info_useful_p ()
      && (!known_eq (a.offset, offset)
	  || !known_eq (a.size, size)
	  || !known_eq (a.max_size, max_size)))
    return false;
  return true;
}

/* Return true if A is a subaccess.  */
bool
modref_access_node::contains (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
	return false;
      if (parm_offset_known)
	{
	  if (!a.parm_offset_known)
	    return false;
	  /* Accesses are never below parm_offset, so look
	     for smaller offset.
	     If access ranges are known, still allow merging
	     when the bit offset comparison passes.  */
	  if (!known_le (parm_offset, a.parm_offset)
	      && !range_info_useful_p ())
	    return false;
	  /* We allow negative aoffset_adj here in case
	     there is a useful range.  This is because adding
	     a.offset may result in a non-negative offset again.
	     Ubsan fails on val << LOG_BITS_PER_UNIT where val
	     is negative.  */
	  aoffset_adj = (a.parm_offset - parm_offset)
			* BITS_PER_UNIT;
	}
    }
  if (range_info_useful_p ())
    {
      if (!a.range_info_useful_p ())
	return false;
      /* Sizes of stores are used to check that the object is big enough
	 to fit the store, so a smaller or unknown store is more general
	 than a large store.  */
      if (known_size_p (size)
	  && (!known_size_p (a.size)
	      || !known_le (size, a.size)))
	return false;
      if (known_size_p (max_size))
	return known_subrange_p (a.offset + aoffset_adj,
				 a.max_size, offset, max_size);
      else
	return known_le (offset, a.offset + aoffset_adj);
    }
  return true;
}
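
/* As an example of the containment test above: assuming equal parm_index
   and parm_offset, an access with offset 0, size 8 and max_size 64 (all in
   bits) contains an access with offset 8, size 8 and max_size 16, since the
   store size still fits and [8, 24) is a subrange of [0, 64).  */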

/* Update access range to new parameters.
   If RECORD_ADJUSTMENTS is true, record the number of changes in the access
   and once the threshold is exceeded start dropping precision
   so only a constant number of updates is possible.  This makes the
   dataflow converge.  */
void
modref_access_node::update (poly_int64 parm_offset1,
			    poly_int64 offset1, poly_int64 size1,
			    poly_int64 max_size1, bool record_adjustments)
{
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, size1)
      && known_eq (max_size, max_size1))
    return;
  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      size = size1;
      max_size = max_size1;
    }
  else
    {
      if (dump_file)
	fprintf (dump_file,
		 "--param param=modref-max-adjustments limit reached:");
      if (!known_eq (parm_offset, parm_offset1))
	{
	  if (dump_file)
	    fprintf (dump_file, " parm_offset cleared");
	  parm_offset_known = false;
	}
      if (!known_eq (size, size1))
	{
	  size = -1;
	  if (dump_file)
	    fprintf (dump_file, " size cleared");
	}
      if (!known_eq (max_size, max_size1))
	{
	  max_size = -1;
	  if (dump_file)
	    fprintf (dump_file, " max_size cleared");
	}
      if (!known_eq (offset, offset1))
	{
	  offset = 0;
	  if (dump_file)
	    fprintf (dump_file, " offset cleared");
	}
      if (dump_file)
	fprintf (dump_file, "\n");
    }
}
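
/* The cap above is what makes the dataflow converge: once an access has been
   adjusted param_modref_max_adjustments times, further updates only drop
   individual fields to the unknown state (offset 0, size/max_size -1,
   parm_offset_known false), which can happen at most once per field.  */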

/* Merge in access A if it is possible to do without losing
   precision.  Return true if successful.
   If RECORD_ADJUSTMENTS is true, remember how many intervals
   were prolonged and punt when there are too many.  */
bool
modref_access_node::merge (const modref_access_node &a,
			   bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this));
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
	return false;
      if (parm_offset_known)
	{
	  if (!a.parm_offset_known)
	    return false;
	  if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
	    return false;
	}
    }
  /* See if we can merge ranges.  */
  if (range_info_useful_p ())
    {
      /* In this case we have containment that should be
	 handled earlier.  */
      gcc_checking_assert (a.range_info_useful_p ());

      /* If a.size is less specified than size, merge only
	 if intervals are otherwise equivalent.  */
      if (known_size_p (size)
	  && (!known_size_p (a.size) || known_lt (a.size, size)))
	{
	  if (((known_size_p (max_size) || known_size_p (a.max_size))
	       && !known_eq (max_size, a.max_size))
	      || !known_eq (offset1, aoffset1))
	    return false;
	  update (new_parm_offset, offset1, a.size, max_size,
		  record_adjustments);
	  return true;
	}
      /* If sizes are same, we can extend the interval.  */
      if ((known_size_p (size) || known_size_p (a.size))
	  && !known_eq (size, a.size))
	return false;
      if (known_le (offset1, aoffset1))
	{
	  if (!known_size_p (max_size)
	      || known_ge (offset1 + max_size, aoffset1))
	    {
	      update2 (new_parm_offset, offset1, size, max_size,
		       aoffset1, a.size, a.max_size,
		       record_adjustments);
	      return true;
	    }
	}
      else if (known_le (aoffset1, offset1))
	{
	  if (!known_size_p (a.max_size)
	      || known_ge (aoffset1 + a.max_size, offset1))
	    {
	      update2 (new_parm_offset, offset1, size, max_size,
		       aoffset1, a.size, a.max_size,
		       record_adjustments);
	      return true;
	    }
	}
      return false;
    }
  update (new_parm_offset, offset1,
	  size, max_size, record_adjustments);
  return true;
}
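
/* For instance, two accesses with equal size 32 covering bit ranges [0, 32)
   and [32, 64) of the same parameter merge losslessly into a single access
   with offset 0, size 32 and max_size 64; ranges that are disjoint with a
   gap in between are rejected here and left to forced_merge.  */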

/* Return true if A1 and B1 can be merged with lower information
   loss than A2 and B2.
   Assume that no containment or lossless merging is possible.  */
bool
modref_access_node::closer_pair_p (const modref_access_node &a1,
				   const modref_access_node &b1,
				   const modref_access_node &a2,
				   const modref_access_node &b2)
{
  /* Merging different parm indexes comes to complete loss
     of range info.  */
  if (a1.parm_index != b1.parm_index)
    return false;
  if (a2.parm_index != b2.parm_index)
    return true;
  /* If parm is known and parm indexes are the same we should
     already have containment.  */
  gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
  gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

  /* First normalize offsets for parm offsets.  */
  poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
  if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
      || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
    gcc_unreachable ();

  /* Now compute distance of the intervals.  */
  poly_int64 dist1, dist2;
  if (known_le (offseta1, offsetb1))
    {
      if (!known_size_p (a1.max_size))
	dist1 = 0;
      else
	dist1 = offsetb1 - offseta1 - a1.max_size;
    }
  else
    {
      if (!known_size_p (b1.max_size))
	dist1 = 0;
      else
	dist1 = offseta1 - offsetb1 - b1.max_size;
    }
  if (known_le (offseta2, offsetb2))
    {
      if (!known_size_p (a2.max_size))
	dist2 = 0;
      else
	dist2 = offsetb2 - offseta2 - a2.max_size;
    }
  else
    {
      if (!known_size_p (b2.max_size))
	dist2 = 0;
      else
	dist2 = offseta2 - offsetb2 - b2.max_size;
    }
  /* It may happen that intervals overlap in case size
     is different.  Prefer the overlap to non-overlap.  */
  if (known_lt (dist1, 0) && known_ge (dist2, 0))
    return true;
  if (known_lt (dist2, 0) && known_ge (dist1, 0))
    return false;
  if (known_lt (dist1, 0))
    /* If both overlap, minimize the overlap.  */
    return known_le (dist2, dist1);
  else
    /* If both are disjoint, look for the smaller distance.  */
    return known_le (dist1, dist2);
}
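
/* As a concrete case of the distance metric above: merging bit ranges
   [0, 32) and [40, 72) leaves a gap of 8 bits, while merging [0, 32) and
   [64, 96) leaves a gap of 32 bits, so the first pair is considered the
   closer (less lossy) one.  */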

/* Merge in access A while losing precision.  */
void
modref_access_node::forced_merge (const modref_access_node &a,
				  bool record_adjustments)
{
  if (parm_index != a.parm_index)
    {
      gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
      parm_index = MODREF_UNKNOWN_PARM;
      return;
    }

  /* We assume that containment and lossless merging
     were tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this)
		       && !merge (a, record_adjustments));
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);

  poly_int64 new_parm_offset, offset1, aoffset1;
  if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    {
      parm_offset_known = false;
      return;
    }
  gcc_checking_assert (range_info_useful_p ()
		       && a.range_info_useful_p ());
  if (record_adjustments)
    adjustments += a.adjustments;
  update2 (new_parm_offset,
	   offset1, size, max_size,
	   aoffset1, a.size, a.max_size,
	   record_adjustments);
}

/* Merge two ranges both starting at parm_offset1 and update THIS
   with the result.  */
void
modref_access_node::update2 (poly_int64 parm_offset1,
			     poly_int64 offset1, poly_int64 size1,
			     poly_int64 max_size1,
			     poly_int64 offset2, poly_int64 size2,
			     poly_int64 max_size2,
			     bool record_adjustments)
{
  poly_int64 new_size = size1;

  if (!known_size_p (size2)
      || known_le (size2, size1))
    new_size = size2;
  else
    gcc_checking_assert (known_le (size1, size2));

  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size;

  if (!known_size_p (max_size1))
    new_max_size = max_size1;
  else if (!known_size_p (max_size2))
    new_max_size = max_size2;
  else
    {
      new_max_size = max_size2 + offset2 - offset1;
      if (known_le (new_max_size, max_size1))
	new_max_size = max_size1;
    }

  update (parm_offset1, offset1,
	  new_size, new_max_size, record_adjustments);
}
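
/* Worked example: merging a range with offset 0, max_size 32 and a range
   with offset 16, max_size 64 keeps the smaller offset 0 and extends
   max_size to 64 + 16 - 0 = 80 bits, i.e. the union [0, 80); the smaller or
   unknown (more general) of the two store sizes is kept as the new size.  */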

/* Given access nodes THIS and A, return true if they can be described with
   a common parm_offset.  In this case store the common parm offset to
   NEW_PARM_OFFSET, the start of THIS's range to NEW_OFFSET and the start
   of A's range to NEW_AOFFSET.  */
bool
modref_access_node::combined_offsets (const modref_access_node &a,
				      poly_int64 *new_parm_offset,
				      poly_int64 *new_offset,
				      poly_int64 *new_aoffset) const
{
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  if (known_le (a.parm_offset, parm_offset))
    {
      *new_offset = offset
		    + ((parm_offset - a.parm_offset)
		       << LOG2_BITS_PER_UNIT);
      *new_aoffset = a.offset;
      *new_parm_offset = a.parm_offset;
      return true;
    }
  else if (known_le (parm_offset, a.parm_offset))
    {
      *new_aoffset = a.offset
		     + ((a.parm_offset - parm_offset)
			<< LOG2_BITS_PER_UNIT);
      *new_offset = offset;
      *new_parm_offset = parm_offset;
      return true;
    }
  else
    return false;
}
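
/* Example of the normalization above: if THIS has parm_offset 4 (bytes)
   with bit offset 0 and A has parm_offset 2 with bit offset 8, the common
   base becomes parm_offset 2, THIS's offset is rebased to
   0 + (4 - 2) * BITS_PER_UNIT (= 16 with 8-bit units) and A's offset
   stays 8.  */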

/* Try to optimize the access ACCESSES list after entry INDEX was modified.  */
void
modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
				    size_t index)
{
  size_t i;

  for (i = 0; i < accesses->length ();)
    if (i != index)
      {
	bool found = false, restart = false;
	modref_access_node *a = &(*accesses)[i];
	modref_access_node *n = &(*accesses)[index];

	if (n->contains (*a))
	  found = true;
	if (!found && n->merge (*a, false))
	  found = restart = true;
	gcc_checking_assert (found || !a->merge (*n, false));
	if (found)
	  {
	    accesses->unordered_remove (i);
	    if (index == accesses->length ())
	      {
		index = i;
		i++;
	      }
	    if (restart)
	      i = 0;
	  }
	else
	  i++;
      }
    else
      i++;
}

/* Stream out to OB.  */

void
modref_access_node::stream_out (struct output_block *ob) const
{
  streamer_write_hwi (ob, parm_index);
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      streamer_write_uhwi (ob, parm_offset_known);
      if (parm_offset_known)
	{
	  streamer_write_poly_int64 (ob, parm_offset);
	  streamer_write_poly_int64 (ob, offset);
	  streamer_write_poly_int64 (ob, size);
	  streamer_write_poly_int64 (ob, max_size);
	}
    }
}
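
/* Stream in an access node from IB.  The fields are read in exactly the
   order stream_out writes them above; range info is only present when
   parm_index is known and parm_offset_known was streamed as true.  */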
modref_access_node
modref_access_node::stream_in (struct lto_input_block *ib)
{
  int parm_index = streamer_read_hwi (ib);
  bool parm_offset_known = false;
  poly_int64 parm_offset = 0;
  poly_int64 offset = 0;
  poly_int64 size = -1;
  poly_int64 max_size = -1;

  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      parm_offset_known = streamer_read_uhwi (ib);
      if (parm_offset_known)
	{
	  parm_offset = streamer_read_poly_int64 (ib);
	  offset = streamer_read_poly_int64 (ib);
	  size = streamer_read_poly_int64 (ib);
	  max_size = streamer_read_poly_int64 (ib);
	}
    }
  return {offset, size, max_size, parm_offset, parm_index,
	  parm_offset_known, false};
}

/* Insert access A with OFFSET and SIZE into ACCESSES.
   Collapse the list if it has more than MAX_ACCESSES entries.
   If RECORD_ADJUSTMENTS is true avoid too many interval extensions.

   Return 0 if nothing changed, 1 if insert was successful and -1
   if entries should be collapsed.  */
int
modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
			    modref_access_node a, size_t max_accesses,
			    bool record_adjustments)
{
  size_t i, j;
  modref_access_node *a2;

  /* Verify that list does not contain redundant accesses.  */
  if (flag_checking)
    {
      size_t i, i2;
      modref_access_node *a, *a2;

      FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
	{
	  FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
	    if (i != i2)
	      gcc_assert (!a->contains (*a2));
	}
    }

  FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
    {
      if (a2->contains (a))
	return 0;
      if (a.contains (*a2))
	{
	  a.adjustments = 0;
	  a2->parm_index = a.parm_index;
	  a2->parm_offset_known = a.parm_offset_known;
	  a2->update (a.parm_offset, a.offset, a.size, a.max_size,
		      record_adjustments);
	  modref_access_node::try_merge_with (accesses, i);
	  return 1;
	}
      if (a2->merge (a, record_adjustments))
	{
	  modref_access_node::try_merge_with (accesses, i);
	  return 1;
	}
      gcc_checking_assert (!(a == *a2));
    }

  /* If this base->ref pair has too many accesses stored, we will clear
     all accesses and bail out.  */
  if (accesses && accesses->length () >= max_accesses)
    {
      if (max_accesses < 2)
	return -1;
      /* Find least harmful merge and perform it.  */
      int best1 = -1, best2 = -1;
      FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
	{
	  for (j = i + 1; j < accesses->length (); j++)
	    if (best1 < 0
		|| modref_access_node::closer_pair_p
		     (*a2, (*accesses)[j],
		      (*accesses)[best1],
		      best2 < 0 ? a : (*accesses)[best2]))
	      {
		best1 = i;
		best2 = j;
	      }
	  if (modref_access_node::closer_pair_p
		(*a2, a,
		 (*accesses)[best1],
		 best2 < 0 ? a : (*accesses)[best2]))
	    {
	      best1 = i;
	      best2 = -1;
	    }
	}
      (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
				       record_adjustments);
      /* Check that merging indeed merged ranges.  */
      gcc_checking_assert ((*accesses)[best1].contains
			     (best2 < 0 ? a : (*accesses)[best2]));
      if (!(*accesses)[best1].useful_p ())
	return -1;
      if (dump_file && best2 >= 0)
	fprintf (dump_file,
		 "--param param=modref-max-accesses limit reached;"
		 " merging %i and %i\n", best1, best2);
      else if (dump_file)
	fprintf (dump_file,
		 "--param param=modref-max-accesses limit reached;"
		 " merging with %i\n", best1);
      modref_access_node::try_merge_with (accesses, best1);
      if (best2 >= 0)
	insert (accesses, a, max_accesses, record_adjustments);
      return 1;
    }
  a.adjustments = 0;
  vec_safe_push (accesses, a);
  return 1;
}
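
/* Note that when the limit is hit and two existing entries BEST1 and BEST2
   were chosen for the forced merge, the incoming access A itself has not
   been added yet, which is why insert () recurses once after the merge.  */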

/* Return true if range info is useful.  */
bool
modref_access_node::range_info_useful_p () const
{
  return parm_index != MODREF_UNKNOWN_PARM && parm_offset_known
	 && (known_size_p (size)
	     || known_size_p (max_size)
	     || known_ge (offset, 0));
}

/* Dump range to debug OUT.  */
void
modref_access_node::dump (FILE *out)
{
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index >= 0)
	fprintf (out, " Parm %i", parm_index);
      else if (parm_index == MODREF_STATIC_CHAIN_PARM)
	fprintf (out, " Static chain");
      else
	gcc_unreachable ();
      if (parm_offset_known)
	{
	  fprintf (out, " param offset:");
	  print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
	}
    }
  if (range_info_useful_p ())
    {
      fprintf (out, " offset:");
      print_dec ((poly_int64_pod)offset, out, SIGNED);
      fprintf (out, " size:");
      print_dec ((poly_int64_pod)size, out, SIGNED);
      fprintf (out, " max_size:");
      print_dec ((poly_int64_pod)max_size, out, SIGNED);
      if (adjustments)
	fprintf (out, " adjusted %i times", adjustments);
    }
  fprintf (out, "\n");
}

/* Return tree corresponding to parameter of the range in STMT.  */
tree
modref_access_node::get_call_arg (const gcall *stmt) const
{
  if (parm_index == MODREF_UNKNOWN_PARM)
    return NULL;
  if (parm_index == MODREF_STATIC_CHAIN_PARM)
    return gimple_call_chain (stmt);
  /* MODREF_RETSLOT_PARM should not happen in access trees since the store
     is seen explicitly in the caller.  */
  gcc_checking_assert (parm_index >= 0);
  if (parm_index >= (int)gimple_call_num_args (stmt))
    return NULL;
  return gimple_call_arg (stmt, parm_index);
}

/* Return true and initialize REF to the memory range accessed through the
   parameter in call STMT; return false if the range cannot be
   determined.  */
bool
modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
{
  tree arg;

  if (!parm_offset_known || !(arg = get_call_arg (stmt)))
    return false;
  poly_offset_int off = (poly_offset_int)offset
			+ ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
  poly_int64 off2;
  if (!off.to_shwi (&off2))
    return false;
  ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
  return true;
}
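
/* For example, parm_offset 4 (bytes) combined with a bit offset of 8 yields
   an ao_ref starting 4 * BITS_PER_UNIT + 8 = 40 bits past the pointer
   passed in the call (assuming 8-bit units).  */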

/* Return true if A is a subkill.  */
bool
modref_access_node::contains_for_kills (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;

  gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
		       && a.parm_index != MODREF_UNKNOWN_PARM);
  if (parm_index != a.parm_index)
    return false;
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  aoffset_adj = (a.parm_offset - parm_offset)
		* BITS_PER_UNIT;
  gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
  return known_subrange_p (a.offset + aoffset_adj,
			   a.max_size, offset, max_size);
}

/* Merge two ranges both starting at parm_offset1 and update THIS
   with the result.  Return true if THIS changed.  */
bool
modref_access_node::update_for_kills (poly_int64 parm_offset1,
				      poly_int64 offset1,
				      poly_int64 max_size1,
				      poly_int64 offset2,
				      poly_int64 max_size2,
				      bool record_adjustments)
{
  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size = max_size2 + offset2 - offset1;
  if (known_le (new_max_size, max_size1))
    new_max_size = max_size1;
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, new_max_size)
      && known_eq (max_size, new_max_size))
    return false;

  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      max_size = new_max_size;
      size = new_max_size;
      gcc_checking_assert (useful_for_kill_p ());
      return true;
    }
  return false;
}

/* Merge in access A if it is possible to do without losing
   precision.  Return true if successful.
   Unlike merge, assume that both accesses are always executed
   and merge size the same way as max_size.  */
bool
modref_access_node::merge_for_kills (const modref_access_node &a,
				     bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
		       && useful_for_kill_p () && a.useful_for_kill_p ());

  if (parm_index != a.parm_index
      || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    return false;

  if (known_le (offset1, aoffset1))
    {
      if (!known_size_p (max_size)
	  || known_ge (offset1 + max_size, aoffset1))
	return update_for_kills (new_parm_offset, offset1, max_size,
				 aoffset1, a.max_size, record_adjustments);
    }
  else if (known_le (aoffset1, offset1))
    {
      if (!known_size_p (a.max_size)
	  || known_ge (aoffset1 + a.max_size, offset1))
	return update_for_kills (new_parm_offset, offset1, max_size,
				 aoffset1, a.max_size, record_adjustments);
    }
  return false;
}

/* Insert new kill A into KILLS.  If RECORD_ADJUSTMENTS is true limit number
   of changes to each entry.  Return true if something changed.  */

bool
modref_access_node::insert_kill (vec<modref_access_node> &kills,
				 modref_access_node &a, bool record_adjustments)
{
  size_t index;
  modref_access_node *a2;
  bool merge = false;

  gcc_checking_assert (a.useful_for_kill_p ());

  /* See if we have corresponding entry already or we can merge with
     neighbouring entry.  */
  FOR_EACH_VEC_ELT (kills, index, a2)
    {
      if (a2->contains_for_kills (a))
	return false;
      if (a.contains_for_kills (*a2))
	{
	  a.adjustments = 0;
	  *a2 = a;
	  merge = true;
	  break;
	}
      if (a2->merge_for_kills (a, record_adjustments))
	{
	  merge = true;
	  break;
	}
    }
  /* If entry was not found, insert it.  */
  if (!merge)
    {
      if ((int)kills.length () >= param_modref_max_accesses)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "--param param=modref-max-accesses limit reached:");
	  return false;
	}
      a.adjustments = 0;
      kills.safe_push (a);
      return true;
    }
  /* Extending range in an entry may make it possible to merge it with
     other entries.  */
  size_t i;

  for (i = 0; i < kills.length ();)
    if (i != index)
      {
	bool found = false, restart = false;
	modref_access_node *a = &kills[i];
	modref_access_node *n = &kills[index];

	if (n->contains_for_kills (*a))
	  found = true;
	if (!found && n->merge_for_kills (*a, false))
	  found = restart = true;
	gcc_checking_assert (found || !a->merge_for_kills (*n, false));
	if (found)
	  {
	    kills.unordered_remove (i);
	    if (index == kills.length ())
	      {
		index = i;
		i++;
	      }
	    if (restart)
	      i = 0;
	  }
	else
	  i++;
      }
    else
      i++;
  return true;
}

#if CHECKING_P

namespace selftest {
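
/* In the selftests below modref_tree::insert is called as
   insert (max_bases, max_refs, max_accesses, base, ref, access,
   record_adjustments); the first three arguments are the collapse limits,
   which is why e.g. inserting a third distinct ref below triggers the ref
   list collapse.  (The argument naming here is inferred from the tests
   themselves, not restated from ipa-modref-tree.h.)  */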

static void
test_insert_search_collapse ()
{
  modref_base_node<alias_set_type> *base_node;
  modref_ref_node<alias_set_type> *ref_node;
  modref_access_node a = unspecified_modref_access_node;

  modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
  ASSERT_FALSE (t->every_base);

  /* Insert into an empty tree.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_FALSE (t->every_base);
  ASSERT_EQ (t->search (2), NULL);

  base_node = t->search (1);
  ASSERT_NE (base_node, NULL);
  ASSERT_EQ (base_node->base, 1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 1);
  ASSERT_EQ (base_node->search (1), NULL);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);
  ASSERT_EQ (ref_node->ref, 2);

  /* Insert when base exists but ref does not.  */
  t->insert (1, 2, 2, 1, 3, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (t->search (2), NULL);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 2);

  ref_node = base_node->search (3);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist, but access is not dominated by nor
     dominates other accesses.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist and access is dominated.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->search (2), ref_node);

  /* Insert ref to trigger ref list collapse for base 1.  */
  t->insert (1, 2, 2, 1, 4, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Further inserts to collapsed ref list are ignored.  */
  t->insert (1, 2, 2, 1, 5, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Insert base to trigger base list collapse.  */
  t->insert (1, 2, 2, 5, 0, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  /* Further inserts to collapsed base list are ignored.  */
  t->insert (1, 2, 2, 7, 8, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  delete t;
}

static void
test_merge ()
{
  modref_tree<alias_set_type> *t1, *t2;
  modref_base_node<alias_set_type> *base_node;
  modref_access_node a = unspecified_modref_access_node;

  t1 = new modref_tree<alias_set_type>();
  t1->insert (3, 4, 1, 1, 1, a, false);
  t1->insert (3, 4, 1, 1, 2, a, false);
  t1->insert (3, 4, 1, 1, 3, a, false);
  t1->insert (3, 4, 1, 2, 1, a, false);
  t1->insert (3, 4, 1, 3, 1, a, false);

  t2 = new modref_tree<alias_set_type>();
  t2->insert (10, 10, 10, 1, 2, a, false);
  t2->insert (10, 10, 10, 1, 3, a, false);
  t2->insert (10, 10, 10, 1, 4, a, false);
  t2->insert (10, 10, 10, 3, 2, a, false);
  t2->insert (10, 10, 10, 3, 3, a, false);
  t2->insert (10, 10, 10, 3, 4, a, false);
  t2->insert (10, 10, 10, 3, 5, a, false);

  t1->merge (3, 4, 1, t2, NULL, NULL, false);

  ASSERT_FALSE (t1->every_base);
  ASSERT_NE (t1->bases, NULL);
  ASSERT_EQ (t1->bases->length (), 3);

  base_node = t1->search (1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 4);

  base_node = t1->search (2);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 1);

  base_node = t1->search (3);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_TRUE (base_node->every_ref);

  delete t1;
  delete t2;
}

void
ipa_modref_tree_c_tests ()
{
  test_insert_search_collapse ();
  test_merge ();
}

} // namespace selftest

#endif

void
gt_ggc_mx (modref_tree < int >*const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void
gt_ggc_mx (modref_tree < tree_node * >*const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void gt_pch_nx (modref_tree<int>* const&) {}
void gt_pch_nx (modref_tree<tree_node*>* const&) {}
void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_base_node<int>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
}

void gt_ggc_mx (modref_base_node<tree_node*>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
  if (b->base)
    gt_ggc_mx (b->base);
}

void gt_pch_nx (modref_base_node<int>*) {}
void gt_pch_nx (modref_base_node<tree_node*>*) {}
void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_ref_node<int>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
}

void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
  if (r->ref)
    gt_ggc_mx (r->ref);
}

void gt_pch_nx (modref_ref_node<int>* ) {}
void gt_pch_nx (modref_ref_node<tree_node*>*) {}
void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_access_node &)
{
}