/* Data structure for the modref pass.
   Copyright (C) 2020-2021 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "ipa-modref-tree.h"
#include "selftest.h"
#include "tree-ssa-alias.h"
#include "gimple.h"
#include "cgraph.h"
#include "tree-streamer.h"
/* Return true if both accesses are the same.  */
bool
modref_access_node::operator == (modref_access_node &a) const
{
  if (parm_index != a.parm_index)
    return false;
  if (parm_index != MODREF_UNKNOWN_PARM
      && parm_index != MODREF_GLOBAL_MEMORY_PARM)
    {
      if (parm_offset_known != a.parm_offset_known)
        return false;
      if (parm_offset_known
          && !known_eq (parm_offset, a.parm_offset))
        return false;
    }
  if (range_info_useful_p () != a.range_info_useful_p ())
    return false;
  if (range_info_useful_p ()
      && (!known_eq (a.offset, offset)
          || !known_eq (a.size, size)
          || !known_eq (a.max_size, max_size)))
    return false;
  return true;
}
/* Return true if A is a subaccess.  */
bool
modref_access_node::contains (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_offset_known)
        {
          if (!a.parm_offset_known)
            return false;
          /* Accesses are never below parm_offset, so look
             for smaller offset.
             If access ranges are known, still allow merging
             when the bit offset comparison passes.  */
          if (!known_le (parm_offset, a.parm_offset)
              && !range_info_useful_p ())
            return false;
          /* We allow negative aoffset_adj here in case
             there is a useful range.  This is because adding
             a.offset may result in a non-negative offset again.
             Ubsan fails on val << LOG_BITS_PER_UNIT where val
             is negative.  */
          aoffset_adj = (a.parm_offset - parm_offset)
                        * BITS_PER_UNIT;
        }
    }
  if (range_info_useful_p ())
    {
      if (!a.range_info_useful_p ())
        return false;
      /* Sizes of stores are used to check that the object is big enough
         to fit the store, so a smaller or unknown store is more general
         than a larger one.  */
      if (known_size_p (size)
          && (!known_size_p (a.size)
              || !known_le (size, a.size)))
        return false;
      if (known_size_p (max_size))
        return known_subrange_p (a.offset + aoffset_adj,
                                 a.max_size, offset, max_size);
      else
        return known_le (offset, a.offset + aoffset_adj);
    }
  return true;
}
/* Update access range to new parameters.
   If RECORD_ADJUSTMENTS is true, record the number of changes in the access
   and, if the threshold is exceeded, start dropping precision
   so that only constantly many updates are possible.  This makes the
   dataflow converge.  */
void
modref_access_node::update (poly_int64 parm_offset1,
                            poly_int64 offset1, poly_int64 size1,
                            poly_int64 max_size1, bool record_adjustments)
{
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, size1)
      && known_eq (max_size, max_size1))
    return;
  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      size = size1;
      max_size = max_size1;
    }
  else
    {
      if (dump_file)
        fprintf (dump_file,
                 "--param param=modref-max-adjustments limit reached:");
      if (!known_eq (parm_offset, parm_offset1))
        {
          if (dump_file)
            fprintf (dump_file, " parm_offset cleared");
          parm_offset_known = false;
        }
      if (!known_eq (size, size1))
        {
          size = -1;
          if (dump_file)
            fprintf (dump_file, " size cleared");
        }
      if (!known_eq (max_size, max_size1))
        {
          max_size = -1;
          if (dump_file)
            fprintf (dump_file, " max_size cleared");
        }
      if (!known_eq (offset, offset1))
        {
          offset = 0;
          if (dump_file)
            fprintf (dump_file, " offset cleared");
        }
      if (dump_file)
        fprintf (dump_file, "\n");
    }
}
/* Merge in access A if it is possible to do so without losing
   precision.  Return true if successful.
   If RECORD_ADJUSTMENTS is true, remember how many times intervals
   were prolonged and punt when there are too many.  */
bool
modref_access_node::merge (const modref_access_node &a,
                           bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this));
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index != a.parm_index)
        return false;
      if (parm_offset_known)
        {
          if (!a.parm_offset_known)
            return false;
          if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
            return false;
        }
    }
  /* See if we can merge ranges.  */
  if (range_info_useful_p ())
    {
      /* In this case we have containment that should be
         handled earlier.  */
      gcc_checking_assert (a.range_info_useful_p ());

      /* If a.size is less specified than size, merge only
         if intervals are otherwise equivalent.  */
      if (known_size_p (size)
          && (!known_size_p (a.size) || known_lt (a.size, size)))
        {
          if (((known_size_p (max_size) || known_size_p (a.max_size))
               && !known_eq (max_size, a.max_size))
              || !known_eq (offset1, aoffset1))
            return false;
          update (new_parm_offset, offset1, a.size, max_size,
                  record_adjustments);
          return true;
        }
      /* If sizes are the same, we can extend the interval.  */
      if ((known_size_p (size) || known_size_p (a.size))
          && !known_eq (size, a.size))
        return false;
      if (known_le (offset1, aoffset1))
        {
          if (!known_size_p (max_size)
              || known_ge (offset1 + max_size, aoffset1))
            {
              update2 (new_parm_offset, offset1, size, max_size,
                       aoffset1, a.size, a.max_size,
                       record_adjustments);
              return true;
            }
        }
      else if (known_le (aoffset1, offset1))
        {
          if (!known_size_p (a.max_size)
              || known_ge (aoffset1 + a.max_size, offset1))
            {
              update2 (new_parm_offset, offset1, size, max_size,
                       aoffset1, a.size, a.max_size,
                       record_adjustments);
              return true;
            }
        }
      return false;
    }
  update (new_parm_offset, offset1,
          size, max_size, record_adjustments);
  return true;
}
/* Return true if merging A1 and B1 loses less information than merging
   A2 and B2.
   Assume that no containment or lossless merging is possible.  */
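/* Purely illustrative example: for bit ranges [0, 32) and [64, 96) of the
   same parameter the distance computed below is 32, while for [0, 32) and
   [32, 64) it is 0, so the latter pair is considered closer.  */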
bool
modref_access_node::closer_pair_p (const modref_access_node &a1,
                                   const modref_access_node &b1,
                                   const modref_access_node &a2,
                                   const modref_access_node &b2)
{
  /* Merging different parm indexes comes to complete loss
     of range info.  */
  if (a1.parm_index != b1.parm_index)
    return false;
  if (a2.parm_index != b2.parm_index)
    return true;
  /* If parm is known and parm indexes are the same we should
     already have containment.  */
  gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
  gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);

  /* First normalize offsets for parm offsets.  */
  poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
  if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
      || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
    gcc_unreachable ();

  /* Now compute distance of the intervals.  */
  poly_int64 dist1, dist2;
  if (known_le (offseta1, offsetb1))
    {
      if (!known_size_p (a1.max_size))
        dist1 = 0;
      else
        dist1 = offsetb1 - offseta1 - a1.max_size;
    }
  else
    {
      if (!known_size_p (b1.max_size))
        dist1 = 0;
      else
        dist1 = offseta1 - offsetb1 - b1.max_size;
    }
  if (known_le (offseta2, offsetb2))
    {
      if (!known_size_p (a2.max_size))
        dist2 = 0;
      else
        dist2 = offsetb2 - offseta2 - a2.max_size;
    }
  else
    {
      if (!known_size_p (b2.max_size))
        dist2 = 0;
      else
        dist2 = offseta2 - offsetb2 - b2.max_size;
    }
  /* It may happen that intervals overlap in case size
     is different.  Prefer the overlap to non-overlap.  */
  if (known_lt (dist1, 0) && known_ge (dist2, 0))
    return true;
  if (known_lt (dist2, 0) && known_ge (dist1, 0))
    return false;
  if (known_lt (dist1, 0))
    /* If both overlap, minimize the overlap.  */
    return known_le (dist2, dist1);
  else
    /* If both are disjoint, look for the smaller distance.  */
    return known_le (dist1, dist2);
}
/* Merge in access A while losing precision.  */
void
modref_access_node::forced_merge (const modref_access_node &a,
                                  bool record_adjustments)
{
  if (parm_index != a.parm_index)
    {
      gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
      parm_index = MODREF_UNKNOWN_PARM;
      return;
    }

  /* We assume that containment and lossless merging
     were tested earlier.  */
  gcc_checking_assert (!contains (a) && !a.contains (*this)
                       && !merge (a, record_adjustments));
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);

  poly_int64 new_parm_offset, offset1, aoffset1;
  if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    {
      parm_offset_known = false;
      return;
    }
  gcc_checking_assert (range_info_useful_p ()
                       && a.range_info_useful_p ());
  if (record_adjustments)
    adjustments += a.adjustments;
  update2 (new_parm_offset,
           offset1, size, max_size,
           aoffset1, a.size, a.max_size,
           record_adjustments);
}
/* Merge two ranges both starting at parm_offset1 and update THIS
   with result.  */
void
modref_access_node::update2 (poly_int64 parm_offset1,
                             poly_int64 offset1, poly_int64 size1,
                             poly_int64 max_size1,
                             poly_int64 offset2, poly_int64 size2,
                             poly_int64 max_size2,
                             bool record_adjustments)
{
  poly_int64 new_size = size1;

  if (!known_size_p (size2)
      || known_le (size2, size1))
    new_size = size2;
  else
    gcc_checking_assert (known_le (size1, size2));

  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size;

  if (!known_size_p (max_size1))
    new_max_size = max_size1;
  else if (!known_size_p (max_size2))
    new_max_size = max_size2;
  else
    {
      new_max_size = max_size2 + offset2 - offset1;
      if (known_le (new_max_size, max_size1))
        new_max_size = max_size1;
    }

  update (parm_offset1, offset1,
          new_size, new_max_size, record_adjustments);
}
/* Given access nodes THIS and A, return true if they can be described with
   a common parm_offset.  In that case store the common parm offset in
   NEW_PARM_OFFSET, the start of the range in THIS in NEW_OFFSET and the
   start of the range in A in NEW_AOFFSET.  */
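/* Purely illustrative example: parm_offset 4 with bit offset 0 and
   parm_offset 2 with bit offset 8 normalize to the common parm_offset 2
   with bit offsets 16 and 8 respectively.  */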
bool
modref_access_node::combined_offsets (const modref_access_node &a,
                                      poly_int64 *new_parm_offset,
                                      poly_int64 *new_offset,
                                      poly_int64 *new_aoffset) const
{
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  if (known_le (a.parm_offset, parm_offset))
    {
      *new_offset = offset
                    + ((parm_offset - a.parm_offset)
                       << LOG2_BITS_PER_UNIT);
      *new_aoffset = a.offset;
      *new_parm_offset = a.parm_offset;
      return true;
    }
  else if (known_le (parm_offset, a.parm_offset))
    {
      *new_aoffset = a.offset
                     + ((a.parm_offset - parm_offset)
                        << LOG2_BITS_PER_UNIT);
      *new_offset = offset;
      *new_parm_offset = parm_offset;
      return true;
    }
  else
    return false;
}
/* Try to optimize the access ACCESSES list after entry INDEX was modified.  */
void
modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
                                    size_t index)
{
  size_t i;

  for (i = 0; i < accesses->length ();)
    if (i != index)
      {
        bool found = false, restart = false;
        modref_access_node *a = &(*accesses)[i];
        modref_access_node *n = &(*accesses)[index];

        if (n->contains (*a))
          found = true;
        if (!found && n->merge (*a, false))
          found = restart = true;
        gcc_checking_assert (found || !a->merge (*n, false));
        if (found)
          {
            accesses->unordered_remove (i);
            if (index == accesses->length ())
              {
                index = i;
                i++;
              }
            if (restart)
              i = 0;
          }
        else
          i++;
      }
    else
      i++;
}
/* Stream out to OB.  */

void
modref_access_node::stream_out (struct output_block *ob) const
{
  streamer_write_hwi (ob, parm_index);
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      streamer_write_uhwi (ob, parm_offset_known);
      if (parm_offset_known)
        {
          streamer_write_poly_int64 (ob, parm_offset);
          streamer_write_poly_int64 (ob, offset);
          streamer_write_poly_int64 (ob, size);
          streamer_write_poly_int64 (ob, max_size);
        }
    }
}
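/* Stream in a single access node from IB; the counterpart of stream_out
   above.  */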
modref_access_node
modref_access_node::stream_in (struct lto_input_block *ib)
{
  int parm_index = streamer_read_hwi (ib);
  bool parm_offset_known = false;
  poly_int64 parm_offset = 0;
  poly_int64 offset = 0;
  poly_int64 size = -1;
  poly_int64 max_size = -1;

  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      parm_offset_known = streamer_read_uhwi (ib);
      if (parm_offset_known)
        {
          parm_offset = streamer_read_poly_int64 (ib);
          offset = streamer_read_poly_int64 (ib);
          size = streamer_read_poly_int64 (ib);
          max_size = streamer_read_poly_int64 (ib);
        }
    }
  return {offset, size, max_size, parm_offset, parm_index,
          parm_offset_known, false};
}
/* Insert access with OFFSET and SIZE.
   Collapse tree if it has more than MAX_ACCESSES entries.
   If RECORD_ADJUSTMENTS is true avoid too many interval extensions.

   Return 0 if nothing changed, 1 if insert was successful and -1
   if entries should be collapsed.  */
int
modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
                            modref_access_node a, size_t max_accesses,
                            bool record_adjustments)
{
  size_t i, j;
  modref_access_node *a2;

  /* Verify that list does not contain redundant accesses.  */
  if (flag_checking)
    {
      size_t i, i2;
      modref_access_node *a, *a2;

      FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
        {
          FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
            if (i != i2)
              gcc_assert (!a->contains (*a2));
        }
    }

  FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
    {
      if (a2->contains (a))
        return 0;
      if (a.contains (*a2))
        {
          a.adjustments = 0;
          a2->parm_index = a.parm_index;
          a2->parm_offset_known = a.parm_offset_known;
          a2->update (a.parm_offset, a.offset, a.size, a.max_size,
                      record_adjustments);
          modref_access_node::try_merge_with (accesses, i);
          return 1;
        }
      if (a2->merge (a, record_adjustments))
        {
          modref_access_node::try_merge_with (accesses, i);
          return 1;
        }
      gcc_checking_assert (!(a == *a2));
    }

  /* If this base->ref pair has too many accesses stored, we will clear
     all accesses and bail out.  */
  if (accesses && accesses->length () >= max_accesses)
    {
      if (max_accesses < 2)
        return -1;
      /* Find least harmful merge and perform it.  */
      int best1 = -1, best2 = -1;
      FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
        {
          for (j = i + 1; j < accesses->length (); j++)
            if (best1 < 0
                || modref_access_node::closer_pair_p
                        (*a2, (*accesses)[j],
                         (*accesses)[best1],
                         best2 < 0 ? a : (*accesses)[best2]))
              {
                best1 = i;
                best2 = j;
              }
          if (modref_access_node::closer_pair_p
                (*a2, a,
                 (*accesses)[best1],
                 best2 < 0 ? a : (*accesses)[best2]))
            {
              best1 = i;
              best2 = -1;
            }
        }
      (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
                                       record_adjustments);
      /* Check that merging indeed merged ranges.  */
      gcc_checking_assert ((*accesses)[best1].contains
                              (best2 < 0 ? a : (*accesses)[best2]));
      if (!(*accesses)[best1].useful_p ())
        return -1;
      if (dump_file && best2 >= 0)
        fprintf (dump_file,
                 "--param param=modref-max-accesses limit reached;"
                 " merging %i and %i\n", best1, best2);
      else if (dump_file)
        fprintf (dump_file,
                 "--param param=modref-max-accesses limit reached;"
                 " merging with %i\n", best1);
      modref_access_node::try_merge_with (accesses, best1);
      if (best2 >= 0)
        insert (accesses, a, max_accesses, record_adjustments);
      return 1;
    }
  a.adjustments = 0;
  vec_safe_push (accesses, a);
  return 1;
}
/* Return true if range info is useful.  */
bool
modref_access_node::range_info_useful_p () const
{
  return parm_index != MODREF_UNKNOWN_PARM
         && parm_index != MODREF_GLOBAL_MEMORY_PARM
         && parm_offset_known
         && (known_size_p (size)
             || known_size_p (max_size)
             || known_ge (offset, 0));
}
/* Dump range to debug OUT.  */
void
modref_access_node::dump (FILE *out)
{
  if (parm_index != MODREF_UNKNOWN_PARM)
    {
      if (parm_index == MODREF_GLOBAL_MEMORY_PARM)
        fprintf (out, " Base in global memory");
      else if (parm_index >= 0)
        fprintf (out, " Parm %i", parm_index);
      else if (parm_index == MODREF_STATIC_CHAIN_PARM)
        fprintf (out, " Static chain");
      else
        gcc_unreachable ();
      if (parm_offset_known)
        {
          fprintf (out, " param offset:");
          print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
        }
    }
  if (range_info_useful_p ())
    {
      fprintf (out, " offset:");
      print_dec ((poly_int64_pod)offset, out, SIGNED);
      fprintf (out, " size:");
      print_dec ((poly_int64_pod)size, out, SIGNED);
      fprintf (out, " max_size:");
      print_dec ((poly_int64_pod)max_size, out, SIGNED);
      if (adjustments)
        fprintf (out, " adjusted %i times", adjustments);
    }
  fprintf (out, "\n");
}
/* Return tree corresponding to parameter of the range in STMT.  */
tree
modref_access_node::get_call_arg (const gcall *stmt) const
{
  if (parm_index == MODREF_UNKNOWN_PARM
      || parm_index == MODREF_GLOBAL_MEMORY_PARM)
    return NULL;
  if (parm_index == MODREF_STATIC_CHAIN_PARM)
    return gimple_call_chain (stmt);
  /* MODREF_RETSLOT_PARM should not happen in access trees since the store
     is seen explicitly in the caller.  */
  gcc_checking_assert (parm_index >= 0);
  if (parm_index >= (int)gimple_call_num_args (stmt))
    return NULL;
  return gimple_call_arg (stmt, parm_index);
}
/* Build REF describing the memory range accessed by this node in call STMT;
   return true on success.  */
bool
modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
{
  tree arg;

  if (!parm_offset_known || !(arg = get_call_arg (stmt)))
    return false;
  poly_offset_int off = (poly_offset_int)offset
        + ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
  poly_int64 off2;
  if (!off.to_shwi (&off2))
    return false;
  ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
  return true;
}
/* Return true if A is a subkill.  */
bool
modref_access_node::contains_for_kills (const modref_access_node &a) const
{
  poly_int64 aoffset_adj = 0;

  gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
                       && a.parm_index != MODREF_UNKNOWN_PARM);
  if (parm_index != a.parm_index)
    return false;
  gcc_checking_assert (parm_offset_known && a.parm_offset_known);
  aoffset_adj = (a.parm_offset - parm_offset)
                * BITS_PER_UNIT;
  gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
  return known_subrange_p (a.offset + aoffset_adj,
                           a.max_size, offset, max_size);
}
/* Merge two ranges both starting at parm_offset1 and update THIS
   with result.  */
bool
modref_access_node::update_for_kills (poly_int64 parm_offset1,
                                      poly_int64 offset1,
                                      poly_int64 max_size1,
                                      poly_int64 offset2,
                                      poly_int64 max_size2,
                                      bool record_adjustments)
{
  if (known_le (offset1, offset2))
    ;
  else if (known_le (offset2, offset1))
    {
      std::swap (offset1, offset2);
      std::swap (max_size1, max_size2);
    }
  else
    gcc_unreachable ();

  poly_int64 new_max_size = max_size2 + offset2 - offset1;
  if (known_le (new_max_size, max_size1))
    new_max_size = max_size1;
  if (known_eq (parm_offset, parm_offset1)
      && known_eq (offset, offset1)
      && known_eq (size, new_max_size)
      && known_eq (max_size, new_max_size))
    return false;

  if (!record_adjustments
      || (++adjustments) < param_modref_max_adjustments)
    {
      parm_offset = parm_offset1;
      offset = offset1;
      max_size = new_max_size;
      size = new_max_size;
      gcc_checking_assert (useful_for_kill_p ());
      return true;
    }
  return false;
}
/* Merge in access A if it is possible to do so without losing
   precision.  Return true if successful.
   Unlike merge, assume that both accesses are always executed
   and merge size the same way as max_size.  */
bool
modref_access_node::merge_for_kills (const modref_access_node &a,
                                     bool record_adjustments)
{
  poly_int64 offset1 = 0;
  poly_int64 aoffset1 = 0;
  poly_int64 new_parm_offset = 0;

  /* We assume that containment was tested earlier.  */
  gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
                       && useful_for_kill_p () && a.useful_for_kill_p ());

  if (parm_index != a.parm_index
      || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
    return false;

  if (known_le (offset1, aoffset1))
    {
      if (!known_size_p (max_size)
          || known_ge (offset1 + max_size, aoffset1))
        return update_for_kills (new_parm_offset, offset1, max_size,
                                 aoffset1, a.max_size, record_adjustments);
    }
  else if (known_le (aoffset1, offset1))
    {
      if (!known_size_p (a.max_size)
          || known_ge (aoffset1 + a.max_size, offset1))
        return update_for_kills (new_parm_offset, offset1, max_size,
                                 aoffset1, a.max_size, record_adjustments);
    }
  return false;
}
/* Insert new kill A into KILLS.  If RECORD_ADJUSTMENTS is true limit number
   of changes to each entry.  Return true if something changed.  */

bool
modref_access_node::insert_kill (vec<modref_access_node> &kills,
                                 modref_access_node &a, bool record_adjustments)
{
  size_t index;
  modref_access_node *a2;
  bool merge = false;

  gcc_checking_assert (a.useful_for_kill_p ());

  /* See if we have corresponding entry already or we can merge with
     neighbouring entry.  */
  FOR_EACH_VEC_ELT (kills, index, a2)
    {
      if (a2->contains_for_kills (a))
        return false;
      if (a.contains_for_kills (*a2))
        {
          a.adjustments = 0;
          *a2 = a;
          merge = true;
          break;
        }
      if (a2->merge_for_kills (a, record_adjustments))
        {
          merge = true;
          break;
        }
    }
  /* If entry was not found, insert it.  */
  if (!merge)
    {
      if ((int)kills.length () >= param_modref_max_accesses)
        {
          if (dump_file)
            fprintf (dump_file,
                     "--param param=modref-max-accesses limit reached:");
          return false;
        }
      a.adjustments = 0;
      kills.safe_push (a);
      return true;
    }
  /* Extending range in an entry may make it possible to merge it with
     other entries.  */
  size_t i;

  for (i = 0; i < kills.length ();)
    if (i != index)
      {
        bool found = false, restart = false;
        modref_access_node *a = &kills[i];
        modref_access_node *n = &kills[index];

        if (n->contains_for_kills (*a))
          found = true;
        if (!found && n->merge_for_kills (*a, false))
          found = restart = true;
        gcc_checking_assert (found || !a->merge_for_kills (*n, false));
        if (found)
          {
            kills.unordered_remove (i);
            if (index == kills.length ())
              {
                index = i;
                i++;
              }
            if (restart)
              i = 0;
          }
        else
          i++;
      }
    else
      i++;
  return true;
}
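/* Selftests exercising insertion, search, collapsing and merging of
   modref trees.  */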
#if CHECKING_P

namespace selftest {

static void
test_insert_search_collapse ()
{
  modref_base_node<alias_set_type> *base_node;
  modref_ref_node<alias_set_type> *ref_node;
  modref_access_node a = unspecified_modref_access_node;

  modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
  ASSERT_FALSE (t->every_base);

  /* Insert into an empty tree.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_FALSE (t->every_base);
  ASSERT_EQ (t->search (2), NULL);

  base_node = t->search (1);
  ASSERT_NE (base_node, NULL);
  ASSERT_EQ (base_node->base, 1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 1);
  ASSERT_EQ (base_node->search (1), NULL);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);
  ASSERT_EQ (ref_node->ref, 2);

  /* Insert when base exists but ref does not.  */
  t->insert (1, 2, 2, 1, 3, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (t->search (2), NULL);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 2);

  ref_node = base_node->search (3);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist, but access is not dominated by nor
     dominates other accesses.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist and access is dominated.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->search (2), ref_node);

  /* Insert ref to trigger ref list collapse for base 1.  */
  t->insert (1, 2, 2, 1, 4, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Further inserts to collapsed ref list are ignored.  */
  t->insert (1, 2, 2, 1, 5, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Insert base to trigger base list collapse.  */
  t->insert (1, 2, 2, 5, 0, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  /* Further inserts to collapsed base list are ignored.  */
  t->insert (1, 2, 2, 7, 8, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  delete t;
}
static void
test_merge ()
{
  modref_tree<alias_set_type> *t1, *t2;
  modref_base_node<alias_set_type> *base_node;
  modref_access_node a = unspecified_modref_access_node;

  t1 = new modref_tree<alias_set_type>();
  t1->insert (3, 4, 1, 1, 1, a, false);
  t1->insert (3, 4, 1, 1, 2, a, false);
  t1->insert (3, 4, 1, 1, 3, a, false);
  t1->insert (3, 4, 1, 2, 1, a, false);
  t1->insert (3, 4, 1, 3, 1, a, false);

  t2 = new modref_tree<alias_set_type>();
  t2->insert (10, 10, 10, 1, 2, a, false);
  t2->insert (10, 10, 10, 1, 3, a, false);
  t2->insert (10, 10, 10, 1, 4, a, false);
  t2->insert (10, 10, 10, 3, 2, a, false);
  t2->insert (10, 10, 10, 3, 3, a, false);
  t2->insert (10, 10, 10, 3, 4, a, false);
  t2->insert (10, 10, 10, 3, 5, a, false);

  t1->merge (3, 4, 1, t2, NULL, NULL, false);

  ASSERT_FALSE (t1->every_base);
  ASSERT_NE (t1->bases, NULL);
  ASSERT_EQ (t1->bases->length (), 3);

  base_node = t1->search (1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 4);

  base_node = t1->search (2);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 1);

  base_node = t1->search (3);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_TRUE (base_node->every_ref);

  delete t1;
  delete t2;
}


void
ipa_modref_tree_c_tests ()
{
  test_insert_search_collapse ();
  test_merge ();
}

} // namespace selftest

#endif
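/* GGC marking routines for modref trees.  They mark the GC-allocated
   vectors of base, ref and access nodes; the gt_pch_nx hooks are no-ops.  */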
void
gt_ggc_mx (modref_tree < int >*const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void
gt_ggc_mx (modref_tree < tree_node * >*const &tt)
{
  if (tt->bases)
    {
      ggc_test_and_set_mark (tt->bases);
      gt_ggc_mx (tt->bases);
    }
}

void gt_pch_nx (modref_tree<int>* const&) {}
void gt_pch_nx (modref_tree<tree_node*>* const&) {}
void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_base_node<int>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
}

void gt_ggc_mx (modref_base_node<tree_node*>* &b)
{
  ggc_test_and_set_mark (b);
  if (b->refs)
    {
      ggc_test_and_set_mark (b->refs);
      gt_ggc_mx (b->refs);
    }
  if (b->base)
    gt_ggc_mx (b->base);
}

void gt_pch_nx (modref_base_node<int>*) {}
void gt_pch_nx (modref_base_node<tree_node*>*) {}
void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_ref_node<int>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
}

void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
{
  ggc_test_and_set_mark (r);
  if (r->accesses)
    {
      ggc_test_and_set_mark (r->accesses);
      gt_ggc_mx (r->accesses);
    }
  if (r->ref)
    gt_ggc_mx (r->ref);
}

void gt_pch_nx (modref_ref_node<int>*) {}
void gt_pch_nx (modref_ref_node<tree_node*>*) {}
void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}

void gt_ggc_mx (modref_access_node &)
{
}