/* gcc/ipa-modref-tree.cc  */
/* Data structure for the modref pass.
   Copyright (C) 2020-2023 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "ipa-modref-tree.h"
#include "selftest.h"
#include "tree-ssa-alias.h"
#include "gimple.h"
#include "cgraph.h"
#include "tree-streamer.h"
33 /* Return true if both accesses are the same. */
34 bool
35 modref_access_node::operator == (modref_access_node &a) const
37 if (parm_index != a.parm_index)
38 return false;
39 if (parm_index != MODREF_UNKNOWN_PARM
40 && parm_index != MODREF_GLOBAL_MEMORY_PARM)
42 if (parm_offset_known != a.parm_offset_known)
43 return false;
44 if (parm_offset_known
45 && !known_eq (parm_offset, a.parm_offset))
46 return false;
48 if (range_info_useful_p () != a.range_info_useful_p ())
49 return false;
50 if (range_info_useful_p ()
51 && (!known_eq (a.offset, offset)
52 || !known_eq (a.size, size)
53 || !known_eq (a.max_size, max_size)))
54 return false;
55 return true;
58 /* Return true A is a subaccess. */
59 bool
60 modref_access_node::contains (const modref_access_node &a) const
62 poly_int64 aoffset_adj = 0;
63 if (parm_index != MODREF_UNKNOWN_PARM)
65 if (parm_index != a.parm_index)
66 return false;
67 if (parm_offset_known)
69 if (!a.parm_offset_known)
70 return false;
71 /* Accesses are never below parm_offset, so look
72 for smaller offset.
73 If access ranges are known still allow merging
74 when bit offsets comparison passes. */
75 if (!known_le (parm_offset, a.parm_offset)
76 && !range_info_useful_p ())
77 return false;
78 /* We allow negative aoffset_adj here in case
79 there is an useful range. This is because adding
80 a.offset may result in non-negative offset again.
81 Ubsan fails on val << LOG_BITS_PER_UNIT where val
82 is negative. */
83 aoffset_adj = (a.parm_offset - parm_offset)
84 * BITS_PER_UNIT;
87 if (range_info_useful_p ())
89 if (!a.range_info_useful_p ())
90 return false;
91 /* Sizes of stores are used to check that object is big enough
92 to fit the store, so smaller or unknown store is more general
93 than large store. */
94 if (known_size_p (size)
95 && (!known_size_p (a.size)
96 || !known_le (size, a.size)))
97 return false;
98 if (known_size_p (max_size))
99 return known_subrange_p (a.offset + aoffset_adj,
100 a.max_size, offset, max_size);
101 else
102 return known_le (offset, a.offset + aoffset_adj);
104 return true;
107 /* Update access range to new parameters.
108 If RECORD_ADJUSTMENTS is true, record number of changes in the access
109 and if threshold is exceeded start dropping precision
110 so only constantly many updates are possible. This makes dataflow
111 to converge. */
112 void
113 modref_access_node::update (poly_int64 parm_offset1,
114 poly_int64 offset1, poly_int64 size1,
115 poly_int64 max_size1, bool record_adjustments)
117 if (known_eq (parm_offset, parm_offset1)
118 && known_eq (offset, offset1)
119 && known_eq (size, size1)
120 && known_eq (max_size, max_size1))
121 return;
122 if (!record_adjustments
123 || (++adjustments) < param_modref_max_adjustments)
125 parm_offset = parm_offset1;
126 offset = offset1;
127 size = size1;
128 max_size = max_size1;
130 else
132 if (dump_file)
133 fprintf (dump_file, "--param modref-max-adjustments limit reached:");
134 if (!known_eq (parm_offset, parm_offset1))
136 if (dump_file)
137 fprintf (dump_file, " parm_offset cleared");
138 parm_offset_known = false;
140 if (!known_eq (size, size1))
142 size = -1;
143 if (dump_file)
144 fprintf (dump_file, " size cleared");
146 if (!known_eq (max_size, max_size1))
148 max_size = -1;
149 if (dump_file)
150 fprintf (dump_file, " max_size cleared");
152 if (!known_eq (offset, offset1))
154 offset = 0;
155 if (dump_file)
156 fprintf (dump_file, " offset cleared");
158 if (dump_file)
159 fprintf (dump_file, "\n");
163 /* Merge in access A if it is possible to do without losing
164 precision. Return true if successful.
165 If RECORD_ADJUSTMENTs is true, remember how many interval
166 was prolonged and punt when there are too many. */
167 bool
168 modref_access_node::merge (const modref_access_node &a,
169 bool record_adjustments)
171 poly_int64 offset1 = 0;
172 poly_int64 aoffset1 = 0;
173 poly_int64 new_parm_offset = 0;
175 /* We assume that containment was tested earlier. */
176 gcc_checking_assert (!contains (a) && !a.contains (*this));
177 if (parm_index != MODREF_UNKNOWN_PARM)
179 if (parm_index != a.parm_index)
180 return false;
181 if (parm_offset_known)
183 if (!a.parm_offset_known)
184 return false;
185 if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
186 return false;
189 /* See if we can merge ranges. */
190 if (range_info_useful_p ())
192 /* In this case we have containment that should be
193 handled earlier. */
194 gcc_checking_assert (a.range_info_useful_p ());
196 /* If a.size is less specified than size, merge only
197 if intervals are otherwise equivalent. */
198 if (known_size_p (size)
199 && (!known_size_p (a.size) || known_lt (a.size, size)))
201 if (((known_size_p (max_size) || known_size_p (a.max_size))
202 && !known_eq (max_size, a.max_size))
203 || !known_eq (offset1, aoffset1))
204 return false;
205 update (new_parm_offset, offset1, a.size, max_size,
206 record_adjustments);
207 return true;
209 /* If sizes are same, we can extend the interval. */
210 if ((known_size_p (size) || known_size_p (a.size))
211 && !known_eq (size, a.size))
212 return false;
213 if (known_le (offset1, aoffset1))
215 if (!known_size_p (max_size)
216 || known_ge (offset1 + max_size, aoffset1))
218 update2 (new_parm_offset, offset1, size, max_size,
219 aoffset1, a.size, a.max_size,
220 record_adjustments);
221 return true;
224 else if (known_le (aoffset1, offset1))
226 if (!known_size_p (a.max_size)
227 || known_ge (aoffset1 + a.max_size, offset1))
229 update2 (new_parm_offset, offset1, size, max_size,
230 aoffset1, a.size, a.max_size,
231 record_adjustments);
232 return true;
235 return false;
237 update (new_parm_offset, offset1,
238 size, max_size, record_adjustments);
239 return true;
242 /* Return true if A1 and B1 can be merged with lower information
243 less than A2 and B2.
244 Assume that no containment or lossless merging is possible. */
245 bool
246 modref_access_node::closer_pair_p (const modref_access_node &a1,
247 const modref_access_node &b1,
248 const modref_access_node &a2,
249 const modref_access_node &b2)
251 /* Merging different parm indexes comes to complete loss
252 of range info. */
253 if (a1.parm_index != b1.parm_index)
254 return false;
255 if (a2.parm_index != b2.parm_index)
256 return true;
257 /* If parm is known and parm indexes are the same we should
258 already have containment. */
259 gcc_checking_assert (a1.parm_offset_known && b1.parm_offset_known);
260 gcc_checking_assert (a2.parm_offset_known && b2.parm_offset_known);
262 /* First normalize offsets for parm offsets. */
263 poly_int64 new_parm_offset, offseta1, offsetb1, offseta2, offsetb2;
264 if (!a1.combined_offsets (b1, &new_parm_offset, &offseta1, &offsetb1)
265 || !a2.combined_offsets (b2, &new_parm_offset, &offseta2, &offsetb2))
266 gcc_unreachable ();
269 /* Now compute distance of the intervals. */
270 poly_offset_int dist1, dist2;
271 if (known_le (offseta1, offsetb1))
273 if (!known_size_p (a1.max_size))
274 dist1 = 0;
275 else
276 dist1 = (poly_offset_int)offsetb1
277 - (poly_offset_int)offseta1
278 - (poly_offset_int)a1.max_size;
280 else
282 if (!known_size_p (b1.max_size))
283 dist1 = 0;
284 else
285 dist1 = (poly_offset_int)offseta1
286 - (poly_offset_int)offsetb1
287 - (poly_offset_int)b1.max_size;
289 if (known_le (offseta2, offsetb2))
291 if (!known_size_p (a2.max_size))
292 dist2 = 0;
293 else
294 dist2 = (poly_offset_int)offsetb2
295 - (poly_offset_int)offseta2
296 - (poly_offset_int)a2.max_size;
298 else
300 if (!known_size_p (b2.max_size))
301 dist2 = 0;
302 else
303 dist2 = offseta2
304 - (poly_offset_int)offsetb2
305 - (poly_offset_int)b2.max_size;
307 /* It may happen that intervals overlap in case size
308 is different. Prefer the overlap to non-overlap. */
309 if (known_lt (dist1, 0) && known_ge (dist2, 0))
310 return true;
311 if (known_lt (dist2, 0) && known_ge (dist1, 0))
312 return false;
313 if (known_lt (dist1, 0))
314 /* If both overlaps minimize overlap. */
315 return known_le (dist2, dist1);
316 else
317 /* If both are disjoint look for smaller distance. */
318 return known_le (dist1, dist2);
321 /* Merge in access A while losing precision. */
322 void
323 modref_access_node::forced_merge (const modref_access_node &a,
324 bool record_adjustments)
326 if (parm_index != a.parm_index)
328 gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM);
329 parm_index = MODREF_UNKNOWN_PARM;
330 return;
333 /* We assume that containment and lossless merging
334 was tested earlier. */
335 gcc_checking_assert (!contains (a) && !a.contains (*this)
336 && !merge (a, record_adjustments));
337 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
339 poly_int64 new_parm_offset, offset1, aoffset1;
340 if (!combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
342 parm_offset_known = false;
343 return;
345 gcc_checking_assert (range_info_useful_p ()
346 && a.range_info_useful_p ());
347 if (record_adjustments)
348 adjustments += a.adjustments;
349 update2 (new_parm_offset,
350 offset1, size, max_size,
351 aoffset1, a.size, a.max_size,
352 record_adjustments);
355 /* Merge two ranges both starting at parm_offset1 and update THIS
356 with result. */
357 void
358 modref_access_node::update2 (poly_int64 parm_offset1,
359 poly_int64 offset1, poly_int64 size1,
360 poly_int64 max_size1,
361 poly_int64 offset2, poly_int64 size2,
362 poly_int64 max_size2,
363 bool record_adjustments)
365 poly_int64 new_size = size1;
367 if (!known_size_p (size2)
368 || known_le (size2, size1))
369 new_size = size2;
370 else
371 gcc_checking_assert (known_le (size1, size2));
373 if (known_le (offset1, offset2))
375 else if (known_le (offset2, offset1))
377 std::swap (offset1, offset2);
378 std::swap (max_size1, max_size2);
380 else
381 gcc_unreachable ();
383 poly_int64 new_max_size;
385 if (!known_size_p (max_size1))
386 new_max_size = max_size1;
387 else if (!known_size_p (max_size2))
388 new_max_size = max_size2;
389 else
391 poly_offset_int s = (poly_offset_int)max_size2
392 + (poly_offset_int)offset2
393 - (poly_offset_int)offset1;
394 if (s.to_shwi (&new_max_size))
396 if (known_le (new_max_size, max_size1))
397 new_max_size = max_size1;
399 else
400 new_max_size = -1;
403 update (parm_offset1, offset1,
404 new_size, new_max_size, record_adjustments);
407 /* Given access nodes THIS and A, return true if they
408 can be done with common parm_offsets. In this case
409 return parm offset in new_parm_offset, new_offset
410 which is start of range in THIS and new_aoffset that
411 is start of range in A. */
412 bool
413 modref_access_node::combined_offsets (const modref_access_node &a,
414 poly_int64 *new_parm_offset,
415 poly_int64 *new_offset,
416 poly_int64 *new_aoffset) const
418 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
419 if (known_le (a.parm_offset, parm_offset))
421 *new_offset = offset
422 + ((parm_offset - a.parm_offset)
423 << LOG2_BITS_PER_UNIT);
424 *new_aoffset = a.offset;
425 *new_parm_offset = a.parm_offset;
426 return true;
428 else if (known_le (parm_offset, a.parm_offset))
430 *new_aoffset = a.offset
431 + ((a.parm_offset - parm_offset)
432 << LOG2_BITS_PER_UNIT);
433 *new_offset = offset;
434 *new_parm_offset = parm_offset;
435 return true;
437 else
438 return false;
441 /* Try to optimize the access ACCESSES list after entry INDEX was modified. */
442 void
443 modref_access_node::try_merge_with (vec <modref_access_node, va_gc> *&accesses,
444 size_t index)
446 size_t i;
448 for (i = 0; i < accesses->length ();)
449 if (i != index)
451 bool found = false, restart = false;
452 modref_access_node *a = &(*accesses)[i];
453 modref_access_node *n = &(*accesses)[index];
455 if (n->contains (*a))
456 found = true;
457 if (!found && n->merge (*a, false))
458 found = restart = true;
459 gcc_checking_assert (found || !a->merge (*n, false));
460 if (found)
462 accesses->unordered_remove (i);
463 if (index == accesses->length ())
465 index = i;
466 i++;
468 if (restart)
469 i = 0;
471 else
472 i++;
474 else
475 i++;
478 /* Stream out to OB. */
480 void
481 modref_access_node::stream_out (struct output_block *ob) const
483 streamer_write_hwi (ob, parm_index);
484 if (parm_index != MODREF_UNKNOWN_PARM)
486 streamer_write_uhwi (ob, parm_offset_known);
487 if (parm_offset_known)
489 streamer_write_poly_int64 (ob, parm_offset);
490 streamer_write_poly_int64 (ob, offset);
491 streamer_write_poly_int64 (ob, size);
492 streamer_write_poly_int64 (ob, max_size);
497 modref_access_node
498 modref_access_node::stream_in (struct lto_input_block *ib)
500 int parm_index = streamer_read_hwi (ib);
501 bool parm_offset_known = false;
502 poly_int64 parm_offset = 0;
503 poly_int64 offset = 0;
504 poly_int64 size = -1;
505 poly_int64 max_size = -1;
507 if (parm_index != MODREF_UNKNOWN_PARM)
509 parm_offset_known = streamer_read_uhwi (ib);
510 if (parm_offset_known)
512 parm_offset = streamer_read_poly_int64 (ib);
513 offset = streamer_read_poly_int64 (ib);
514 size = streamer_read_poly_int64 (ib);
515 max_size = streamer_read_poly_int64 (ib);
518 return {offset, size, max_size, parm_offset, parm_index,
519 parm_offset_known, false};
522 /* Insert access with OFFSET and SIZE.
523 Collapse tree if it has more than MAX_ACCESSES entries.
524 If RECORD_ADJUSTMENTs is true avoid too many interval extensions.
525 Return true if record was changed.
527 Return 0 if nothing changed, 1 if insert was successful and -1
528 if entries should be collapsed. */
530 modref_access_node::insert (vec <modref_access_node, va_gc> *&accesses,
531 modref_access_node a, size_t max_accesses,
532 bool record_adjustments)
534 size_t i, j;
535 modref_access_node *a2;
537 /* Verify that list does not contain redundant accesses. */
538 if (flag_checking)
540 size_t i, i2;
541 modref_access_node *a, *a2;
543 FOR_EACH_VEC_SAFE_ELT (accesses, i, a)
545 FOR_EACH_VEC_SAFE_ELT (accesses, i2, a2)
546 if (i != i2)
547 gcc_assert (!a->contains (*a2));
551 FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
553 if (a2->contains (a))
554 return 0;
555 if (a.contains (*a2))
557 a.adjustments = 0;
558 a2->parm_index = a.parm_index;
559 a2->parm_offset_known = a.parm_offset_known;
560 a2->update (a.parm_offset, a.offset, a.size, a.max_size,
561 record_adjustments);
562 modref_access_node::try_merge_with (accesses, i);
563 return 1;
565 if (a2->merge (a, record_adjustments))
567 modref_access_node::try_merge_with (accesses, i);
568 return 1;
570 gcc_checking_assert (!(a == *a2));
573 /* If this base->ref pair has too many accesses stored, we will clear
574 all accesses and bail out. */
575 if (accesses && accesses->length () >= max_accesses)
577 if (max_accesses < 2)
578 return -1;
579 /* Find least harmful merge and perform it. */
580 int best1 = -1, best2 = -1;
581 FOR_EACH_VEC_SAFE_ELT (accesses, i, a2)
583 for (j = i + 1; j < accesses->length (); j++)
584 if (best1 < 0
585 || modref_access_node::closer_pair_p
586 (*a2, (*accesses)[j],
587 (*accesses)[best1],
588 best2 < 0 ? a : (*accesses)[best2]))
590 best1 = i;
591 best2 = j;
593 if (modref_access_node::closer_pair_p
594 (*a2, a,
595 (*accesses)[best1],
596 best2 < 0 ? a : (*accesses)[best2]))
598 best1 = i;
599 best2 = -1;
602 (*accesses)[best1].forced_merge (best2 < 0 ? a : (*accesses)[best2],
603 record_adjustments);
604 /* Check that merging indeed merged ranges. */
605 gcc_checking_assert ((*accesses)[best1].contains
606 (best2 < 0 ? a : (*accesses)[best2]));
607 if (!(*accesses)[best1].useful_p ())
608 return -1;
609 if (dump_file && best2 >= 0)
610 fprintf (dump_file,
611 "--param modref-max-accesses limit reached;"
612 " merging %i and %i\n", best1, best2);
613 else if (dump_file)
614 fprintf (dump_file,
615 "--param modref-max-accesses limit reached;"
616 " merging with %i\n", best1);
617 modref_access_node::try_merge_with (accesses, best1);
618 if (best2 >= 0)
619 insert (accesses, a, max_accesses, record_adjustments);
620 return 1;
622 a.adjustments = 0;
623 vec_safe_push (accesses, a);
624 return 1;
627 /* Return true if range info is useful. */
628 bool
629 modref_access_node::range_info_useful_p () const
631 return parm_index != MODREF_UNKNOWN_PARM
632 && parm_index != MODREF_GLOBAL_MEMORY_PARM
633 && parm_offset_known
634 && (known_size_p (size)
635 || known_size_p (max_size)
636 || known_ge (offset, 0));
639 /* Dump range to debug OUT. */
640 void
641 modref_access_node::dump (FILE *out)
643 if (parm_index != MODREF_UNKNOWN_PARM)
645 if (parm_index == MODREF_GLOBAL_MEMORY_PARM)
646 fprintf (out, " Base in global memory");
647 else if (parm_index >= 0)
648 fprintf (out, " Parm %i", parm_index);
649 else if (parm_index == MODREF_STATIC_CHAIN_PARM)
650 fprintf (out, " Static chain");
651 else
652 gcc_unreachable ();
653 if (parm_offset_known)
655 fprintf (out, " param offset:");
656 print_dec ((poly_int64_pod)parm_offset, out, SIGNED);
659 if (range_info_useful_p ())
661 fprintf (out, " offset:");
662 print_dec ((poly_int64_pod)offset, out, SIGNED);
663 fprintf (out, " size:");
664 print_dec ((poly_int64_pod)size, out, SIGNED);
665 fprintf (out, " max_size:");
666 print_dec ((poly_int64_pod)max_size, out, SIGNED);
667 if (adjustments)
668 fprintf (out, " adjusted %i times", adjustments);
670 fprintf (out, "\n");
673 /* Return tree corresponding to parameter of the range in STMT. */
674 tree
675 modref_access_node::get_call_arg (const gcall *stmt) const
677 if (parm_index == MODREF_UNKNOWN_PARM
678 || parm_index == MODREF_GLOBAL_MEMORY_PARM)
679 return NULL;
680 if (parm_index == MODREF_STATIC_CHAIN_PARM)
681 return gimple_call_chain (stmt);
682 /* MODREF_RETSLOT_PARM should not happen in access trees since the store
683 is seen explicitly in the caller. */
684 gcc_checking_assert (parm_index >= 0);
685 if (parm_index >= (int)gimple_call_num_args (stmt))
686 return NULL;
687 return gimple_call_arg (stmt, parm_index);
690 /* Return tree corresponding to parameter of the range in STMT. */
691 bool
692 modref_access_node::get_ao_ref (const gcall *stmt, ao_ref *ref) const
694 tree arg;
696 if (!parm_offset_known
697 || !(arg = get_call_arg (stmt))
698 || !POINTER_TYPE_P (TREE_TYPE (arg)))
699 return false;
700 poly_offset_int off = (poly_offset_int)offset
701 + ((poly_offset_int)parm_offset << LOG2_BITS_PER_UNIT);
702 poly_int64 off2;
703 if (!off.to_shwi (&off2))
704 return false;
705 ao_ref_init_from_ptr_and_range (ref, arg, true, off2, size, max_size);
706 return true;
709 /* Return true A is a subkill. */
710 bool
711 modref_access_node::contains_for_kills (const modref_access_node &a) const
713 poly_int64 aoffset_adj = 0;
715 gcc_checking_assert (parm_index != MODREF_UNKNOWN_PARM
716 && a.parm_index != MODREF_UNKNOWN_PARM);
717 if (parm_index != a.parm_index)
718 return false;
719 gcc_checking_assert (parm_offset_known && a.parm_offset_known);
720 aoffset_adj = (a.parm_offset - parm_offset)
721 * BITS_PER_UNIT;
722 gcc_checking_assert (range_info_useful_p () && a.range_info_useful_p ());
723 return known_subrange_p (a.offset + aoffset_adj,
724 a.max_size, offset, max_size);
727 /* Merge two ranges both starting at parm_offset1 and update THIS
728 with result. */
729 bool
730 modref_access_node::update_for_kills (poly_int64 parm_offset1,
731 poly_int64 offset1,
732 poly_int64 max_size1,
733 poly_int64 offset2,
734 poly_int64 max_size2,
735 bool record_adjustments)
737 if (known_le (offset1, offset2))
739 else if (known_le (offset2, offset1))
741 std::swap (offset1, offset2);
742 std::swap (max_size1, max_size2);
744 else
745 gcc_unreachable ();
747 poly_int64 new_max_size = max_size2 + offset2 - offset1;
748 if (known_le (new_max_size, max_size1))
749 new_max_size = max_size1;
750 if (known_eq (parm_offset, parm_offset1)
751 && known_eq (offset, offset1)
752 && known_eq (size, new_max_size)
753 && known_eq (max_size, new_max_size))
754 return false;
756 if (!record_adjustments
757 || (++adjustments) < param_modref_max_adjustments)
759 parm_offset = parm_offset1;
760 offset = offset1;
761 max_size = new_max_size;
762 size = new_max_size;
763 gcc_checking_assert (useful_for_kill_p ());
764 return true;
766 return false;
769 /* Merge in access A if it is possible to do without losing
770 precision. Return true if successful.
771 Unlike merge assume that both accesses are always executed
772 and merge size the same was as max_size. */
773 bool
774 modref_access_node::merge_for_kills (const modref_access_node &a,
775 bool record_adjustments)
777 poly_int64 offset1 = 0;
778 poly_int64 aoffset1 = 0;
779 poly_int64 new_parm_offset = 0;
781 /* We assume that containment was tested earlier. */
782 gcc_checking_assert (!contains_for_kills (a) && !a.contains_for_kills (*this)
783 && useful_for_kill_p () && a.useful_for_kill_p ());
785 if (parm_index != a.parm_index
786 || !combined_offsets (a, &new_parm_offset, &offset1, &aoffset1))
787 return false;
789 if (known_le (offset1, aoffset1))
791 if (!known_size_p (max_size)
792 || known_ge (offset1 + max_size, aoffset1))
793 return update_for_kills (new_parm_offset, offset1, max_size,
794 aoffset1, a.max_size, record_adjustments);
796 else if (known_le (aoffset1, offset1))
798 if (!known_size_p (a.max_size)
799 || known_ge (aoffset1 + a.max_size, offset1))
800 return update_for_kills (new_parm_offset, offset1, max_size,
801 aoffset1, a.max_size, record_adjustments);
803 return false;
806 /* Insert new kill A into KILLS. If RECORD_ADJUSTMENTS is true limit number
807 of changes to each entry. Return true if something changed. */
809 bool
810 modref_access_node::insert_kill (vec<modref_access_node> &kills,
811 modref_access_node &a, bool record_adjustments)
813 size_t index;
814 modref_access_node *a2;
815 bool merge = false;
817 gcc_checking_assert (a.useful_for_kill_p ());
819 /* See if we have corresponding entry already or we can merge with
820 neighboring entry. */
821 FOR_EACH_VEC_ELT (kills, index, a2)
823 if (a2->contains_for_kills (a))
824 return false;
825 if (a.contains_for_kills (*a2))
827 a.adjustments = 0;
828 *a2 = a;
829 merge = true;
830 break;
832 if (a2->merge_for_kills (a, record_adjustments))
834 merge = true;
835 break;
838 /* If entry was not found, insert it. */
839 if (!merge)
841 if ((int)kills.length () >= param_modref_max_accesses)
843 if (dump_file)
844 fprintf (dump_file, "--param modref-max-accesses limit reached:");
845 return false;
847 a.adjustments = 0;
848 kills.safe_push (a);
849 return true;
851 /* Extending range in an entry may make it possible to merge it with
852 other entries. */
853 size_t i;
855 for (i = 0; i < kills.length ();)
856 if (i != index)
858 bool found = false, restart = false;
859 modref_access_node *a = &kills[i];
860 modref_access_node *n = &kills[index];
862 if (n->contains_for_kills (*a))
863 found = true;
864 if (!found && n->merge_for_kills (*a, false))
865 found = restart = true;
866 gcc_checking_assert (found || !a->merge_for_kills (*n, false));
867 if (found)
869 kills.unordered_remove (i);
870 if (index == kills.length ())
872 index = i;
873 i++;
875 if (restart)
876 i = 0;
878 else
879 i++;
881 else
882 i++;
883 return true;
#if CHECKING_P

namespace selftest {

/* Verify insert/search behavior of modref_tree including collapse of
   ref lists and base lists when limits are hit.  */

static void
test_insert_search_collapse ()
{
  modref_base_node<alias_set_type> *base_node;
  modref_ref_node<alias_set_type> *ref_node;
  modref_access_node a = unspecified_modref_access_node;

  modref_tree<alias_set_type> *t = new modref_tree<alias_set_type>();
  ASSERT_FALSE (t->every_base);

  /* Insert into an empty tree.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_FALSE (t->every_base);
  ASSERT_EQ (t->search (2), NULL);

  base_node = t->search (1);
  ASSERT_NE (base_node, NULL);
  ASSERT_EQ (base_node->base, 1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 1);
  ASSERT_EQ (base_node->search (1), NULL);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);
  ASSERT_EQ (ref_node->ref, 2);

  /* Insert when base exists but ref does not.  */
  t->insert (1, 2, 2, 1, 3, a, false);
  ASSERT_NE (t->bases, NULL);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (t->search (2), NULL);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_EQ (base_node->refs->length (), 2);

  ref_node = base_node->search (3);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist, but access is not dominated by nor
     dominates other accesses.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->bases->length (), 1);
  ASSERT_EQ (t->search (1), base_node);

  ref_node = base_node->search (2);
  ASSERT_NE (ref_node, NULL);

  /* Insert when base and ref exist and access is dominated.  */
  t->insert (1, 2, 2, 1, 2, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->search (2), ref_node);

  /* Insert ref to trigger ref list collapse for base 1.  */
  t->insert (1, 2, 2, 1, 4, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Further inserts to collapsed ref list are ignored.  */
  t->insert (1, 2, 2, 1, 5, a, false);
  ASSERT_EQ (t->search (1), base_node);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_EQ (base_node->search (2), NULL);
  ASSERT_EQ (base_node->search (3), NULL);
  ASSERT_TRUE (base_node->every_ref);

  /* Insert base to trigger base list collapse.  */
  t->insert (1, 2, 2, 5, 0, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  /* Further inserts to collapsed base list are ignored.  */
  t->insert (1, 2, 2, 7, 8, a, false);
  ASSERT_TRUE (t->every_base);
  ASSERT_EQ (t->bases, NULL);
  ASSERT_EQ (t->search (1), NULL);

  delete t;
}

/* Verify merging of two modref_trees.  */

static void
test_merge ()
{
  modref_tree<alias_set_type> *t1, *t2;
  modref_base_node<alias_set_type> *base_node;
  modref_access_node a = unspecified_modref_access_node;

  t1 = new modref_tree<alias_set_type>();
  t1->insert (3, 4, 1, 1, 1, a, false);
  t1->insert (3, 4, 1, 1, 2, a, false);
  t1->insert (3, 4, 1, 1, 3, a, false);
  t1->insert (3, 4, 1, 2, 1, a, false);
  t1->insert (3, 4, 1, 3, 1, a, false);

  t2 = new modref_tree<alias_set_type>();
  t2->insert (10, 10, 10, 1, 2, a, false);
  t2->insert (10, 10, 10, 1, 3, a, false);
  t2->insert (10, 10, 10, 1, 4, a, false);
  t2->insert (10, 10, 10, 3, 2, a, false);
  t2->insert (10, 10, 10, 3, 3, a, false);
  t2->insert (10, 10, 10, 3, 4, a, false);
  t2->insert (10, 10, 10, 3, 5, a, false);

  t1->merge (3, 4, 1, t2, NULL, NULL, false);

  ASSERT_FALSE (t1->every_base);
  ASSERT_NE (t1->bases, NULL);
  ASSERT_EQ (t1->bases->length (), 3);

  base_node = t1->search (1);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 4);

  base_node = t1->search (2);
  ASSERT_NE (base_node->refs, NULL);
  ASSERT_FALSE (base_node->every_ref);
  ASSERT_EQ (base_node->refs->length (), 1);

  base_node = t1->search (3);
  ASSERT_EQ (base_node->refs, NULL);
  ASSERT_TRUE (base_node->every_ref);

  delete t1;
  delete t2;
}

/* Run all of the selftests within this file.  */

void
ipa_modref_tree_cc_tests ()
{
  test_insert_search_collapse ();
  test_merge ();
}

} // namespace selftest

#endif
1035 void
1036 gt_ggc_mx (modref_tree < int >*const &tt)
1038 if (tt->bases)
1040 ggc_test_and_set_mark (tt->bases);
1041 gt_ggc_mx (tt->bases);
1045 void
1046 gt_ggc_mx (modref_tree < tree_node * >*const &tt)
1048 if (tt->bases)
1050 ggc_test_and_set_mark (tt->bases);
1051 gt_ggc_mx (tt->bases);
1055 void gt_pch_nx (modref_tree<int>* const&) {}
1056 void gt_pch_nx (modref_tree<tree_node*>* const&) {}
1057 void gt_pch_nx (modref_tree<int>* const&, gt_pointer_operator, void *) {}
1058 void gt_pch_nx (modref_tree<tree_node*>* const&, gt_pointer_operator, void *) {}
1060 void gt_ggc_mx (modref_base_node<int>* &b)
1062 ggc_test_and_set_mark (b);
1063 if (b->refs)
1065 ggc_test_and_set_mark (b->refs);
1066 gt_ggc_mx (b->refs);
1070 void gt_ggc_mx (modref_base_node<tree_node*>* &b)
1072 ggc_test_and_set_mark (b);
1073 if (b->refs)
1075 ggc_test_and_set_mark (b->refs);
1076 gt_ggc_mx (b->refs);
1078 if (b->base)
1079 gt_ggc_mx (b->base);
1082 void gt_pch_nx (modref_base_node<int>*) {}
1083 void gt_pch_nx (modref_base_node<tree_node*>*) {}
1084 void gt_pch_nx (modref_base_node<int>*, gt_pointer_operator, void *) {}
1085 void gt_pch_nx (modref_base_node<tree_node*>*, gt_pointer_operator, void *) {}
1087 void gt_ggc_mx (modref_ref_node<int>* &r)
1089 ggc_test_and_set_mark (r);
1090 if (r->accesses)
1092 ggc_test_and_set_mark (r->accesses);
1093 gt_ggc_mx (r->accesses);
1097 void gt_ggc_mx (modref_ref_node<tree_node*>* &r)
1099 ggc_test_and_set_mark (r);
1100 if (r->accesses)
1102 ggc_test_and_set_mark (r->accesses);
1103 gt_ggc_mx (r->accesses);
1105 if (r->ref)
1106 gt_ggc_mx (r->ref);
1109 void gt_pch_nx (modref_ref_node<int>* ) {}
1110 void gt_pch_nx (modref_ref_node<tree_node*>*) {}
1111 void gt_pch_nx (modref_ref_node<int>*, gt_pointer_operator, void *) {}
1112 void gt_pch_nx (modref_ref_node<tree_node*>*, gt_pointer_operator, void *) {}
1114 void gt_ggc_mx (modref_access_node &)