1 /* Data structure for the modref pass.
2 Copyright (C) 2020-2021 Free Software Foundation, Inc.
3 Contributed by David Cepelik and Jan Hubicka
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
26 #include "ipa-modref-tree.h"
28 #include "tree-ssa-alias.h"
31 #include "tree-streamer.h"
33 /* Return true if both accesses are the same. */
35 modref_access_node::operator == (modref_access_node
&a
) const
37 if (parm_index
!= a
.parm_index
)
39 if (parm_index
!= MODREF_UNKNOWN_PARM
40 && parm_index
!= MODREF_GLOBAL_MEMORY_PARM
)
42 if (parm_offset_known
!= a
.parm_offset_known
)
45 && !known_eq (parm_offset
, a
.parm_offset
))
48 if (range_info_useful_p () != a
.range_info_useful_p ())
50 if (range_info_useful_p ()
51 && (!known_eq (a
.offset
, offset
)
52 || !known_eq (a
.size
, size
)
53 || !known_eq (a
.max_size
, max_size
)))
58 /* Return true if A is a subaccess. */
60 modref_access_node::contains (const modref_access_node
&a
) const
62 poly_int64 aoffset_adj
= 0;
63 if (parm_index
!= MODREF_UNKNOWN_PARM
)
65 if (parm_index
!= a
.parm_index
)
67 if (parm_offset_known
)
69 if (!a
.parm_offset_known
)
71 /* Accesses are never below parm_offset, so look
73 If access ranges are known still allow merging
74 when bit offsets comparison passes. */
75 if (!known_le (parm_offset
, a
.parm_offset
)
76 && !range_info_useful_p ())
78 /* We allow negative aoffset_adj here in case
79 there is a useful range. This is because adding
80 a.offset may result in non-negative offset again.
81 Ubsan fails on val << LOG_BITS_PER_UNIT where val
83 aoffset_adj
= (a
.parm_offset
- parm_offset
)
87 if (range_info_useful_p ())
89 if (!a
.range_info_useful_p ())
91 /* Sizes of stores are used to check that object is big enough
92 to fit the store, so smaller or unknown store is more general
94 if (known_size_p (size
)
95 && (!known_size_p (a
.size
)
96 || !known_le (size
, a
.size
)))
98 if (known_size_p (max_size
))
99 return known_subrange_p (a
.offset
+ aoffset_adj
,
100 a
.max_size
, offset
, max_size
);
102 return known_le (offset
, a
.offset
+ aoffset_adj
);
107 /* Update access range to new parameters.
108 If RECORD_ADJUSTMENTS is true, record number of changes in the access
109 and if threshold is exceeded start dropping precision
110 so only constantly many updates are possible. This makes dataflow
113 modref_access_node::update (poly_int64 parm_offset1
,
114 poly_int64 offset1
, poly_int64 size1
,
115 poly_int64 max_size1
, bool record_adjustments
)
117 if (known_eq (parm_offset
, parm_offset1
)
118 && known_eq (offset
, offset1
)
119 && known_eq (size
, size1
)
120 && known_eq (max_size
, max_size1
))
122 if (!record_adjustments
123 || (++adjustments
) < param_modref_max_adjustments
)
125 parm_offset
= parm_offset1
;
128 max_size
= max_size1
;
134 "--param param=modref-max-adjustments limit reached:");
135 if (!known_eq (parm_offset
, parm_offset1
))
138 fprintf (dump_file
, " parm_offset cleared");
139 parm_offset_known
= false;
141 if (!known_eq (size
, size1
))
145 fprintf (dump_file
, " size cleared");
147 if (!known_eq (max_size
, max_size1
))
151 fprintf (dump_file
, " max_size cleared");
153 if (!known_eq (offset
, offset1
))
157 fprintf (dump_file
, " offset cleared");
160 fprintf (dump_file
, "\n");
164 /* Merge in access A if it is possible to do without losing
165 precision. Return true if successful.
166 If RECORD_ADJUSTMENTS is true, remember how many intervals
167 were prolonged and punt when there are too many. */
169 modref_access_node::merge (const modref_access_node
&a
,
170 bool record_adjustments
)
172 poly_int64 offset1
= 0;
173 poly_int64 aoffset1
= 0;
174 poly_int64 new_parm_offset
= 0;
176 /* We assume that containment was tested earlier. */
177 gcc_checking_assert (!contains (a
) && !a
.contains (*this));
178 if (parm_index
!= MODREF_UNKNOWN_PARM
)
180 if (parm_index
!= a
.parm_index
)
182 if (parm_offset_known
)
184 if (!a
.parm_offset_known
)
186 if (!combined_offsets (a
, &new_parm_offset
, &offset1
, &aoffset1
))
190 /* See if we can merge ranges. */
191 if (range_info_useful_p ())
193 /* In this case we have containment that should be
195 gcc_checking_assert (a
.range_info_useful_p ());
197 /* If a.size is less specified than size, merge only
198 if intervals are otherwise equivalent. */
199 if (known_size_p (size
)
200 && (!known_size_p (a
.size
) || known_lt (a
.size
, size
)))
202 if (((known_size_p (max_size
) || known_size_p (a
.max_size
))
203 && !known_eq (max_size
, a
.max_size
))
204 || !known_eq (offset1
, aoffset1
))
206 update (new_parm_offset
, offset1
, a
.size
, max_size
,
210 /* If sizes are same, we can extend the interval. */
211 if ((known_size_p (size
) || known_size_p (a
.size
))
212 && !known_eq (size
, a
.size
))
214 if (known_le (offset1
, aoffset1
))
216 if (!known_size_p (max_size
)
217 || known_ge (offset1
+ max_size
, aoffset1
))
219 update2 (new_parm_offset
, offset1
, size
, max_size
,
220 aoffset1
, a
.size
, a
.max_size
,
225 else if (known_le (aoffset1
, offset1
))
227 if (!known_size_p (a
.max_size
)
228 || known_ge (aoffset1
+ a
.max_size
, offset1
))
230 update2 (new_parm_offset
, offset1
, size
, max_size
,
231 aoffset1
, a
.size
, a
.max_size
,
238 update (new_parm_offset
, offset1
,
239 size
, max_size
, record_adjustments
);
243 /* Return true if A1 and B1 can be merged with lower information
245 Assume that no containment or lossless merging is possible. */
247 modref_access_node::closer_pair_p (const modref_access_node
&a1
,
248 const modref_access_node
&b1
,
249 const modref_access_node
&a2
,
250 const modref_access_node
&b2
)
252 /* Merging different parm indexes comes to complete loss
254 if (a1
.parm_index
!= b1
.parm_index
)
256 if (a2
.parm_index
!= b2
.parm_index
)
258 /* If parm is known and parm indexes are the same we should
259 already have containment. */
260 gcc_checking_assert (a1
.parm_offset_known
&& b1
.parm_offset_known
);
261 gcc_checking_assert (a2
.parm_offset_known
&& b2
.parm_offset_known
);
263 /* First normalize offsets for parm offsets. */
264 poly_int64 new_parm_offset
, offseta1
, offsetb1
, offseta2
, offsetb2
;
265 if (!a1
.combined_offsets (b1
, &new_parm_offset
, &offseta1
, &offsetb1
)
266 || !a2
.combined_offsets (b2
, &new_parm_offset
, &offseta2
, &offsetb2
))
270 /* Now compute distance of the intervals. */
271 poly_int64 dist1
, dist2
;
272 if (known_le (offseta1
, offsetb1
))
274 if (!known_size_p (a1
.max_size
))
277 dist1
= offsetb1
- offseta1
- a1
.max_size
;
281 if (!known_size_p (b1
.max_size
))
284 dist1
= offseta1
- offsetb1
- b1
.max_size
;
286 if (known_le (offseta2
, offsetb2
))
288 if (!known_size_p (a2
.max_size
))
291 dist2
= offsetb2
- offseta2
- a2
.max_size
;
295 if (!known_size_p (b2
.max_size
))
298 dist2
= offseta2
- offsetb2
- b2
.max_size
;
300 /* It may happen that intervals overlap in case size
301 is different. Prefer the overlap to non-overlap. */
302 if (known_lt (dist1
, 0) && known_ge (dist2
, 0))
304 if (known_lt (dist2
, 0) && known_ge (dist1
, 0))
306 if (known_lt (dist1
, 0))
307 /* If both overlaps minimize overlap. */
308 return known_le (dist2
, dist1
);
310 /* If both are disjoint look for smaller distance. */
311 return known_le (dist1
, dist2
);
314 /* Merge in access A while losing precision. */
316 modref_access_node::forced_merge (const modref_access_node
&a
,
317 bool record_adjustments
)
319 if (parm_index
!= a
.parm_index
)
321 gcc_checking_assert (parm_index
!= MODREF_UNKNOWN_PARM
);
322 parm_index
= MODREF_UNKNOWN_PARM
;
326 /* We assume that containment and lossless merging
327 was tested earlier. */
328 gcc_checking_assert (!contains (a
) && !a
.contains (*this)
329 && !merge (a
, record_adjustments
));
330 gcc_checking_assert (parm_offset_known
&& a
.parm_offset_known
);
332 poly_int64 new_parm_offset
, offset1
, aoffset1
;
333 if (!combined_offsets (a
, &new_parm_offset
, &offset1
, &aoffset1
))
335 parm_offset_known
= false;
338 gcc_checking_assert (range_info_useful_p ()
339 && a
.range_info_useful_p ());
340 if (record_adjustments
)
341 adjustments
+= a
.adjustments
;
342 update2 (new_parm_offset
,
343 offset1
, size
, max_size
,
344 aoffset1
, a
.size
, a
.max_size
,
348 /* Merge two ranges both starting at parm_offset1 and update THIS
351 modref_access_node::update2 (poly_int64 parm_offset1
,
352 poly_int64 offset1
, poly_int64 size1
,
353 poly_int64 max_size1
,
354 poly_int64 offset2
, poly_int64 size2
,
355 poly_int64 max_size2
,
356 bool record_adjustments
)
358 poly_int64 new_size
= size1
;
360 if (!known_size_p (size2
)
361 || known_le (size2
, size1
))
364 gcc_checking_assert (known_le (size1
, size2
));
366 if (known_le (offset1
, offset2
))
368 else if (known_le (offset2
, offset1
))
370 std::swap (offset1
, offset2
);
371 std::swap (max_size1
, max_size2
);
376 poly_int64 new_max_size
;
378 if (!known_size_p (max_size1
))
379 new_max_size
= max_size1
;
380 else if (!known_size_p (max_size2
))
381 new_max_size
= max_size2
;
384 new_max_size
= max_size2
+ offset2
- offset1
;
385 if (known_le (new_max_size
, max_size1
))
386 new_max_size
= max_size1
;
389 update (parm_offset1
, offset1
,
390 new_size
, new_max_size
, record_adjustments
);
393 /* Given access nodes THIS and A, return true if they
394 can be done with common parm_offsets. In this case
395 return parm offset in new_parm_offset, new_offset
396 which is start of range in THIS and new_aoffset that
397 is start of range in A. */
399 modref_access_node::combined_offsets (const modref_access_node
&a
,
400 poly_int64
*new_parm_offset
,
401 poly_int64
*new_offset
,
402 poly_int64
*new_aoffset
) const
404 gcc_checking_assert (parm_offset_known
&& a
.parm_offset_known
);
405 if (known_le (a
.parm_offset
, parm_offset
))
408 + ((parm_offset
- a
.parm_offset
)
409 << LOG2_BITS_PER_UNIT
);
410 *new_aoffset
= a
.offset
;
411 *new_parm_offset
= a
.parm_offset
;
414 else if (known_le (parm_offset
, a
.parm_offset
))
416 *new_aoffset
= a
.offset
417 + ((a
.parm_offset
- parm_offset
)
418 << LOG2_BITS_PER_UNIT
);
419 *new_offset
= offset
;
420 *new_parm_offset
= parm_offset
;
427 /* Try to optimize the access ACCESSES list after entry INDEX was modified. */
429 modref_access_node::try_merge_with (vec
<modref_access_node
, va_gc
> *&accesses
,
434 for (i
= 0; i
< accesses
->length ();)
437 bool found
= false, restart
= false;
438 modref_access_node
*a
= &(*accesses
)[i
];
439 modref_access_node
*n
= &(*accesses
)[index
];
441 if (n
->contains (*a
))
443 if (!found
&& n
->merge (*a
, false))
444 found
= restart
= true;
445 gcc_checking_assert (found
|| !a
->merge (*n
, false));
448 accesses
->unordered_remove (i
);
449 if (index
== accesses
->length ())
464 /* Stream out to OB. */
467 modref_access_node::stream_out (struct output_block
*ob
) const
469 streamer_write_hwi (ob
, parm_index
);
470 if (parm_index
!= MODREF_UNKNOWN_PARM
)
472 streamer_write_uhwi (ob
, parm_offset_known
);
473 if (parm_offset_known
)
475 streamer_write_poly_int64 (ob
, parm_offset
);
476 streamer_write_poly_int64 (ob
, offset
);
477 streamer_write_poly_int64 (ob
, size
);
478 streamer_write_poly_int64 (ob
, max_size
);
484 modref_access_node::stream_in (struct lto_input_block
*ib
)
486 int parm_index
= streamer_read_hwi (ib
);
487 bool parm_offset_known
= false;
488 poly_int64 parm_offset
= 0;
489 poly_int64 offset
= 0;
490 poly_int64 size
= -1;
491 poly_int64 max_size
= -1;
493 if (parm_index
!= MODREF_UNKNOWN_PARM
)
495 parm_offset_known
= streamer_read_uhwi (ib
);
496 if (parm_offset_known
)
498 parm_offset
= streamer_read_poly_int64 (ib
);
499 offset
= streamer_read_poly_int64 (ib
);
500 size
= streamer_read_poly_int64 (ib
);
501 max_size
= streamer_read_poly_int64 (ib
);
504 return {offset
, size
, max_size
, parm_offset
, parm_index
,
505 parm_offset_known
, false};
508 /* Insert access with OFFSET and SIZE.
509 Collapse tree if it has more than MAX_ACCESSES entries.
510 If RECORD_ADJUSTMENTS is true avoid too many interval extensions.
511 Return true if record was changed.
513 Return 0 if nothing changed, 1 if insert was successful and -1
514 if entries should be collapsed. */
516 modref_access_node::insert (vec
<modref_access_node
, va_gc
> *&accesses
,
517 modref_access_node a
, size_t max_accesses
,
518 bool record_adjustments
)
521 modref_access_node
*a2
;
523 /* Verify that list does not contain redundant accesses. */
527 modref_access_node
*a
, *a2
;
529 FOR_EACH_VEC_SAFE_ELT (accesses
, i
, a
)
531 FOR_EACH_VEC_SAFE_ELT (accesses
, i2
, a2
)
533 gcc_assert (!a
->contains (*a2
));
537 FOR_EACH_VEC_SAFE_ELT (accesses
, i
, a2
)
539 if (a2
->contains (a
))
541 if (a
.contains (*a2
))
544 a2
->parm_index
= a
.parm_index
;
545 a2
->parm_offset_known
= a
.parm_offset_known
;
546 a2
->update (a
.parm_offset
, a
.offset
, a
.size
, a
.max_size
,
548 modref_access_node::try_merge_with (accesses
, i
);
551 if (a2
->merge (a
, record_adjustments
))
553 modref_access_node::try_merge_with (accesses
, i
);
556 gcc_checking_assert (!(a
== *a2
));
559 /* If this base->ref pair has too many accesses stored, we will clear
560 all accesses and bail out. */
561 if (accesses
&& accesses
->length () >= max_accesses
)
563 if (max_accesses
< 2)
565 /* Find least harmful merge and perform it. */
566 int best1
= -1, best2
= -1;
567 FOR_EACH_VEC_SAFE_ELT (accesses
, i
, a2
)
569 for (j
= i
+ 1; j
< accesses
->length (); j
++)
571 || modref_access_node::closer_pair_p
572 (*a2
, (*accesses
)[j
],
574 best2
< 0 ? a
: (*accesses
)[best2
]))
579 if (modref_access_node::closer_pair_p
582 best2
< 0 ? a
: (*accesses
)[best2
]))
588 (*accesses
)[best1
].forced_merge (best2
< 0 ? a
: (*accesses
)[best2
],
590 /* Check that merging indeed merged ranges. */
591 gcc_checking_assert ((*accesses
)[best1
].contains
592 (best2
< 0 ? a
: (*accesses
)[best2
]));
593 if (!(*accesses
)[best1
].useful_p ())
595 if (dump_file
&& best2
>= 0)
597 "--param param=modref-max-accesses limit reached;"
598 " merging %i and %i\n", best1
, best2
);
601 "--param param=modref-max-accesses limit reached;"
602 " merging with %i\n", best1
);
603 modref_access_node::try_merge_with (accesses
, best1
);
605 insert (accesses
, a
, max_accesses
, record_adjustments
);
609 vec_safe_push (accesses
, a
);
613 /* Return true if range info is useful. */
615 modref_access_node::range_info_useful_p () const
617 return parm_index
!= MODREF_UNKNOWN_PARM
618 && parm_index
!= MODREF_GLOBAL_MEMORY_PARM
620 && (known_size_p (size
)
621 || known_size_p (max_size
)
622 || known_ge (offset
, 0));
625 /* Dump range to debug OUT. */
627 modref_access_node::dump (FILE *out
)
629 if (parm_index
!= MODREF_UNKNOWN_PARM
)
631 if (parm_index
== MODREF_GLOBAL_MEMORY_PARM
)
632 fprintf (out
, " Base in global memory");
633 else if (parm_index
>= 0)
634 fprintf (out
, " Parm %i", parm_index
);
635 else if (parm_index
== MODREF_STATIC_CHAIN_PARM
)
636 fprintf (out
, " Static chain");
639 if (parm_offset_known
)
641 fprintf (out
, " param offset:");
642 print_dec ((poly_int64_pod
)parm_offset
, out
, SIGNED
);
645 if (range_info_useful_p ())
647 fprintf (out
, " offset:");
648 print_dec ((poly_int64_pod
)offset
, out
, SIGNED
);
649 fprintf (out
, " size:");
650 print_dec ((poly_int64_pod
)size
, out
, SIGNED
);
651 fprintf (out
, " max_size:");
652 print_dec ((poly_int64_pod
)max_size
, out
, SIGNED
);
654 fprintf (out
, " adjusted %i times", adjustments
);
659 /* Return tree corresponding to parameter of the range in STMT. */
661 modref_access_node::get_call_arg (const gcall
*stmt
) const
663 if (parm_index
== MODREF_UNKNOWN_PARM
664 || parm_index
== MODREF_GLOBAL_MEMORY_PARM
)
666 if (parm_index
== MODREF_STATIC_CHAIN_PARM
)
667 return gimple_call_chain (stmt
);
668 /* MODREF_RETSLOT_PARM should not happen in access trees since the store
669 is seen explicitly in the caller. */
670 gcc_checking_assert (parm_index
>= 0);
671 if (parm_index
>= (int)gimple_call_num_args (stmt
))
673 return gimple_call_arg (stmt
, parm_index
);
676 /* Return tree corresponding to parameter of the range in STMT. */
678 modref_access_node::get_ao_ref (const gcall
*stmt
, ao_ref
*ref
) const
682 if (!parm_offset_known
|| !(arg
= get_call_arg (stmt
)))
684 poly_offset_int off
= (poly_offset_int
)offset
685 + ((poly_offset_int
)parm_offset
<< LOG2_BITS_PER_UNIT
);
687 if (!off
.to_shwi (&off2
))
689 ao_ref_init_from_ptr_and_range (ref
, arg
, true, off2
, size
, max_size
);
693 /* Return true if A is a subkill. */
695 modref_access_node::contains_for_kills (const modref_access_node
&a
) const
697 poly_int64 aoffset_adj
= 0;
699 gcc_checking_assert (parm_index
!= MODREF_UNKNOWN_PARM
700 && a
.parm_index
!= MODREF_UNKNOWN_PARM
);
701 if (parm_index
!= a
.parm_index
)
703 gcc_checking_assert (parm_offset_known
&& a
.parm_offset_known
);
704 aoffset_adj
= (a
.parm_offset
- parm_offset
)
706 gcc_checking_assert (range_info_useful_p () && a
.range_info_useful_p ());
707 return known_subrange_p (a
.offset
+ aoffset_adj
,
708 a
.max_size
, offset
, max_size
);
711 /* Merge two ranges both starting at parm_offset1 and update THIS
714 modref_access_node::update_for_kills (poly_int64 parm_offset1
,
716 poly_int64 max_size1
,
718 poly_int64 max_size2
,
719 bool record_adjustments
)
721 if (known_le (offset1
, offset2
))
723 else if (known_le (offset2
, offset1
))
725 std::swap (offset1
, offset2
);
726 std::swap (max_size1
, max_size2
);
731 poly_int64 new_max_size
= max_size2
+ offset2
- offset1
;
732 if (known_le (new_max_size
, max_size1
))
733 new_max_size
= max_size1
;
734 if (known_eq (parm_offset
, parm_offset1
)
735 && known_eq (offset
, offset1
)
736 && known_eq (size
, new_max_size
)
737 && known_eq (max_size
, new_max_size
))
740 if (!record_adjustments
741 || (++adjustments
) < param_modref_max_adjustments
)
743 parm_offset
= parm_offset1
;
745 max_size
= new_max_size
;
747 gcc_checking_assert (useful_for_kill_p ());
753 /* Merge in access A if it is possible to do without losing
754 precision. Return true if successful.
755 Unlike merge assume that both accesses are always executed
756 and merge size the same was as max_size. */
758 modref_access_node::merge_for_kills (const modref_access_node
&a
,
759 bool record_adjustments
)
761 poly_int64 offset1
= 0;
762 poly_int64 aoffset1
= 0;
763 poly_int64 new_parm_offset
= 0;
765 /* We assume that containment was tested earlier. */
766 gcc_checking_assert (!contains_for_kills (a
) && !a
.contains_for_kills (*this)
767 && useful_for_kill_p () && a
.useful_for_kill_p ());
769 if (parm_index
!= a
.parm_index
770 || !combined_offsets (a
, &new_parm_offset
, &offset1
, &aoffset1
))
773 if (known_le (offset1
, aoffset1
))
775 if (!known_size_p (max_size
)
776 || known_ge (offset1
+ max_size
, aoffset1
))
777 return update_for_kills (new_parm_offset
, offset1
, max_size
,
778 aoffset1
, a
.max_size
, record_adjustments
);
780 else if (known_le (aoffset1
, offset1
))
782 if (!known_size_p (a
.max_size
)
783 || known_ge (aoffset1
+ a
.max_size
, offset1
))
784 return update_for_kills (new_parm_offset
, offset1
, max_size
,
785 aoffset1
, a
.max_size
, record_adjustments
);
790 /* Insert new kill A into KILLS. If RECORD_ADJUSTMENTS is true limit number
791 of changes to each entry. Return true if something changed. */
794 modref_access_node::insert_kill (vec
<modref_access_node
> &kills
,
795 modref_access_node
&a
, bool record_adjustments
)
798 modref_access_node
*a2
;
801 gcc_checking_assert (a
.useful_for_kill_p ());
803 /* See if we have corresponding entry already or we can merge with
804 neighbouring entry. */
805 FOR_EACH_VEC_ELT (kills
, index
, a2
)
807 if (a2
->contains_for_kills (a
))
809 if (a
.contains_for_kills (*a2
))
816 if (a2
->merge_for_kills (a
, record_adjustments
))
822 /* If entry was not found, insert it. */
825 if ((int)kills
.length () >= param_modref_max_accesses
)
829 "--param param=modref-max-accesses limit reached:");
836 /* Extending range in an entry may make it possible to merge it with
840 for (i
= 0; i
< kills
.length ();)
843 bool found
= false, restart
= false;
844 modref_access_node
*a
= &kills
[i
];
845 modref_access_node
*n
= &kills
[index
];
847 if (n
->contains_for_kills (*a
))
849 if (!found
&& n
->merge_for_kills (*a
, false))
850 found
= restart
= true;
851 gcc_checking_assert (found
|| !a
->merge_for_kills (*n
, false));
854 kills
.unordered_remove (i
);
855 if (index
== kills
.length ())
877 test_insert_search_collapse ()
879 modref_base_node
<alias_set_type
> *base_node
;
880 modref_ref_node
<alias_set_type
> *ref_node
;
881 modref_access_node a
= unspecified_modref_access_node
;
883 modref_tree
<alias_set_type
> *t
= new modref_tree
<alias_set_type
>();
884 ASSERT_FALSE (t
->every_base
);
886 /* Insert into an empty tree. */
887 t
->insert (1, 2, 2, 1, 2, a
, false);
888 ASSERT_NE (t
->bases
, NULL
);
889 ASSERT_EQ (t
->bases
->length (), 1);
890 ASSERT_FALSE (t
->every_base
);
891 ASSERT_EQ (t
->search (2), NULL
);
893 base_node
= t
->search (1);
894 ASSERT_NE (base_node
, NULL
);
895 ASSERT_EQ (base_node
->base
, 1);
896 ASSERT_NE (base_node
->refs
, NULL
);
897 ASSERT_EQ (base_node
->refs
->length (), 1);
898 ASSERT_EQ (base_node
->search (1), NULL
);
900 ref_node
= base_node
->search (2);
901 ASSERT_NE (ref_node
, NULL
);
902 ASSERT_EQ (ref_node
->ref
, 2);
904 /* Insert when base exists but ref does not. */
905 t
->insert (1, 2, 2, 1, 3, a
, false);
906 ASSERT_NE (t
->bases
, NULL
);
907 ASSERT_EQ (t
->bases
->length (), 1);
908 ASSERT_EQ (t
->search (1), base_node
);
909 ASSERT_EQ (t
->search (2), NULL
);
910 ASSERT_NE (base_node
->refs
, NULL
);
911 ASSERT_EQ (base_node
->refs
->length (), 2);
913 ref_node
= base_node
->search (3);
914 ASSERT_NE (ref_node
, NULL
);
916 /* Insert when base and ref exist, but access is not dominated by nor
917 dominates other accesses. */
918 t
->insert (1, 2, 2, 1, 2, a
, false);
919 ASSERT_EQ (t
->bases
->length (), 1);
920 ASSERT_EQ (t
->search (1), base_node
);
922 ref_node
= base_node
->search (2);
923 ASSERT_NE (ref_node
, NULL
);
925 /* Insert when base and ref exist and access is dominated. */
926 t
->insert (1, 2, 2, 1, 2, a
, false);
927 ASSERT_EQ (t
->search (1), base_node
);
928 ASSERT_EQ (base_node
->search (2), ref_node
);
930 /* Insert ref to trigger ref list collapse for base 1. */
931 t
->insert (1, 2, 2, 1, 4, a
, false);
932 ASSERT_EQ (t
->search (1), base_node
);
933 ASSERT_EQ (base_node
->refs
, NULL
);
934 ASSERT_EQ (base_node
->search (2), NULL
);
935 ASSERT_EQ (base_node
->search (3), NULL
);
936 ASSERT_TRUE (base_node
->every_ref
);
938 /* Further inserts to collapsed ref list are ignored. */
939 t
->insert (1, 2, 2, 1, 5, a
, false);
940 ASSERT_EQ (t
->search (1), base_node
);
941 ASSERT_EQ (base_node
->refs
, NULL
);
942 ASSERT_EQ (base_node
->search (2), NULL
);
943 ASSERT_EQ (base_node
->search (3), NULL
);
944 ASSERT_TRUE (base_node
->every_ref
);
946 /* Insert base to trigger base list collapse. */
947 t
->insert (1, 2, 2, 5, 0, a
, false);
948 ASSERT_TRUE (t
->every_base
);
949 ASSERT_EQ (t
->bases
, NULL
);
950 ASSERT_EQ (t
->search (1), NULL
);
952 /* Further inserts to collapsed base list are ignored. */
953 t
->insert (1, 2, 2, 7, 8, a
, false);
954 ASSERT_TRUE (t
->every_base
);
955 ASSERT_EQ (t
->bases
, NULL
);
956 ASSERT_EQ (t
->search (1), NULL
);
964 modref_tree
<alias_set_type
> *t1
, *t2
;
965 modref_base_node
<alias_set_type
> *base_node
;
966 modref_access_node a
= unspecified_modref_access_node
;
968 t1
= new modref_tree
<alias_set_type
>();
969 t1
->insert (3, 4, 1, 1, 1, a
, false);
970 t1
->insert (3, 4, 1, 1, 2, a
, false);
971 t1
->insert (3, 4, 1, 1, 3, a
, false);
972 t1
->insert (3, 4, 1, 2, 1, a
, false);
973 t1
->insert (3, 4, 1, 3, 1, a
, false);
975 t2
= new modref_tree
<alias_set_type
>();
976 t2
->insert (10, 10, 10, 1, 2, a
, false);
977 t2
->insert (10, 10, 10, 1, 3, a
, false);
978 t2
->insert (10, 10, 10, 1, 4, a
, false);
979 t2
->insert (10, 10, 10, 3, 2, a
, false);
980 t2
->insert (10, 10, 10, 3, 3, a
, false);
981 t2
->insert (10, 10, 10, 3, 4, a
, false);
982 t2
->insert (10, 10, 10, 3, 5, a
, false);
984 t1
->merge (3, 4, 1, t2
, NULL
, NULL
, false);
986 ASSERT_FALSE (t1
->every_base
);
987 ASSERT_NE (t1
->bases
, NULL
);
988 ASSERT_EQ (t1
->bases
->length (), 3);
990 base_node
= t1
->search (1);
991 ASSERT_NE (base_node
->refs
, NULL
);
992 ASSERT_FALSE (base_node
->every_ref
);
993 ASSERT_EQ (base_node
->refs
->length (), 4);
995 base_node
= t1
->search (2);
996 ASSERT_NE (base_node
->refs
, NULL
);
997 ASSERT_FALSE (base_node
->every_ref
);
998 ASSERT_EQ (base_node
->refs
->length (), 1);
1000 base_node
= t1
->search (3);
1001 ASSERT_EQ (base_node
->refs
, NULL
);
1002 ASSERT_TRUE (base_node
->every_ref
);
1010 ipa_modref_tree_c_tests ()
1012 test_insert_search_collapse ();
1016 } // namespace selftest
1021 gt_ggc_mx (modref_tree
< int >*const &tt
)
1025 ggc_test_and_set_mark (tt
->bases
);
1026 gt_ggc_mx (tt
->bases
);
1031 gt_ggc_mx (modref_tree
< tree_node
* >*const &tt
)
1035 ggc_test_and_set_mark (tt
->bases
);
1036 gt_ggc_mx (tt
->bases
);
1040 void gt_pch_nx (modref_tree
<int>* const&) {}
1041 void gt_pch_nx (modref_tree
<tree_node
*>* const&) {}
1042 void gt_pch_nx (modref_tree
<int>* const&, gt_pointer_operator
, void *) {}
1043 void gt_pch_nx (modref_tree
<tree_node
*>* const&, gt_pointer_operator
, void *) {}
1045 void gt_ggc_mx (modref_base_node
<int>* &b
)
1047 ggc_test_and_set_mark (b
);
1050 ggc_test_and_set_mark (b
->refs
);
1051 gt_ggc_mx (b
->refs
);
1055 void gt_ggc_mx (modref_base_node
<tree_node
*>* &b
)
1057 ggc_test_and_set_mark (b
);
1060 ggc_test_and_set_mark (b
->refs
);
1061 gt_ggc_mx (b
->refs
);
1064 gt_ggc_mx (b
->base
);
1067 void gt_pch_nx (modref_base_node
<int>*) {}
1068 void gt_pch_nx (modref_base_node
<tree_node
*>*) {}
1069 void gt_pch_nx (modref_base_node
<int>*, gt_pointer_operator
, void *) {}
1070 void gt_pch_nx (modref_base_node
<tree_node
*>*, gt_pointer_operator
, void *) {}
1072 void gt_ggc_mx (modref_ref_node
<int>* &r
)
1074 ggc_test_and_set_mark (r
);
1077 ggc_test_and_set_mark (r
->accesses
);
1078 gt_ggc_mx (r
->accesses
);
1082 void gt_ggc_mx (modref_ref_node
<tree_node
*>* &r
)
1084 ggc_test_and_set_mark (r
);
1087 ggc_test_and_set_mark (r
->accesses
);
1088 gt_ggc_mx (r
->accesses
);
1094 void gt_pch_nx (modref_ref_node
<int>* ) {}
1095 void gt_pch_nx (modref_ref_node
<tree_node
*>*) {}
1096 void gt_pch_nx (modref_ref_node
<int>*, gt_pointer_operator
, void *) {}
1097 void gt_pch_nx (modref_ref_node
<tree_node
*>*, gt_pointer_operator
, void *) {}
1099 void gt_ggc_mx (modref_access_node
&)