/* Data structure for the modref pass.
   Copyright (C) 2020-2022 Free Software Foundation, Inc.
   Contributed by David Cepelik and Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "ipa-modref-tree.h"
#include "selftest.h"
#include "tree-ssa-alias.h"
#include "gimple.h"
#include "cgraph.h"
#include "tree-streamer.h"
33 /* Return true if both accesses are the same. */
35 modref_access_node::operator == (modref_access_node
&a
) const
37 if (parm_index
!= a
.parm_index
)
39 if (parm_index
!= MODREF_UNKNOWN_PARM
40 && parm_index
!= MODREF_GLOBAL_MEMORY_PARM
)
42 if (parm_offset_known
!= a
.parm_offset_known
)
45 && !known_eq (parm_offset
, a
.parm_offset
))
48 if (range_info_useful_p () != a
.range_info_useful_p ())
50 if (range_info_useful_p ()
51 && (!known_eq (a
.offset
, offset
)
52 || !known_eq (a
.size
, size
)
53 || !known_eq (a
.max_size
, max_size
)))
58 /* Return true A is a subaccess. */
60 modref_access_node::contains (const modref_access_node
&a
) const
62 poly_int64 aoffset_adj
= 0;
63 if (parm_index
!= MODREF_UNKNOWN_PARM
)
65 if (parm_index
!= a
.parm_index
)
67 if (parm_offset_known
)
69 if (!a
.parm_offset_known
)
71 /* Accesses are never below parm_offset, so look
73 If access ranges are known still allow merging
74 when bit offsets comparison passes. */
75 if (!known_le (parm_offset
, a
.parm_offset
)
76 && !range_info_useful_p ())
78 /* We allow negative aoffset_adj here in case
79 there is an useful range. This is because adding
80 a.offset may result in non-negative offset again.
81 Ubsan fails on val << LOG_BITS_PER_UNIT where val
83 aoffset_adj
= (a
.parm_offset
- parm_offset
)
87 if (range_info_useful_p ())
89 if (!a
.range_info_useful_p ())
91 /* Sizes of stores are used to check that object is big enough
92 to fit the store, so smaller or unknown store is more general
94 if (known_size_p (size
)
95 && (!known_size_p (a
.size
)
96 || !known_le (size
, a
.size
)))
98 if (known_size_p (max_size
))
99 return known_subrange_p (a
.offset
+ aoffset_adj
,
100 a
.max_size
, offset
, max_size
);
102 return known_le (offset
, a
.offset
+ aoffset_adj
);
107 /* Update access range to new parameters.
108 If RECORD_ADJUSTMENTS is true, record number of changes in the access
109 and if threshold is exceeded start dropping precision
110 so only constantly many updates are possible. This makes dataflow
113 modref_access_node::update (poly_int64 parm_offset1
,
114 poly_int64 offset1
, poly_int64 size1
,
115 poly_int64 max_size1
, bool record_adjustments
)
117 if (known_eq (parm_offset
, parm_offset1
)
118 && known_eq (offset
, offset1
)
119 && known_eq (size
, size1
)
120 && known_eq (max_size
, max_size1
))
122 if (!record_adjustments
123 || (++adjustments
) < param_modref_max_adjustments
)
125 parm_offset
= parm_offset1
;
128 max_size
= max_size1
;
133 fprintf (dump_file
, "--param modref-max-adjustments limit reached:");
134 if (!known_eq (parm_offset
, parm_offset1
))
137 fprintf (dump_file
, " parm_offset cleared");
138 parm_offset_known
= false;
140 if (!known_eq (size
, size1
))
144 fprintf (dump_file
, " size cleared");
146 if (!known_eq (max_size
, max_size1
))
150 fprintf (dump_file
, " max_size cleared");
152 if (!known_eq (offset
, offset1
))
156 fprintf (dump_file
, " offset cleared");
159 fprintf (dump_file
, "\n");
163 /* Merge in access A if it is possible to do without losing
164 precision. Return true if successful.
165 If RECORD_ADJUSTMENTs is true, remember how many interval
166 was prolonged and punt when there are too many. */
168 modref_access_node::merge (const modref_access_node
&a
,
169 bool record_adjustments
)
171 poly_int64 offset1
= 0;
172 poly_int64 aoffset1
= 0;
173 poly_int64 new_parm_offset
= 0;
175 /* We assume that containment was tested earlier. */
176 gcc_checking_assert (!contains (a
) && !a
.contains (*this));
177 if (parm_index
!= MODREF_UNKNOWN_PARM
)
179 if (parm_index
!= a
.parm_index
)
181 if (parm_offset_known
)
183 if (!a
.parm_offset_known
)
185 if (!combined_offsets (a
, &new_parm_offset
, &offset1
, &aoffset1
))
189 /* See if we can merge ranges. */
190 if (range_info_useful_p ())
192 /* In this case we have containment that should be
194 gcc_checking_assert (a
.range_info_useful_p ());
196 /* If a.size is less specified than size, merge only
197 if intervals are otherwise equivalent. */
198 if (known_size_p (size
)
199 && (!known_size_p (a
.size
) || known_lt (a
.size
, size
)))
201 if (((known_size_p (max_size
) || known_size_p (a
.max_size
))
202 && !known_eq (max_size
, a
.max_size
))
203 || !known_eq (offset1
, aoffset1
))
205 update (new_parm_offset
, offset1
, a
.size
, max_size
,
209 /* If sizes are same, we can extend the interval. */
210 if ((known_size_p (size
) || known_size_p (a
.size
))
211 && !known_eq (size
, a
.size
))
213 if (known_le (offset1
, aoffset1
))
215 if (!known_size_p (max_size
)
216 || known_ge (offset1
+ max_size
, aoffset1
))
218 update2 (new_parm_offset
, offset1
, size
, max_size
,
219 aoffset1
, a
.size
, a
.max_size
,
224 else if (known_le (aoffset1
, offset1
))
226 if (!known_size_p (a
.max_size
)
227 || known_ge (aoffset1
+ a
.max_size
, offset1
))
229 update2 (new_parm_offset
, offset1
, size
, max_size
,
230 aoffset1
, a
.size
, a
.max_size
,
237 update (new_parm_offset
, offset1
,
238 size
, max_size
, record_adjustments
);
242 /* Return true if A1 and B1 can be merged with lower information
244 Assume that no containment or lossless merging is possible. */
246 modref_access_node::closer_pair_p (const modref_access_node
&a1
,
247 const modref_access_node
&b1
,
248 const modref_access_node
&a2
,
249 const modref_access_node
&b2
)
251 /* Merging different parm indexes comes to complete loss
253 if (a1
.parm_index
!= b1
.parm_index
)
255 if (a2
.parm_index
!= b2
.parm_index
)
257 /* If parm is known and parm indexes are the same we should
258 already have containment. */
259 gcc_checking_assert (a1
.parm_offset_known
&& b1
.parm_offset_known
);
260 gcc_checking_assert (a2
.parm_offset_known
&& b2
.parm_offset_known
);
262 /* First normalize offsets for parm offsets. */
263 poly_int64 new_parm_offset
, offseta1
, offsetb1
, offseta2
, offsetb2
;
264 if (!a1
.combined_offsets (b1
, &new_parm_offset
, &offseta1
, &offsetb1
)
265 || !a2
.combined_offsets (b2
, &new_parm_offset
, &offseta2
, &offsetb2
))
269 /* Now compute distance of the intervals. */
270 poly_int64 dist1
, dist2
;
271 if (known_le (offseta1
, offsetb1
))
273 if (!known_size_p (a1
.max_size
))
276 dist1
= offsetb1
- offseta1
- a1
.max_size
;
280 if (!known_size_p (b1
.max_size
))
283 dist1
= offseta1
- offsetb1
- b1
.max_size
;
285 if (known_le (offseta2
, offsetb2
))
287 if (!known_size_p (a2
.max_size
))
290 dist2
= offsetb2
- offseta2
- a2
.max_size
;
294 if (!known_size_p (b2
.max_size
))
297 dist2
= offseta2
- offsetb2
- b2
.max_size
;
299 /* It may happen that intervals overlap in case size
300 is different. Prefer the overlap to non-overlap. */
301 if (known_lt (dist1
, 0) && known_ge (dist2
, 0))
303 if (known_lt (dist2
, 0) && known_ge (dist1
, 0))
305 if (known_lt (dist1
, 0))
306 /* If both overlaps minimize overlap. */
307 return known_le (dist2
, dist1
);
309 /* If both are disjoint look for smaller distance. */
310 return known_le (dist1
, dist2
);
313 /* Merge in access A while losing precision. */
315 modref_access_node::forced_merge (const modref_access_node
&a
,
316 bool record_adjustments
)
318 if (parm_index
!= a
.parm_index
)
320 gcc_checking_assert (parm_index
!= MODREF_UNKNOWN_PARM
);
321 parm_index
= MODREF_UNKNOWN_PARM
;
325 /* We assume that containment and lossless merging
326 was tested earlier. */
327 gcc_checking_assert (!contains (a
) && !a
.contains (*this)
328 && !merge (a
, record_adjustments
));
329 gcc_checking_assert (parm_offset_known
&& a
.parm_offset_known
);
331 poly_int64 new_parm_offset
, offset1
, aoffset1
;
332 if (!combined_offsets (a
, &new_parm_offset
, &offset1
, &aoffset1
))
334 parm_offset_known
= false;
337 gcc_checking_assert (range_info_useful_p ()
338 && a
.range_info_useful_p ());
339 if (record_adjustments
)
340 adjustments
+= a
.adjustments
;
341 update2 (new_parm_offset
,
342 offset1
, size
, max_size
,
343 aoffset1
, a
.size
, a
.max_size
,
347 /* Merge two ranges both starting at parm_offset1 and update THIS
350 modref_access_node::update2 (poly_int64 parm_offset1
,
351 poly_int64 offset1
, poly_int64 size1
,
352 poly_int64 max_size1
,
353 poly_int64 offset2
, poly_int64 size2
,
354 poly_int64 max_size2
,
355 bool record_adjustments
)
357 poly_int64 new_size
= size1
;
359 if (!known_size_p (size2
)
360 || known_le (size2
, size1
))
363 gcc_checking_assert (known_le (size1
, size2
));
365 if (known_le (offset1
, offset2
))
367 else if (known_le (offset2
, offset1
))
369 std::swap (offset1
, offset2
);
370 std::swap (max_size1
, max_size2
);
375 poly_int64 new_max_size
;
377 if (!known_size_p (max_size1
))
378 new_max_size
= max_size1
;
379 else if (!known_size_p (max_size2
))
380 new_max_size
= max_size2
;
383 new_max_size
= max_size2
+ offset2
- offset1
;
384 if (known_le (new_max_size
, max_size1
))
385 new_max_size
= max_size1
;
388 update (parm_offset1
, offset1
,
389 new_size
, new_max_size
, record_adjustments
);
392 /* Given access nodes THIS and A, return true if they
393 can be done with common parm_offsets. In this case
394 return parm offset in new_parm_offset, new_offset
395 which is start of range in THIS and new_aoffset that
396 is start of range in A. */
398 modref_access_node::combined_offsets (const modref_access_node
&a
,
399 poly_int64
*new_parm_offset
,
400 poly_int64
*new_offset
,
401 poly_int64
*new_aoffset
) const
403 gcc_checking_assert (parm_offset_known
&& a
.parm_offset_known
);
404 if (known_le (a
.parm_offset
, parm_offset
))
407 + ((parm_offset
- a
.parm_offset
)
408 << LOG2_BITS_PER_UNIT
);
409 *new_aoffset
= a
.offset
;
410 *new_parm_offset
= a
.parm_offset
;
413 else if (known_le (parm_offset
, a
.parm_offset
))
415 *new_aoffset
= a
.offset
416 + ((a
.parm_offset
- parm_offset
)
417 << LOG2_BITS_PER_UNIT
);
418 *new_offset
= offset
;
419 *new_parm_offset
= parm_offset
;
426 /* Try to optimize the access ACCESSES list after entry INDEX was modified. */
428 modref_access_node::try_merge_with (vec
<modref_access_node
, va_gc
> *&accesses
,
433 for (i
= 0; i
< accesses
->length ();)
436 bool found
= false, restart
= false;
437 modref_access_node
*a
= &(*accesses
)[i
];
438 modref_access_node
*n
= &(*accesses
)[index
];
440 if (n
->contains (*a
))
442 if (!found
&& n
->merge (*a
, false))
443 found
= restart
= true;
444 gcc_checking_assert (found
|| !a
->merge (*n
, false));
447 accesses
->unordered_remove (i
);
448 if (index
== accesses
->length ())
463 /* Stream out to OB. */
466 modref_access_node::stream_out (struct output_block
*ob
) const
468 streamer_write_hwi (ob
, parm_index
);
469 if (parm_index
!= MODREF_UNKNOWN_PARM
)
471 streamer_write_uhwi (ob
, parm_offset_known
);
472 if (parm_offset_known
)
474 streamer_write_poly_int64 (ob
, parm_offset
);
475 streamer_write_poly_int64 (ob
, offset
);
476 streamer_write_poly_int64 (ob
, size
);
477 streamer_write_poly_int64 (ob
, max_size
);
483 modref_access_node::stream_in (struct lto_input_block
*ib
)
485 int parm_index
= streamer_read_hwi (ib
);
486 bool parm_offset_known
= false;
487 poly_int64 parm_offset
= 0;
488 poly_int64 offset
= 0;
489 poly_int64 size
= -1;
490 poly_int64 max_size
= -1;
492 if (parm_index
!= MODREF_UNKNOWN_PARM
)
494 parm_offset_known
= streamer_read_uhwi (ib
);
495 if (parm_offset_known
)
497 parm_offset
= streamer_read_poly_int64 (ib
);
498 offset
= streamer_read_poly_int64 (ib
);
499 size
= streamer_read_poly_int64 (ib
);
500 max_size
= streamer_read_poly_int64 (ib
);
503 return {offset
, size
, max_size
, parm_offset
, parm_index
,
504 parm_offset_known
, false};
507 /* Insert access with OFFSET and SIZE.
508 Collapse tree if it has more than MAX_ACCESSES entries.
509 If RECORD_ADJUSTMENTs is true avoid too many interval extensions.
510 Return true if record was changed.
512 Return 0 if nothing changed, 1 if insert was successful and -1
513 if entries should be collapsed. */
515 modref_access_node::insert (vec
<modref_access_node
, va_gc
> *&accesses
,
516 modref_access_node a
, size_t max_accesses
,
517 bool record_adjustments
)
520 modref_access_node
*a2
;
522 /* Verify that list does not contain redundant accesses. */
526 modref_access_node
*a
, *a2
;
528 FOR_EACH_VEC_SAFE_ELT (accesses
, i
, a
)
530 FOR_EACH_VEC_SAFE_ELT (accesses
, i2
, a2
)
532 gcc_assert (!a
->contains (*a2
));
536 FOR_EACH_VEC_SAFE_ELT (accesses
, i
, a2
)
538 if (a2
->contains (a
))
540 if (a
.contains (*a2
))
543 a2
->parm_index
= a
.parm_index
;
544 a2
->parm_offset_known
= a
.parm_offset_known
;
545 a2
->update (a
.parm_offset
, a
.offset
, a
.size
, a
.max_size
,
547 modref_access_node::try_merge_with (accesses
, i
);
550 if (a2
->merge (a
, record_adjustments
))
552 modref_access_node::try_merge_with (accesses
, i
);
555 gcc_checking_assert (!(a
== *a2
));
558 /* If this base->ref pair has too many accesses stored, we will clear
559 all accesses and bail out. */
560 if (accesses
&& accesses
->length () >= max_accesses
)
562 if (max_accesses
< 2)
564 /* Find least harmful merge and perform it. */
565 int best1
= -1, best2
= -1;
566 FOR_EACH_VEC_SAFE_ELT (accesses
, i
, a2
)
568 for (j
= i
+ 1; j
< accesses
->length (); j
++)
570 || modref_access_node::closer_pair_p
571 (*a2
, (*accesses
)[j
],
573 best2
< 0 ? a
: (*accesses
)[best2
]))
578 if (modref_access_node::closer_pair_p
581 best2
< 0 ? a
: (*accesses
)[best2
]))
587 (*accesses
)[best1
].forced_merge (best2
< 0 ? a
: (*accesses
)[best2
],
589 /* Check that merging indeed merged ranges. */
590 gcc_checking_assert ((*accesses
)[best1
].contains
591 (best2
< 0 ? a
: (*accesses
)[best2
]));
592 if (!(*accesses
)[best1
].useful_p ())
594 if (dump_file
&& best2
>= 0)
596 "--param modref-max-accesses limit reached;"
597 " merging %i and %i\n", best1
, best2
);
600 "--param modref-max-accesses limit reached;"
601 " merging with %i\n", best1
);
602 modref_access_node::try_merge_with (accesses
, best1
);
604 insert (accesses
, a
, max_accesses
, record_adjustments
);
608 vec_safe_push (accesses
, a
);
612 /* Return true if range info is useful. */
614 modref_access_node::range_info_useful_p () const
616 return parm_index
!= MODREF_UNKNOWN_PARM
617 && parm_index
!= MODREF_GLOBAL_MEMORY_PARM
619 && (known_size_p (size
)
620 || known_size_p (max_size
)
621 || known_ge (offset
, 0));
624 /* Dump range to debug OUT. */
626 modref_access_node::dump (FILE *out
)
628 if (parm_index
!= MODREF_UNKNOWN_PARM
)
630 if (parm_index
== MODREF_GLOBAL_MEMORY_PARM
)
631 fprintf (out
, " Base in global memory");
632 else if (parm_index
>= 0)
633 fprintf (out
, " Parm %i", parm_index
);
634 else if (parm_index
== MODREF_STATIC_CHAIN_PARM
)
635 fprintf (out
, " Static chain");
638 if (parm_offset_known
)
640 fprintf (out
, " param offset:");
641 print_dec ((poly_int64_pod
)parm_offset
, out
, SIGNED
);
644 if (range_info_useful_p ())
646 fprintf (out
, " offset:");
647 print_dec ((poly_int64_pod
)offset
, out
, SIGNED
);
648 fprintf (out
, " size:");
649 print_dec ((poly_int64_pod
)size
, out
, SIGNED
);
650 fprintf (out
, " max_size:");
651 print_dec ((poly_int64_pod
)max_size
, out
, SIGNED
);
653 fprintf (out
, " adjusted %i times", adjustments
);
658 /* Return tree corresponding to parameter of the range in STMT. */
660 modref_access_node::get_call_arg (const gcall
*stmt
) const
662 if (parm_index
== MODREF_UNKNOWN_PARM
663 || parm_index
== MODREF_GLOBAL_MEMORY_PARM
)
665 if (parm_index
== MODREF_STATIC_CHAIN_PARM
)
666 return gimple_call_chain (stmt
);
667 /* MODREF_RETSLOT_PARM should not happen in access trees since the store
668 is seen explicitly in the caller. */
669 gcc_checking_assert (parm_index
>= 0);
670 if (parm_index
>= (int)gimple_call_num_args (stmt
))
672 return gimple_call_arg (stmt
, parm_index
);
675 /* Return tree corresponding to parameter of the range in STMT. */
677 modref_access_node::get_ao_ref (const gcall
*stmt
, ao_ref
*ref
) const
681 if (!parm_offset_known
|| !(arg
= get_call_arg (stmt
)))
683 poly_offset_int off
= (poly_offset_int
)offset
684 + ((poly_offset_int
)parm_offset
<< LOG2_BITS_PER_UNIT
);
686 if (!off
.to_shwi (&off2
))
688 ao_ref_init_from_ptr_and_range (ref
, arg
, true, off2
, size
, max_size
);
692 /* Return true A is a subkill. */
694 modref_access_node::contains_for_kills (const modref_access_node
&a
) const
696 poly_int64 aoffset_adj
= 0;
698 gcc_checking_assert (parm_index
!= MODREF_UNKNOWN_PARM
699 && a
.parm_index
!= MODREF_UNKNOWN_PARM
);
700 if (parm_index
!= a
.parm_index
)
702 gcc_checking_assert (parm_offset_known
&& a
.parm_offset_known
);
703 aoffset_adj
= (a
.parm_offset
- parm_offset
)
705 gcc_checking_assert (range_info_useful_p () && a
.range_info_useful_p ());
706 return known_subrange_p (a
.offset
+ aoffset_adj
,
707 a
.max_size
, offset
, max_size
);
710 /* Merge two ranges both starting at parm_offset1 and update THIS
713 modref_access_node::update_for_kills (poly_int64 parm_offset1
,
715 poly_int64 max_size1
,
717 poly_int64 max_size2
,
718 bool record_adjustments
)
720 if (known_le (offset1
, offset2
))
722 else if (known_le (offset2
, offset1
))
724 std::swap (offset1
, offset2
);
725 std::swap (max_size1
, max_size2
);
730 poly_int64 new_max_size
= max_size2
+ offset2
- offset1
;
731 if (known_le (new_max_size
, max_size1
))
732 new_max_size
= max_size1
;
733 if (known_eq (parm_offset
, parm_offset1
)
734 && known_eq (offset
, offset1
)
735 && known_eq (size
, new_max_size
)
736 && known_eq (max_size
, new_max_size
))
739 if (!record_adjustments
740 || (++adjustments
) < param_modref_max_adjustments
)
742 parm_offset
= parm_offset1
;
744 max_size
= new_max_size
;
746 gcc_checking_assert (useful_for_kill_p ());
752 /* Merge in access A if it is possible to do without losing
753 precision. Return true if successful.
754 Unlike merge assume that both accesses are always executed
755 and merge size the same was as max_size. */
757 modref_access_node::merge_for_kills (const modref_access_node
&a
,
758 bool record_adjustments
)
760 poly_int64 offset1
= 0;
761 poly_int64 aoffset1
= 0;
762 poly_int64 new_parm_offset
= 0;
764 /* We assume that containment was tested earlier. */
765 gcc_checking_assert (!contains_for_kills (a
) && !a
.contains_for_kills (*this)
766 && useful_for_kill_p () && a
.useful_for_kill_p ());
768 if (parm_index
!= a
.parm_index
769 || !combined_offsets (a
, &new_parm_offset
, &offset1
, &aoffset1
))
772 if (known_le (offset1
, aoffset1
))
774 if (!known_size_p (max_size
)
775 || known_ge (offset1
+ max_size
, aoffset1
))
776 return update_for_kills (new_parm_offset
, offset1
, max_size
,
777 aoffset1
, a
.max_size
, record_adjustments
);
779 else if (known_le (aoffset1
, offset1
))
781 if (!known_size_p (a
.max_size
)
782 || known_ge (aoffset1
+ a
.max_size
, offset1
))
783 return update_for_kills (new_parm_offset
, offset1
, max_size
,
784 aoffset1
, a
.max_size
, record_adjustments
);
789 /* Insert new kill A into KILLS. If RECORD_ADJUSTMENTS is true limit number
790 of changes to each entry. Return true if something changed. */
793 modref_access_node::insert_kill (vec
<modref_access_node
> &kills
,
794 modref_access_node
&a
, bool record_adjustments
)
797 modref_access_node
*a2
;
800 gcc_checking_assert (a
.useful_for_kill_p ());
802 /* See if we have corresponding entry already or we can merge with
803 neighboring entry. */
804 FOR_EACH_VEC_ELT (kills
, index
, a2
)
806 if (a2
->contains_for_kills (a
))
808 if (a
.contains_for_kills (*a2
))
815 if (a2
->merge_for_kills (a
, record_adjustments
))
821 /* If entry was not found, insert it. */
824 if ((int)kills
.length () >= param_modref_max_accesses
)
827 fprintf (dump_file
, "--param modref-max-accesses limit reached:");
834 /* Extending range in an entry may make it possible to merge it with
838 for (i
= 0; i
< kills
.length ();)
841 bool found
= false, restart
= false;
842 modref_access_node
*a
= &kills
[i
];
843 modref_access_node
*n
= &kills
[index
];
845 if (n
->contains_for_kills (*a
))
847 if (!found
&& n
->merge_for_kills (*a
, false))
848 found
= restart
= true;
849 gcc_checking_assert (found
|| !a
->merge_for_kills (*n
, false));
852 kills
.unordered_remove (i
);
853 if (index
== kills
.length ())
875 test_insert_search_collapse ()
877 modref_base_node
<alias_set_type
> *base_node
;
878 modref_ref_node
<alias_set_type
> *ref_node
;
879 modref_access_node a
= unspecified_modref_access_node
;
881 modref_tree
<alias_set_type
> *t
= new modref_tree
<alias_set_type
>();
882 ASSERT_FALSE (t
->every_base
);
884 /* Insert into an empty tree. */
885 t
->insert (1, 2, 2, 1, 2, a
, false);
886 ASSERT_NE (t
->bases
, NULL
);
887 ASSERT_EQ (t
->bases
->length (), 1);
888 ASSERT_FALSE (t
->every_base
);
889 ASSERT_EQ (t
->search (2), NULL
);
891 base_node
= t
->search (1);
892 ASSERT_NE (base_node
, NULL
);
893 ASSERT_EQ (base_node
->base
, 1);
894 ASSERT_NE (base_node
->refs
, NULL
);
895 ASSERT_EQ (base_node
->refs
->length (), 1);
896 ASSERT_EQ (base_node
->search (1), NULL
);
898 ref_node
= base_node
->search (2);
899 ASSERT_NE (ref_node
, NULL
);
900 ASSERT_EQ (ref_node
->ref
, 2);
902 /* Insert when base exists but ref does not. */
903 t
->insert (1, 2, 2, 1, 3, a
, false);
904 ASSERT_NE (t
->bases
, NULL
);
905 ASSERT_EQ (t
->bases
->length (), 1);
906 ASSERT_EQ (t
->search (1), base_node
);
907 ASSERT_EQ (t
->search (2), NULL
);
908 ASSERT_NE (base_node
->refs
, NULL
);
909 ASSERT_EQ (base_node
->refs
->length (), 2);
911 ref_node
= base_node
->search (3);
912 ASSERT_NE (ref_node
, NULL
);
914 /* Insert when base and ref exist, but access is not dominated by nor
915 dominates other accesses. */
916 t
->insert (1, 2, 2, 1, 2, a
, false);
917 ASSERT_EQ (t
->bases
->length (), 1);
918 ASSERT_EQ (t
->search (1), base_node
);
920 ref_node
= base_node
->search (2);
921 ASSERT_NE (ref_node
, NULL
);
923 /* Insert when base and ref exist and access is dominated. */
924 t
->insert (1, 2, 2, 1, 2, a
, false);
925 ASSERT_EQ (t
->search (1), base_node
);
926 ASSERT_EQ (base_node
->search (2), ref_node
);
928 /* Insert ref to trigger ref list collapse for base 1. */
929 t
->insert (1, 2, 2, 1, 4, a
, false);
930 ASSERT_EQ (t
->search (1), base_node
);
931 ASSERT_EQ (base_node
->refs
, NULL
);
932 ASSERT_EQ (base_node
->search (2), NULL
);
933 ASSERT_EQ (base_node
->search (3), NULL
);
934 ASSERT_TRUE (base_node
->every_ref
);
936 /* Further inserts to collapsed ref list are ignored. */
937 t
->insert (1, 2, 2, 1, 5, a
, false);
938 ASSERT_EQ (t
->search (1), base_node
);
939 ASSERT_EQ (base_node
->refs
, NULL
);
940 ASSERT_EQ (base_node
->search (2), NULL
);
941 ASSERT_EQ (base_node
->search (3), NULL
);
942 ASSERT_TRUE (base_node
->every_ref
);
944 /* Insert base to trigger base list collapse. */
945 t
->insert (1, 2, 2, 5, 0, a
, false);
946 ASSERT_TRUE (t
->every_base
);
947 ASSERT_EQ (t
->bases
, NULL
);
948 ASSERT_EQ (t
->search (1), NULL
);
950 /* Further inserts to collapsed base list are ignored. */
951 t
->insert (1, 2, 2, 7, 8, a
, false);
952 ASSERT_TRUE (t
->every_base
);
953 ASSERT_EQ (t
->bases
, NULL
);
954 ASSERT_EQ (t
->search (1), NULL
);
962 modref_tree
<alias_set_type
> *t1
, *t2
;
963 modref_base_node
<alias_set_type
> *base_node
;
964 modref_access_node a
= unspecified_modref_access_node
;
966 t1
= new modref_tree
<alias_set_type
>();
967 t1
->insert (3, 4, 1, 1, 1, a
, false);
968 t1
->insert (3, 4, 1, 1, 2, a
, false);
969 t1
->insert (3, 4, 1, 1, 3, a
, false);
970 t1
->insert (3, 4, 1, 2, 1, a
, false);
971 t1
->insert (3, 4, 1, 3, 1, a
, false);
973 t2
= new modref_tree
<alias_set_type
>();
974 t2
->insert (10, 10, 10, 1, 2, a
, false);
975 t2
->insert (10, 10, 10, 1, 3, a
, false);
976 t2
->insert (10, 10, 10, 1, 4, a
, false);
977 t2
->insert (10, 10, 10, 3, 2, a
, false);
978 t2
->insert (10, 10, 10, 3, 3, a
, false);
979 t2
->insert (10, 10, 10, 3, 4, a
, false);
980 t2
->insert (10, 10, 10, 3, 5, a
, false);
982 t1
->merge (3, 4, 1, t2
, NULL
, NULL
, false);
984 ASSERT_FALSE (t1
->every_base
);
985 ASSERT_NE (t1
->bases
, NULL
);
986 ASSERT_EQ (t1
->bases
->length (), 3);
988 base_node
= t1
->search (1);
989 ASSERT_NE (base_node
->refs
, NULL
);
990 ASSERT_FALSE (base_node
->every_ref
);
991 ASSERT_EQ (base_node
->refs
->length (), 4);
993 base_node
= t1
->search (2);
994 ASSERT_NE (base_node
->refs
, NULL
);
995 ASSERT_FALSE (base_node
->every_ref
);
996 ASSERT_EQ (base_node
->refs
->length (), 1);
998 base_node
= t1
->search (3);
999 ASSERT_EQ (base_node
->refs
, NULL
);
1000 ASSERT_TRUE (base_node
->every_ref
);
1008 ipa_modref_tree_cc_tests ()
1010 test_insert_search_collapse ();
1014 } // namespace selftest
1019 gt_ggc_mx (modref_tree
< int >*const &tt
)
1023 ggc_test_and_set_mark (tt
->bases
);
1024 gt_ggc_mx (tt
->bases
);
1029 gt_ggc_mx (modref_tree
< tree_node
* >*const &tt
)
1033 ggc_test_and_set_mark (tt
->bases
);
1034 gt_ggc_mx (tt
->bases
);
1038 void gt_pch_nx (modref_tree
<int>* const&) {}
1039 void gt_pch_nx (modref_tree
<tree_node
*>* const&) {}
1040 void gt_pch_nx (modref_tree
<int>* const&, gt_pointer_operator
, void *) {}
1041 void gt_pch_nx (modref_tree
<tree_node
*>* const&, gt_pointer_operator
, void *) {}
1043 void gt_ggc_mx (modref_base_node
<int>* &b
)
1045 ggc_test_and_set_mark (b
);
1048 ggc_test_and_set_mark (b
->refs
);
1049 gt_ggc_mx (b
->refs
);
1053 void gt_ggc_mx (modref_base_node
<tree_node
*>* &b
)
1055 ggc_test_and_set_mark (b
);
1058 ggc_test_and_set_mark (b
->refs
);
1059 gt_ggc_mx (b
->refs
);
1062 gt_ggc_mx (b
->base
);
1065 void gt_pch_nx (modref_base_node
<int>*) {}
1066 void gt_pch_nx (modref_base_node
<tree_node
*>*) {}
1067 void gt_pch_nx (modref_base_node
<int>*, gt_pointer_operator
, void *) {}
1068 void gt_pch_nx (modref_base_node
<tree_node
*>*, gt_pointer_operator
, void *) {}
1070 void gt_ggc_mx (modref_ref_node
<int>* &r
)
1072 ggc_test_and_set_mark (r
);
1075 ggc_test_and_set_mark (r
->accesses
);
1076 gt_ggc_mx (r
->accesses
);
1080 void gt_ggc_mx (modref_ref_node
<tree_node
*>* &r
)
1082 ggc_test_and_set_mark (r
);
1085 ggc_test_and_set_mark (r
->accesses
);
1086 gt_ggc_mx (r
->accesses
);
1092 void gt_pch_nx (modref_ref_node
<int>* ) {}
1093 void gt_pch_nx (modref_ref_node
<tree_node
*>*) {}
1094 void gt_pch_nx (modref_ref_node
<int>*, gt_pointer_operator
, void *) {}
1095 void gt_pch_nx (modref_ref_node
<tree_node
*>*, gt_pointer_operator
, void *) {}
1097 void gt_ggc_mx (modref_access_node
&)