gcc/analyzer/store.cc
1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2024 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tree.h"
26 #include "function.h"
27 #include "basic-block.h"
28 #include "gimple.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
31 #include "graphviz.h"
32 #include "options.h"
33 #include "cgraph.h"
34 #include "tree-dfa.h"
35 #include "stringpool.h"
36 #include "convert.h"
37 #include "target.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
41 #include "bitmap.h"
42 #include "selftest.h"
43 #include "analyzer/analyzer.h"
44 #include "analyzer/analyzer-logging.h"
45 #include "ordered-hash-map.h"
46 #include "options.h"
47 #include "cfg.h"
48 #include "analyzer/supergraph.h"
49 #include "sbitmap.h"
50 #include "analyzer/call-string.h"
51 #include "analyzer/program-point.h"
52 #include "analyzer/store.h"
53 #include "analyzer/region-model.h"
54 #include "analyzer/call-summary.h"
55 #include "analyzer/analyzer-selftests.h"
56 #include "stor-layout.h"
58 #if ENABLE_ANALYZER
60 namespace ana {
62 /* Dump SVALS to PP, sorting them to ensure determinism. */
64 static void
65 dump_svalue_set (const hash_set <const svalue *> &svals,
66 pretty_printer *pp, bool simple)
68 auto_vec <const svalue *> v;
69 for (hash_set<const svalue *>::iterator iter = svals.begin ();
70 iter != svals.end (); ++iter)
72 v.safe_push (*iter);
74 v.qsort (svalue::cmp_ptr_ptr);
76 pp_character (pp, '{');
77 const svalue *sval;
78 unsigned i;
79 FOR_EACH_VEC_ELT (v, i, sval)
81 if (i > 0)
82 pp_string (pp, ", ");
83 sval->dump_to_pp (pp, simple);
85 pp_character (pp, '}');
88 /* class uncertainty_t. */
90 /* Dump this object to PP. */
92 void
93 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
95 pp_string (pp, "{m_maybe_bound_svals: ");
96 dump_svalue_set (m_maybe_bound_svals, pp, simple);
98 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
99 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
100 pp_string (pp, "}");
103 /* Dump this object to stderr. */
105 DEBUG_FUNCTION void
106 uncertainty_t::dump (bool simple) const
108 pretty_printer pp;
109 pp_format_decoder (&pp) = default_tree_printer;
110 pp_show_color (&pp) = pp_show_color (global_dc->printer);
111 pp.buffer->stream = stderr;
112 dump_to_pp (&pp, simple);
113 pp_newline (&pp);
114 pp_flush (&pp);
117 /* class binding_key. */
119 const binding_key *
120 binding_key::make (store_manager *mgr, const region *r)
122 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
123 if (offset.symbolic_p ())
124 return mgr->get_symbolic_binding (r);
125 else
127 bit_size_t bit_size;
128 if (r->get_bit_size (&bit_size))
130 /* Must be non-empty. */
131 gcc_assert (bit_size > 0);
132 return mgr->get_concrete_binding (offset.get_bit_offset (),
133 bit_size);
135 else
136 return mgr->get_symbolic_binding (r);
140 /* Dump this binding_key to stderr. */
142 DEBUG_FUNCTION void
143 binding_key::dump (bool simple) const
145 pretty_printer pp;
146 pp_format_decoder (&pp) = default_tree_printer;
147 pp_show_color (&pp) = pp_show_color (global_dc->printer);
148 pp.buffer->stream = stderr;
149 dump_to_pp (&pp, simple);
150 pp_newline (&pp);
151 pp_flush (&pp);
154 /* Get a description of this binding_key. */
156 label_text
157 binding_key::get_desc (bool simple) const
159 pretty_printer pp;
160 pp_format_decoder (&pp) = default_tree_printer;
161 dump_to_pp (&pp, simple);
162 return label_text::take (xstrdup (pp_formatted_text (&pp)));
165 /* qsort callback. */
168 binding_key::cmp_ptrs (const void *p1, const void *p2)
170 const binding_key * const *pk1 = (const binding_key * const *)p1;
171 const binding_key * const *pk2 = (const binding_key * const *)p2;
172 return cmp (*pk1, *pk2);
175 /* Comparator for binding_keys. */
178 binding_key::cmp (const binding_key *k1, const binding_key *k2)
180 int concrete1 = k1->concrete_p ();
181 int concrete2 = k2->concrete_p ();
182 if (int concrete_cmp = concrete1 - concrete2)
183 return concrete_cmp;
184 if (concrete1)
186 const concrete_binding *b1 = (const concrete_binding *)k1;
187 const concrete_binding *b2 = (const concrete_binding *)k2;
188 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
189 b2->get_start_bit_offset (),
190 SIGNED))
191 return start_cmp;
192 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
193 SIGNED);
195 else
197 const symbolic_binding *s1 = (const symbolic_binding *)k1;
198 const symbolic_binding *s2 = (const symbolic_binding *)k2;
199 if (s1 > s2)
200 return 1;
201 if (s1 < s2)
202 return -1;
203 return 0;
207 /* struct bit_range. */
209 void
210 bit_range::dump_to_pp (pretty_printer *pp) const
212 byte_range bytes (0, 0);
213 if (as_byte_range (&bytes))
214 bytes.dump_to_pp (pp);
215 else
217 pp_string (pp, "start: ");
218 pp_wide_int (pp, m_start_bit_offset, SIGNED);
219 pp_string (pp, ", size: ");
220 pp_wide_int (pp, m_size_in_bits, SIGNED);
221 pp_string (pp, ", next: ");
222 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
226 /* Dump this object to stderr. */
228 DEBUG_FUNCTION void
229 bit_range::dump () const
231 pretty_printer pp;
232 pp.buffer->stream = stderr;
233 dump_to_pp (&pp);
234 pp_newline (&pp);
235 pp_flush (&pp);
238 /* Generate a JSON value for this bit_range.
239 This is intended for debugging the analyzer rather
240 than serialization. */
242 json::object *
243 bit_range::to_json () const
245 json::object *obj = new json::object ();
246 obj->set ("start_bit_offset",
247 bit_offset_to_json (m_start_bit_offset));
248 obj->set ("size_in_bits",
249 bit_offset_to_json (m_size_in_bits));
250 return obj;
253 /* If OTHER is a subset of this, return true and, if OUT is
254 non-null, write to *OUT the relative range of OTHER within this.
255 Otherwise return false. */
257 bool
258 bit_range::contains_p (const bit_range &other, bit_range *out) const
260 if (contains_p (other.get_start_bit_offset ())
261 && contains_p (other.get_last_bit_offset ()))
263 if (out)
265 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
266 out->m_size_in_bits = other.m_size_in_bits;
268 return true;
270 else
271 return false;
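/* Illustrative example, added for exposition: a bit_range covering
   bits 8-23 contains a bit_range covering bits 12-15, and *OUT would
   be written with start 4, size 4, i.e. OTHER's position relative to
   the start of this range; a range covering bits 20-27 is not
   contained, since its last bit lies beyond bit 23.  */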
274 /* If OTHER intersects this, return true and write
275 the relative range of OTHER within THIS to *OUT_THIS,
276 and the relative range of THIS within OTHER to *OUT_OTHER.
277 Otherwise return false. */
279 bool
280 bit_range::intersects_p (const bit_range &other,
281 bit_range *out_this,
282 bit_range *out_other) const
284 if (get_start_bit_offset () < other.get_next_bit_offset ()
285 && other.get_start_bit_offset () < get_next_bit_offset ())
287 bit_offset_t overlap_start
288 = MAX (get_start_bit_offset (),
289 other.get_start_bit_offset ());
290 bit_offset_t overlap_next
291 = MIN (get_next_bit_offset (),
292 other.get_next_bit_offset ());
293 gcc_assert (overlap_next > overlap_start);
294 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
295 *out_this = abs_overlap_bits - get_start_bit_offset ();
296 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
297 return true;
299 else
300 return false;
303 /* Return true if THIS and OTHER intersect, and write the number
304 of bits by which they overlap to *OUT_NUM_OVERLAP_BITS.
306 Otherwise return false. */
308 bool
309 bit_range::intersects_p (const bit_range &other,
310 bit_size_t *out_num_overlap_bits) const
312 if (get_start_bit_offset () < other.get_next_bit_offset ()
313 && other.get_start_bit_offset () < get_next_bit_offset ())
315 bit_offset_t overlap_start = MAX (get_start_bit_offset (),
316 other.get_start_bit_offset ());
317 bit_offset_t overlap_next = MIN (get_next_bit_offset (),
318 other.get_next_bit_offset ());
319 gcc_assert (overlap_next > overlap_start);
320 *out_num_overlap_bits = overlap_next - overlap_start;
321 return true;
323 else
324 return false;
327 /* Return true if THIS exceeds OTHER and write the overhanging
328 bit range to OUT_OVERHANGING_BIT_RANGE. */
330 bool
331 bit_range::exceeds_p (const bit_range &other,
332 bit_range *out_overhanging_bit_range) const
334 gcc_assert (!empty_p ());
336 if (other.get_next_bit_offset () < get_next_bit_offset ())
338 /* THIS definitely exceeds OTHER. */
339 bit_offset_t start = MAX (get_start_bit_offset (),
340 other.get_next_bit_offset ());
341 bit_offset_t size = get_next_bit_offset () - start;
342 gcc_assert (size > 0);
343 out_overhanging_bit_range->m_start_bit_offset = start;
344 out_overhanging_bit_range->m_size_in_bits = size;
345 return true;
347 else
348 return false;
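/* Illustrative example, added for exposition: if THIS covers bits 0-63
   and OTHER covers bits 0-31, then THIS exceeds OTHER and
   OUT_OVERHANGING_BIT_RANGE is written with start 32, size 32.  Note
   that the overhanging range is expressed in absolute bit offsets,
   not relative to OTHER.  */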
351 /* Return true if THIS starts before OFFSET, and write the part of
352 THIS that falls short of OFFSET to OUT_FALL_SHORT_BITS. */
354 bool
355 bit_range::falls_short_of_p (bit_offset_t offset,
356 bit_range *out_fall_short_bits) const
358 gcc_assert (!empty_p ());
360 if (get_start_bit_offset () < offset)
362 /* THIS falls short of OFFSET. */
363 bit_offset_t start = get_start_bit_offset ();
364 bit_offset_t size = MIN (offset, get_next_bit_offset ()) - start;
365 gcc_assert (size > 0);
366 out_fall_short_bits->m_start_bit_offset = start;
367 out_fall_short_bits->m_size_in_bits = size;
368 return true;
370 else
371 return false;
375 bit_range::cmp (const bit_range &br1, const bit_range &br2)
377 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
378 br2.m_start_bit_offset))
379 return start_cmp;
381 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
384 /* Offset this range by OFFSET. */
386 bit_range
387 bit_range::operator- (bit_offset_t offset) const
389 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
392 /* If MASK is a contiguous range of set bits, write them
393 to *OUT and return true.
394 Otherwise return false. */
396 bool
397 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
399 unsigned iter_bit_idx = 0;
400 unsigned HOST_WIDE_INT iter_bit_mask = 1;
402 /* Find the first contiguous run of set bits in MASK. */
404 /* Find first set bit in MASK. */
405 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
407 if (mask & iter_bit_mask)
408 break;
409 iter_bit_idx++;
410 iter_bit_mask <<= 1;
412 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
413 /* MASK is zero. */
414 return false;
416 unsigned first_set_iter_bit_idx = iter_bit_idx;
417 unsigned num_set_bits = 1;
418 iter_bit_idx++;
419 iter_bit_mask <<= 1;
421 /* Find next unset bit in MASK. */
422 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
424 if (!(mask & iter_bit_mask))
425 break;
426 num_set_bits++;
427 iter_bit_idx++;
428 iter_bit_mask <<= 1;
430 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
432 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
433 return true;
436 /* We now have the first contiguous run of set bits in MASK.
437 Fail if any other bits are set. */
438 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
440 if (mask & iter_bit_mask)
441 return false;
442 iter_bit_idx++;
443 iter_bit_mask <<= 1;
446 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
447 return true;
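/* Illustrative example, added for exposition: a mask of 0xf0 is a
   single contiguous run of set bits and yields bit_range (4, 4),
   i.e. start bit 4, size 4; a mask of 0x5 (binary 101) has two runs
   of set bits and so from_mask returns false, as does a mask of
   zero.  */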
450 /* Attempt to convert this bit_range to a byte_range.
451 Return true if it is possible, writing the result to *OUT.
452 Otherwise return false. */
454 bool
455 bit_range::as_byte_range (byte_range *out) const
457 if (m_start_bit_offset % BITS_PER_UNIT == 0
458 && m_size_in_bits % BITS_PER_UNIT == 0)
460 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
461 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
462 return true;
464 return false;
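/* Illustrative example, added for exposition: assuming BITS_PER_UNIT
   is 8, bit_range (16, 32) converts to byte_range (2, 4), i.e. bytes
   2-5, whereas bit_range (3, 8) is not byte-aligned and the
   conversion fails.  */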
467 /* Dump this object to PP. */
469 void
470 byte_range::dump_to_pp (pretty_printer *pp) const
472 if (m_size_in_bytes == 0)
474 pp_string (pp, "empty");
476 else if (m_size_in_bytes == 1)
478 pp_string (pp, "byte ");
479 pp_wide_int (pp, m_start_byte_offset, SIGNED);
481 else
483 pp_string (pp, "bytes ");
484 pp_wide_int (pp, m_start_byte_offset, SIGNED);
485 pp_string (pp, "-");
486 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
490 /* Dump this object to stderr. */
492 DEBUG_FUNCTION void
493 byte_range::dump () const
495 pretty_printer pp;
496 pp.buffer->stream = stderr;
497 dump_to_pp (&pp);
498 pp_newline (&pp);
499 pp_flush (&pp);
502 /* Generate a JSON value for this byte_range.
503 This is intended for debugging the analyzer rather
504 than serialization. */
506 json::object *
507 byte_range::to_json () const
509 json::object *obj = new json::object ();
510 obj->set ("start_byte_offset",
511 byte_offset_to_json (m_start_byte_offset));
512 obj->set ("size_in_bytes",
513 byte_offset_to_json (m_size_in_bytes));
514 return obj;
517 /* If OTHER is a subset of this, return true and write
518 to *OUT the relative range of OTHER within this.
519 Otherwise return false. */
521 bool
522 byte_range::contains_p (const byte_range &other, byte_range *out) const
524 if (contains_p (other.get_start_byte_offset ())
525 && contains_p (other.get_last_byte_offset ()))
527 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
528 out->m_size_in_bytes = other.m_size_in_bytes;
529 return true;
531 else
532 return false;
535 /* qsort comparator for byte ranges. */
538 byte_range::cmp (const byte_range &br1, const byte_range &br2)
540 /* Order first by offset. */
541 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
542 br2.m_start_byte_offset))
543 return start_cmp;
545 /* ...then by size. */
546 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
549 /* class concrete_binding : public binding_key. */
551 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
553 void
554 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
556 m_bit_range.dump_to_pp (pp);
559 /* Return true if this binding overlaps with OTHER. */
561 bool
562 concrete_binding::overlaps_p (const concrete_binding &other) const
564 if (get_start_bit_offset () < other.get_next_bit_offset ()
565 && get_next_bit_offset () > other.get_start_bit_offset ())
566 return true;
567 return false;
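/* Illustrative example, added for exposition: since the "next" bit
   offset is exclusive, a binding of bits 0-31 overlaps a binding of
   bits 16-47, but does not overlap a binding of bits 32-63; those two
   merely abut.  */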
570 /* If this is expressible as a concrete byte range, return true
571 and write it to *OUT. Otherwise return false. */
573 bool
574 concrete_binding::get_byte_range (byte_range *out) const
576 return m_bit_range.as_byte_range (out);
579 /* Comparator for use by vec<const concrete_binding *>::qsort. */
582 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
584 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
585 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
587 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
590 /* class symbolic_binding : public binding_key. */
592 void
593 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
595 //binding_key::dump_to_pp (pp, simple);
596 pp_string (pp, "region: ");
597 m_region->dump_to_pp (pp, simple);
600 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
603 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
605 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
606 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
608 return region::cmp_ids (b1->get_region (), b2->get_region ());
611 /* The store is oblivious to the types of the svalues bound within
612 it: any type can get bound at any location.
613 Simplify any casts before binding.
615 For example, if we have:
616 struct big { int ia[1024]; };
617 struct big src, dst;
618 memcpy (&dst, &src, sizeof (struct big));
619 this reaches us in gimple form as:
620 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
621 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
622 Using cast_region when handling the MEM_REF would give us:
623 INIT_VAL(CAST_REG(unsigned char[4096], src))
624 as rhs_sval, but we can fold that into a cast svalue:
625 CAST(unsigned char[4096], INIT_VAL(src))
626 We can discard that cast from the svalue when binding it in
627 the store for "dst", and simply store:
628 cluster for: dst
629 key: {kind: direct, start: 0, size: 32768, next: 32768}
630 value: ‘struct big’ {INIT_VAL(src)}. */
632 static const svalue *
633 simplify_for_binding (const svalue *sval)
635 if (const svalue *cast_sval = sval->maybe_undo_cast ())
636 sval = cast_sval;
637 return sval;
640 /* class binding_map. */
642 /* binding_map's copy ctor. */
644 binding_map::binding_map (const binding_map &other)
645 : m_map (other.m_map)
649 /* binding_map's assignment operator. */
651 binding_map&
652 binding_map::operator=(const binding_map &other)
654 /* For now, assume we only ever copy to an empty cluster. */
655 gcc_assert (m_map.elements () == 0);
656 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
657 ++iter)
659 const binding_key *key = (*iter).first;
660 const svalue *sval = (*iter).second;
661 m_map.put (key, sval);
663 return *this;
666 /* binding_map's equality operator. */
668 bool
669 binding_map::operator== (const binding_map &other) const
671 if (m_map.elements () != other.m_map.elements ())
672 return false;
674 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
676 const binding_key *key = (*iter).first;
677 const svalue *sval = (*iter).second;
678 const svalue **other_slot
679 = const_cast <map_t &> (other.m_map).get (key);
680 if (other_slot == NULL)
681 return false;
682 if (sval != *other_slot)
683 return false;
685 gcc_checking_assert (hash () == other.hash ());
686 return true;
689 /* Generate a hash value for this binding_map. */
691 hashval_t
692 binding_map::hash () const
694 hashval_t result = 0;
695 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
697 /* Use a new hasher for each key to avoid depending on the ordering
698 of keys when accumulating the result. */
699 inchash::hash hstate;
700 hstate.add_ptr ((*iter).first);
701 hstate.add_ptr ((*iter).second);
702 result ^= hstate.end ();
704 return result;
707 /* Dump a representation of this binding_map to PP.
708 SIMPLE controls how values and regions are to be printed.
709 If MULTILINE, then split the dump over multiple lines and
710 use whitespace for readability, otherwise put all on one line. */
712 void
713 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
714 bool multiline) const
716 auto_vec <const binding_key *> binding_keys;
717 for (map_t::iterator iter = m_map.begin ();
718 iter != m_map.end (); ++iter)
720 const binding_key *key = (*iter).first;
721 binding_keys.safe_push (key);
723 binding_keys.qsort (binding_key::cmp_ptrs);
725 const binding_key *key;
726 unsigned i;
727 FOR_EACH_VEC_ELT (binding_keys, i, key)
729 const svalue *value = *const_cast <map_t &> (m_map).get (key);
730 if (multiline)
732 pp_string (pp, " key: {");
733 key->dump_to_pp (pp, simple);
734 pp_string (pp, "}");
735 pp_newline (pp);
736 pp_string (pp, " value: ");
737 if (tree t = value->get_type ())
738 dump_quoted_tree (pp, t);
739 pp_string (pp, " {");
740 value->dump_to_pp (pp, simple);
741 pp_string (pp, "}");
742 pp_newline (pp);
744 else
746 if (i > 0)
747 pp_string (pp, ", ");
748 pp_string (pp, "binding key: {");
749 key->dump_to_pp (pp, simple);
750 pp_string (pp, "}, value: {");
751 value->dump_to_pp (pp, simple);
752 pp_string (pp, "}");
757 /* Dump a multiline representation of this binding_map to stderr. */
759 DEBUG_FUNCTION void
760 binding_map::dump (bool simple) const
762 pretty_printer pp;
763 pp_format_decoder (&pp) = default_tree_printer;
764 pp_show_color (&pp) = pp_show_color (global_dc->printer);
765 pp.buffer->stream = stderr;
766 dump_to_pp (&pp, simple, true);
767 pp_newline (&pp);
768 pp_flush (&pp);
771 /* Return a new json::object of the form
772 {KEY_DESC : SVALUE_DESC,
773 ...for the various key/value pairs in this binding_map}. */
775 json::object *
776 binding_map::to_json () const
778 json::object *map_obj = new json::object ();
780 auto_vec <const binding_key *> binding_keys;
781 for (map_t::iterator iter = m_map.begin ();
782 iter != m_map.end (); ++iter)
784 const binding_key *key = (*iter).first;
785 binding_keys.safe_push (key);
787 binding_keys.qsort (binding_key::cmp_ptrs);
789 const binding_key *key;
790 unsigned i;
791 FOR_EACH_VEC_ELT (binding_keys, i, key)
793 const svalue *value = *const_cast <map_t &> (m_map).get (key);
794 label_text key_desc = key->get_desc ();
795 map_obj->set (key_desc.get (), value->to_json ());
798 return map_obj;
801 /* Comparator for imposing an order on binding_maps. */
804 binding_map::cmp (const binding_map &map1, const binding_map &map2)
806 if (int count_cmp = map1.elements () - map2.elements ())
807 return count_cmp;
809 auto_vec <const binding_key *> keys1 (map1.elements ());
810 for (map_t::iterator iter = map1.begin ();
811 iter != map1.end (); ++iter)
812 keys1.quick_push ((*iter).first);
813 keys1.qsort (binding_key::cmp_ptrs);
815 auto_vec <const binding_key *> keys2 (map2.elements ());
816 for (map_t::iterator iter = map2.begin ();
817 iter != map2.end (); ++iter)
818 keys2.quick_push ((*iter).first);
819 keys2.qsort (binding_key::cmp_ptrs);
821 for (size_t i = 0; i < keys1.length (); i++)
823 const binding_key *k1 = keys1[i];
824 const binding_key *k2 = keys2[i];
825 if (int key_cmp = binding_key::cmp (k1, k2))
826 return key_cmp;
827 gcc_assert (k1 == k2);
828 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
829 return sval_cmp;
832 return 0;
835 /* Get the child region of PARENT_REG based upon INDEX within a
836 CONSTRUCTOR. */
838 static const region *
839 get_subregion_within_ctor (const region *parent_reg, tree index,
840 region_model_manager *mgr)
842 switch (TREE_CODE (index))
844 default:
845 gcc_unreachable ();
846 case INTEGER_CST:
848 const svalue *index_sval
849 = mgr->get_or_create_constant_svalue (index);
850 return mgr->get_element_region (parent_reg,
851 TREE_TYPE (parent_reg->get_type ()),
852 index_sval);
854 break;
855 case FIELD_DECL:
856 return mgr->get_field_region (parent_reg, index);
860 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
862 static const svalue *
863 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
865 /* Reuse the get_rvalue logic from region_model. */
866 region_model m (mgr);
867 return m.get_rvalue (path_var (val, 0), NULL);
870 /* Bind values from CONSTRUCTOR to this map, relative to
871 PARENT_REG's relationship to its base region.
872 Return true if successful, false if there was a problem (e.g. due
873 to hitting a complexity limit). */
875 bool
876 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
877 region_model_manager *mgr)
879 gcc_assert (parent_reg);
880 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
882 unsigned ix;
883 tree index;
884 tree val;
885 tree parent_type = parent_reg->get_type ();
886 tree field;
887 if (TREE_CODE (parent_type) == RECORD_TYPE)
888 field = TYPE_FIELDS (parent_type);
889 else
890 field = NULL_TREE;
891 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
893 if (!index)
895 /* If index is NULL, then iterate through the fields for
896 a RECORD_TYPE, or use an INTEGER_CST otherwise.
897 Compare with similar logic in output_constructor. */
898 if (field)
900 index = field;
901 field = DECL_CHAIN (field);
903 else
904 index = build_int_cst (integer_type_node, ix);
906 else if (TREE_CODE (index) == RANGE_EXPR)
908 tree min_index = TREE_OPERAND (index, 0);
909 tree max_index = TREE_OPERAND (index, 1);
910 if (min_index == max_index)
912 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
913 min_index, val))
914 return false;
916 else
918 if (!apply_ctor_val_to_range (parent_reg, mgr,
919 min_index, max_index, val))
920 return false;
922 continue;
924 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
925 return false;
927 return true;
930 /* Bind the value VAL into the range of elements within PARENT_REG
931 from MIN_INDEX to MAX_INDEX (including endpoints).
932 For use in handling RANGE_EXPR within a CONSTRUCTOR.
933 Return true if successful, false if there was a problem (e.g. due
934 to hitting a complexity limit). */
936 bool
937 binding_map::apply_ctor_val_to_range (const region *parent_reg,
938 region_model_manager *mgr,
939 tree min_index, tree max_index,
940 tree val)
942 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
943 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
945 /* Generate a binding key for the range. */
946 const region *min_element
947 = get_subregion_within_ctor (parent_reg, min_index, mgr);
948 const region *max_element
949 = get_subregion_within_ctor (parent_reg, max_index, mgr);
950 region_offset min_offset = min_element->get_offset (mgr);
951 if (min_offset.symbolic_p ())
952 return false;
953 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
954 store_manager *smgr = mgr->get_store_manager ();
955 if (max_element->empty_p ())
956 return false;
957 const binding_key *max_element_key = binding_key::make (smgr, max_element);
958 if (max_element_key->symbolic_p ())
959 return false;
960 const concrete_binding *max_element_ckey
961 = max_element_key->dyn_cast_concrete_binding ();
962 bit_size_t range_size_in_bits
963 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
964 const concrete_binding *range_key
965 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
966 if (range_key->symbolic_p ())
967 return false;
969 /* Get the value. */
970 if (TREE_CODE (val) == CONSTRUCTOR)
971 return false;
972 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
974 /* Bind the value to the range. */
975 put (range_key, sval);
976 return true;
979 /* Bind the value VAL into INDEX within PARENT_REG.
980 For use in handling a pair of entries within a CONSTRUCTOR.
981 Return true if successful, false if there was a problem (e.g. due
982 to hitting a complexity limit). */
984 bool
985 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
986 region_model_manager *mgr,
987 tree index, tree val)
989 const region *child_reg
990 = get_subregion_within_ctor (parent_reg, index, mgr);
991 if (TREE_CODE (val) == CONSTRUCTOR)
992 return apply_ctor_to_region (child_reg, val, mgr);
993 else
995 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
996 if (child_reg->empty_p ())
997 return false;
998 const binding_key *k
999 = binding_key::make (mgr->get_store_manager (), child_reg);
1000 /* Handle the case where we have an unknown size for child_reg
1001 (e.g. due to it being a trailing field with incomplete array
1002 type). */
1003 if (!k->concrete_p ())
1005 /* Assume that sval has a well-defined size for this case. */
1006 tree sval_type = sval->get_type ();
1007 gcc_assert (sval_type);
1008 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
1009 gcc_assert (sval_byte_size != -1);
1010 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
1011 /* Get offset of child relative to base region. */
1012 region_offset child_base_offset = child_reg->get_offset (mgr);
1013 if (child_base_offset.symbolic_p ())
1014 return false;
1015 /* Convert to an offset relative to the parent region. */
1016 region_offset parent_base_offset = parent_reg->get_offset (mgr);
1017 gcc_assert (!parent_base_offset.symbolic_p ());
1018 bit_offset_t child_parent_offset
1019 = (child_base_offset.get_bit_offset ()
1020 - parent_base_offset.get_bit_offset ());
1021 /* Create a concrete key for the child within the parent. */
1022 k = mgr->get_store_manager ()->get_concrete_binding
1023 (child_parent_offset, sval_bit_size);
1025 gcc_assert (k->concrete_p ());
1026 put (k, sval);
1027 return true;
1031 /* Populate OUT with all bindings within this map that overlap KEY. */
1033 void
1034 binding_map::get_overlapping_bindings (const binding_key *key,
1035 auto_vec<const binding_key *> *out)
1037 for (auto iter : *this)
1039 const binding_key *iter_key = iter.first;
1040 if (const concrete_binding *ckey
1041 = key->dyn_cast_concrete_binding ())
1043 if (const concrete_binding *iter_ckey
1044 = iter_key->dyn_cast_concrete_binding ())
1046 if (ckey->overlaps_p (*iter_ckey))
1047 out->safe_push (iter_key);
1049 else
1051 /* Assume overlap. */
1052 out->safe_push (iter_key);
1055 else
1057 /* Assume overlap. */
1058 out->safe_push (iter_key);
1063 /* Remove, truncate, and/or split any bindings within this map that
1064 overlap DROP_KEY.
1066 For example, if we have:
1068 +------------------------------------+
1069 | old binding |
1070 +------------------------------------+
1072 which is to be overwritten with:
1074 .......+----------------------+.......
1075 .......| new binding |.......
1076 .......+----------------------+.......
1078 this function "cuts a hole" out of the old binding:
1080 +------+......................+------+
1081 |prefix| hole for new binding |suffix|
1082 +------+......................+------+
1084 into which the new binding can be added without
1085 overlapping the prefix or suffix.
1087 The prefix and suffix (if added) will be bound to the pertinent
1088 parts of the value of the old binding.
1090 For example, given:
1091 struct s5
1093 char arr[8];
1095 void test_5 (struct s5 *p)
1097 struct s5 f = *p;
1098 f.arr[3] = 42;
1100 then after the "f = *p;" we have:
1101 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1102 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1103 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1104 giving:
1105 cluster for: f
1106 key: {bytes 0-2}
1107 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1108 key: {bytes 4-7}
1109 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1110 punching a hole into which the new value can be written at byte 3:
1111 cluster for: f
1112 key: {bytes 0-2}
1113 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1114 key: {byte 3}
1115 value: 'char' {(char)42}
1116 key: {bytes 4-7}
1117 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1119 If UNCERTAINTY is non-NULL, use it to record any svalues that
1120 were removed, as being maybe-bound.
1122 If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
1123 were removed as being maybe-live.
1125 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1126 in the map, due to one or both of the underlying clusters being
1127 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1128 concrete binding it could actually be referring to the same memory as
1129 distinct concrete bindings in the map. Remove all bindings, but
1130 register any svalues with *UNCERTAINTY. */
1132 void
1133 binding_map::remove_overlapping_bindings (store_manager *mgr,
1134 const binding_key *drop_key,
1135 uncertainty_t *uncertainty,
1136 svalue_set *maybe_live_values,
1137 bool always_overlap)
1139 /* Get the bindings of interest within this map. */
1140 auto_vec<const binding_key *> bindings;
1141 if (always_overlap)
1142 for (auto iter : *this)
1143 bindings.safe_push (iter.first); /* Add all bindings. */
1144 else
1145 /* Just add overlapping bindings. */
1146 get_overlapping_bindings (drop_key, &bindings);
1148 unsigned i;
1149 const binding_key *iter_binding;
1150 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1152 /* Record any svalues that were removed to *UNCERTAINTY as being
1153 maybe-bound, provided at least some part of the binding is symbolic.
1155 Specifically, if at least one of the bindings is symbolic, or we
1156 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1157 regions, then we don't know that the svalue has been overwritten,
1158 and should record that to *UNCERTAINTY.
1160 However, if we have concrete keys accessing within the same symbolic
1161 region, then we *know* that the symbolic region has been overwritten,
1162 so we don't record it to *UNCERTAINTY, as this could be a genuine
1163 leak. */
1164 const svalue *old_sval = get (iter_binding);
1165 if (uncertainty
1166 && (drop_key->symbolic_p ()
1167 || iter_binding->symbolic_p ()
1168 || always_overlap))
1169 uncertainty->on_maybe_bound_sval (old_sval);
1171 /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
1172 maybe-live. */
1173 if (maybe_live_values)
1174 maybe_live_values->add (old_sval);
1176 /* Begin by removing the old binding. */
1177 m_map.remove (iter_binding);
1179 /* Don't attempt to handle prefixes/suffixes for the
1180 "always_overlap" case; everything's being removed. */
1181 if (always_overlap)
1182 continue;
1184 /* Now potentially add the prefix and suffix. */
1185 if (const concrete_binding *drop_ckey
1186 = drop_key->dyn_cast_concrete_binding ())
1187 if (const concrete_binding *iter_ckey
1188 = iter_binding->dyn_cast_concrete_binding ())
1190 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1192 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1193 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1195 if (iter_bits.get_start_bit_offset ()
1196 < drop_bits.get_start_bit_offset ())
1198 /* We have a truncated prefix. */
1199 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1200 (drop_bits.get_start_bit_offset ()
1201 - iter_bits.get_start_bit_offset ()));
1202 const concrete_binding *prefix_key
1203 = mgr->get_concrete_binding (prefix_bits);
1204 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1205 const svalue *prefix_sval
1206 = old_sval->extract_bit_range (NULL_TREE,
1207 rel_prefix,
1208 mgr->get_svalue_manager ());
1209 m_map.put (prefix_key, prefix_sval);
1212 if (iter_bits.get_next_bit_offset ()
1213 > drop_bits.get_next_bit_offset ())
1215 /* We have a truncated suffix. */
1216 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1217 (iter_bits.get_next_bit_offset ()
1218 - drop_bits.get_next_bit_offset ()));
1219 const concrete_binding *suffix_key
1220 = mgr->get_concrete_binding (suffix_bits);
1221 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1222 - iter_bits.get_start_bit_offset (),
1223 suffix_bits.m_size_in_bits);
1224 const svalue *suffix_sval
1225 = old_sval->extract_bit_range (NULL_TREE,
1226 rel_suffix,
1227 mgr->get_svalue_manager ());
1228 m_map.put (suffix_key, suffix_sval);
1234 /* class binding_cluster. */
1236 binding_cluster::binding_cluster (const region *base_region)
1237 : m_base_region (base_region), m_map (),
1238 m_escaped (false), m_touched (false)
1242 /* binding_cluster's copy ctor. */
1244 binding_cluster::binding_cluster (const binding_cluster &other)
1245 : m_base_region (other.m_base_region), m_map (other.m_map),
1246 m_escaped (other.m_escaped), m_touched (other.m_touched)
1250 /* binding_cluster's assignment operator. */
1252 binding_cluster&
1253 binding_cluster::operator= (const binding_cluster &other)
1255 gcc_assert (m_base_region == other.m_base_region);
1256 m_map = other.m_map;
1257 m_escaped = other.m_escaped;
1258 m_touched = other.m_touched;
1259 return *this;
1262 /* binding_cluster's equality operator. */
1264 bool
1265 binding_cluster::operator== (const binding_cluster &other) const
1267 if (m_map != other.m_map)
1268 return false;
1270 if (m_base_region != other.m_base_region)
1271 return false;
1273 if (m_escaped != other.m_escaped)
1274 return false;
1276 if (m_touched != other.m_touched)
1277 return false;
1279 gcc_checking_assert (hash () == other.hash ());
1281 return true;
1284 /* Generate a hash value for this binding_cluster. */
1286 hashval_t
1287 binding_cluster::hash () const
1289 return m_map.hash ();
1292 /* Return true if this binding_cluster is symbolic
1293 i.e. its base region is symbolic. */
1295 bool
1296 binding_cluster::symbolic_p () const
1298 return m_base_region->get_kind () == RK_SYMBOLIC;
1301 /* Dump a representation of this binding_cluster to PP.
1302 SIMPLE controls how values and regions are to be printed.
1303 If MULTILINE, then split the dump over multiple lines and
1304 use whitespace for readability, otherwise put all on one line. */
1306 void
1307 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1308 bool multiline) const
1310 if (m_escaped)
1312 if (multiline)
1314 pp_string (pp, " ESCAPED");
1315 pp_newline (pp);
1317 else
1318 pp_string (pp, "(ESCAPED)");
1320 if (m_touched)
1322 if (multiline)
1324 pp_string (pp, " TOUCHED");
1325 pp_newline (pp);
1327 else
1328 pp_string (pp, "(TOUCHED)");
1331 m_map.dump_to_pp (pp, simple, multiline);
1334 /* Dump a multiline representation of this binding_cluster to stderr. */
1336 DEBUG_FUNCTION void
1337 binding_cluster::dump (bool simple) const
1339 pretty_printer pp;
1340 pp_format_decoder (&pp) = default_tree_printer;
1341 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1342 pp.buffer->stream = stderr;
1343 pp_string (&pp, " cluster for: ");
1344 m_base_region->dump_to_pp (&pp, simple);
1345 pp_string (&pp, ": ");
1346 pp_newline (&pp);
1347 dump_to_pp (&pp, simple, true);
1348 pp_newline (&pp);
1349 pp_flush (&pp);
1352 /* Assert that this object is valid. */
1354 void
1355 binding_cluster::validate () const
1357 int num_symbolic = 0;
1358 int num_concrete = 0;
1359 for (auto iter : m_map)
1361 if (iter.first->symbolic_p ())
1362 num_symbolic++;
1363 else
1364 num_concrete++;
1366 /* We shouldn't have more than one symbolic key per cluster
1367 (or one would have clobbered the other). */
1368 gcc_assert (num_symbolic < 2);
1369 /* We can't have both concrete and symbolic keys. */
1370 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1373 /* Return a new json::object of the form
1374 {"escaped": true/false,
1375 "touched": true/false,
1376 "map" : object for the binding_map. */
1378 json::object *
1379 binding_cluster::to_json () const
1381 json::object *cluster_obj = new json::object ();
1383 cluster_obj->set ("escaped", new json::literal (m_escaped));
1384 cluster_obj->set ("touched", new json::literal (m_touched));
1385 cluster_obj->set ("map", m_map.to_json ());
1387 return cluster_obj;
1390 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1391 compound_sval. */
1393 void
1394 binding_cluster::bind (store_manager *mgr,
1395 const region *reg, const svalue *sval)
1397 if (const compound_svalue *compound_sval
1398 = sval->dyn_cast_compound_svalue ())
1400 bind_compound_sval (mgr, reg, compound_sval);
1401 return;
1404 if (reg->empty_p ())
1405 return;
1406 const binding_key *binding = binding_key::make (mgr, reg);
1407 bind_key (binding, sval);
1410 /* Bind SVAL to KEY.
1411 Unpacking of compound_svalues should already have been done by the
1412 time this is called. */
1414 void
1415 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1417 gcc_assert (sval->get_kind () != SK_COMPOUND);
1419 m_map.put (key, sval);
1420 if (key->symbolic_p ())
1421 m_touched = true;
1424 /* Subroutine of binding_cluster::bind.
1425 Unpack compound_svals when binding them, so that we bind them
1426 element-wise. */
1428 void
1429 binding_cluster::bind_compound_sval (store_manager *mgr,
1430 const region *reg,
1431 const compound_svalue *compound_sval)
1433 region_offset reg_offset
1434 = reg->get_offset (mgr->get_svalue_manager ());
1435 if (reg_offset.symbolic_p ())
1437 m_touched = true;
1438 clobber_region (mgr, reg);
1439 return;
1442 for (map_t::iterator iter = compound_sval->begin ();
1443 iter != compound_sval->end (); ++iter)
1445 const binding_key *iter_key = (*iter).first;
1446 const svalue *iter_sval = (*iter).second;
1448 if (const concrete_binding *concrete_key
1449 = iter_key->dyn_cast_concrete_binding ())
1451 bit_offset_t effective_start
1452 = (concrete_key->get_start_bit_offset ()
1453 + reg_offset.get_bit_offset ());
1454 const concrete_binding *effective_concrete_key
1455 = mgr->get_concrete_binding (effective_start,
1456 concrete_key->get_size_in_bits ());
1457 bind_key (effective_concrete_key, iter_sval);
1459 else
1460 gcc_unreachable ();
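/* A sketch of the effect, added for exposition: binding a compound
   value for, say, a struct with two 32-bit fields to a region starting
   at bit offset 64 within the cluster results in two concrete
   bindings, at bits 64-95 and 96-127, rather than a single binding of
   the compound value itself.  */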
1464 /* Remove all bindings overlapping REG within this cluster. */
1466 void
1467 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1469 remove_overlapping_bindings (mgr, reg, NULL, NULL);
1472 /* Remove any bindings for REG within this cluster. */
1474 void
1475 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1477 gcc_assert (reg->get_kind () == RK_DECL);
1478 if (reg->empty_p ())
1479 return;
1480 const binding_key *binding
1481 = binding_key::make (mgr, const_cast<region *> (reg));
1482 m_map.remove (binding);
1485 /* Clobber REG and fill it with repeated copies of SVAL. */
1487 void
1488 binding_cluster::fill_region (store_manager *mgr,
1489 const region *reg,
1490 const svalue *sval)
1492 clobber_region (mgr, reg);
1494 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1495 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1496 const svalue *fill_sval
1497 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1498 byte_size_sval, sval);
1499 bind (mgr, reg, fill_sval);
1502 /* Clobber REG within this cluster and fill it with zeroes. */
1504 void
1505 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1507 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1508 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1509 fill_region (mgr, reg, zero_sval);
1512 /* Mark REG_TO_BIND within this cluster as being unknown.
1514 Remove any bindings overlapping REG_FOR_OVERLAP.
1515 If UNCERTAINTY is non-NULL, use it to record any svalues that
1516 had bindings to them removed, as being maybe-bound.
1517 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1518 had bindings to them removed, as being maybe-live.
1520 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1521 store::mark_region_as_unknown, but are different in
1522 store::set_value's alias handling, for handling the case where
1523 we have a write to a symbolic REG_FOR_OVERLAP. */
1525 void
1526 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1527 const region *reg_to_bind,
1528 const region *reg_for_overlap,
1529 uncertainty_t *uncertainty,
1530 svalue_set *maybe_live_values)
1532 if (reg_to_bind->empty_p ())
1533 return;
1535 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty,
1536 maybe_live_values);
1538 /* Add a default binding to "unknown". */
1539 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1540 const svalue *sval
1541 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1542 bind (mgr, reg_to_bind, sval);
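/* A sketch of the distinction, added for exposition: for a write such
   as "*p = 42;" where p is symbolic and might alias this cluster's
   base region, store::set_value can pass the symbolic region "*p" as
   REG_FOR_OVERLAP (so that all potentially-aliased bindings are
   removed) whilst passing this cluster's own region as REG_TO_BIND
   (so that the default "unknown" binding is attached to the right
   place).  */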
1545 /* Purge state involving SVAL. */
1547 void
1548 binding_cluster::purge_state_involving (const svalue *sval,
1549 region_model_manager *sval_mgr)
1551 auto_vec<const binding_key *> to_remove;
1552 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1553 for (auto iter : m_map)
1555 const binding_key *iter_key = iter.first;
1556 if (const symbolic_binding *symbolic_key
1557 = iter_key->dyn_cast_symbolic_binding ())
1559 const region *reg = symbolic_key->get_region ();
1560 if (reg->involves_p (sval))
1561 to_remove.safe_push (iter_key);
1563 const svalue *iter_sval = iter.second;
1564 if (iter_sval->involves_p (sval))
1565 to_make_unknown.safe_push (std::make_pair(iter_key,
1566 iter_sval->get_type ()));
1568 for (auto iter : to_remove)
1570 m_map.remove (iter);
1571 m_touched = true;
1573 for (auto iter : to_make_unknown)
1575 const svalue *new_sval
1576 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1577 m_map.put (iter.first, new_sval);
1581 /* Get any SVAL bound to REG within this cluster,
1582 without checking parent regions of REG. */
1584 const svalue *
1585 binding_cluster::get_binding (store_manager *mgr,
1586 const region *reg) const
1588 if (reg->empty_p ())
1589 return NULL;
1590 const binding_key *reg_binding = binding_key::make (mgr, reg);
1591 const svalue *sval = m_map.get (reg_binding);
1592 if (sval)
1594 /* If we have a struct with a single field, then the binding of
1595 the field will equal that of the struct, and looking up e.g.
1596 PARENT_REG.field within:
1597 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1598 will erroneously return INIT_VAL(OTHER_REG), rather than
1599 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1600 Fix this issue by iterating upwards whilst the bindings are equal,
1601 expressing the lookups as subvalues.
1602 We have to gather a list of subregion accesses, then walk it
1603 in reverse to get the subvalues. */
1604 auto_vec<const region *> regions;
1605 while (const region *parent_reg = reg->get_parent_region ())
1607 const binding_key *parent_reg_binding
1608 = binding_key::make (mgr, parent_reg);
1609 if (parent_reg_binding == reg_binding
1610 && sval->get_type ()
1611 && reg->get_type ()
1612 && sval->get_type () != reg->get_type ())
1614 regions.safe_push (reg);
1615 reg = parent_reg;
1617 else
1618 break;
1620 if (sval->get_type ()
1621 && reg->get_type ()
1622 && sval->get_type () == reg->get_type ())
1624 unsigned i;
1625 const region *iter_reg;
1626 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1628 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1629 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1630 sval, iter_reg);
1634 return sval;
1637 /* Get any SVAL bound to REG within this cluster,
1638 either directly for REG, or recursively checking for bindings within
1639 parent regions and extracting subvalues if need be. */
1641 const svalue *
1642 binding_cluster::get_binding_recursive (store_manager *mgr,
1643 const region *reg) const
1645 if (const svalue *sval = get_binding (mgr, reg))
1646 return sval;
1647 if (reg != m_base_region)
1648 if (const region *parent_reg = reg->get_parent_region ())
1649 if (const svalue *parent_sval
1650 = get_binding_recursive (mgr, parent_reg))
1652 /* Extract child svalue from parent svalue. */
1653 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1654 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1655 parent_sval, reg);
1657 return NULL;
1660 /* Get any value bound for REG within this cluster. */
1662 const svalue *
1663 binding_cluster::get_any_binding (store_manager *mgr,
1664 const region *reg) const
1666 /* Look for a direct binding. */
1667 if (const svalue *direct_sval
1668 = get_binding_recursive (mgr, reg))
1669 return direct_sval;
1671 /* If we had a write to a cluster of unknown size, we might
1672 have a self-binding of the whole base region with an svalue,
1673 where the base region is symbolic.
1674 Handle such cases by returning sub_svalue instances. */
1675 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1677 /* Extract child svalue from parent svalue. */
1678 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1679 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1680 cluster_sval, reg);
1683 /* If this cluster has been touched by a symbolic write, then the content
1684 of any subregion not currently specifically bound is "UNKNOWN". */
1685 if (m_touched)
1687 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1688 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1691 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1692 then we don't know if we're reading those values or not, so the result
1693 is also "UNKNOWN". */
1694 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1695 && m_map.elements () > 0)
1697 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1698 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1701 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1702 return compound_sval;
1704 /* Otherwise, the initial value, or uninitialized. */
1705 return NULL;
1708 /* Attempt to get a compound_svalue for the bindings within the cluster
1709 affecting REG (which could be the base region itself).
1711 Create a compound_svalue with the subset of bindings that affect REG,
1712 offsetting them so that the offsets are relative to the start of REG
1713 within the cluster.
1715 For example, REG could be one element within an array of structs.
1717 Return the resulting compound_svalue, or NULL if there's a problem. */
1719 const svalue *
1720 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1721 const region *reg) const
1723 region_offset cluster_offset
1724 = m_base_region->get_offset (mgr->get_svalue_manager ());
1725 if (cluster_offset.symbolic_p ())
1726 return NULL;
1727 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1728 if (reg_offset.symbolic_p ())
1729 return NULL;
1731 if (reg->empty_p ())
1732 return NULL;
1734 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1736 We will build the result map in two parts:
1737 (a) result_map, holding the concrete keys from this cluster,
1739 (b) default_map, holding the initial values for the region
1740 (e.g. uninitialized, initializer values, or zero), unless this
1741 cluster has been touched.
1743 We will populate (a), and as we do, clobber (b), trimming and
1744 splitting its bindings as necessary.
1745 Finally, we will merge (b) into (a), giving a concrete map
1746 that merges both the initial values and the bound values from
1747 the binding_cluster.
1748 Doing it this way reduces N for the O(N^2) intersection-finding,
1749 perhaps we should have a spatially-organized data structure for
1750 concrete keys, though. */
1752 binding_map result_map;
1753 binding_map default_map;
1755 /* Set up default values in default_map. */
1756 const svalue *default_sval;
1757 if (m_touched)
1758 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1759 else
1760 default_sval = sval_mgr->get_or_create_initial_value (reg);
1761 const binding_key *default_key = binding_key::make (mgr, reg);
1762 default_map.put (default_key, default_sval);
1764 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1766 const binding_key *key = (*iter).first;
1767 const svalue *sval = (*iter).second;
1769 if (const concrete_binding *concrete_key
1770 = key->dyn_cast_concrete_binding ())
1772 const bit_range &bound_range = concrete_key->get_bit_range ();
1774 bit_size_t reg_bit_size;
1775 if (!reg->get_bit_size (&reg_bit_size))
1776 return NULL;
1778 bit_range reg_range (reg_offset.get_bit_offset (),
1779 reg_bit_size);
1781 /* Skip bindings that are outside the bit range of REG. */
1782 if (!bound_range.intersects_p (reg_range))
1783 continue;
1785 /* We shouldn't have an exact match; that should have been
1786 handled already. */
1787 gcc_assert (!(reg_range == bound_range));
1789 bit_range subrange (0, 0);
1790 if (reg_range.contains_p (bound_range, &subrange))
1792 /* We have a bound range fully within REG.
1793 Add it to map, offsetting accordingly. */
1795 /* Get offset of KEY relative to REG, rather than to
1796 the cluster. */
1797 const concrete_binding *offset_concrete_key
1798 = mgr->get_concrete_binding (subrange);
1799 result_map.put (offset_concrete_key, sval);
1801 /* Clobber default_map, removing/trimming/splitting where
1802 it overlaps with offset_concrete_key. */
1803 default_map.remove_overlapping_bindings (mgr,
1804 offset_concrete_key,
1805 NULL, NULL, false);
1807 else if (bound_range.contains_p (reg_range, &subrange))
1809 /* REG is fully within the bound range, but
1810 is not equal to it; we're extracting a subvalue. */
1811 return sval->extract_bit_range (reg->get_type (),
1812 subrange,
1813 mgr->get_svalue_manager ());
1815 else
1817 /* REG and the bound range partially overlap. */
1818 bit_range reg_subrange (0, 0);
1819 bit_range bound_subrange (0, 0);
1820 reg_range.intersects_p (bound_range,
1821 &reg_subrange, &bound_subrange);
1823 /* Get the bits from the bound value for the bits at the
1824 intersection (relative to the bound value). */
1825 const svalue *overlap_sval
1826 = sval->extract_bit_range (NULL_TREE,
1827 bound_subrange,
1828 mgr->get_svalue_manager ());
1830 /* Get key for overlap, relative to the REG. */
1831 const concrete_binding *overlap_concrete_key
1832 = mgr->get_concrete_binding (reg_subrange);
1833 result_map.put (overlap_concrete_key, overlap_sval);
1835 /* Clobber default_map, removing/trimming/splitting where
1836 it overlaps with overlap_concrete_key. */
1837 default_map.remove_overlapping_bindings (mgr,
1838 overlap_concrete_key,
1839 NULL, NULL, false);
1842 else
1843 /* Can't handle symbolic bindings. */
1844 return NULL;
1847 if (result_map.elements () == 0)
1848 return NULL;
1850 /* Merge any bindings from default_map into result_map. */
1851 for (auto iter : default_map)
1853 const binding_key *key = iter.first;
1854 const svalue *sval = iter.second;
1855 result_map.put (key, sval);
1858 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
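/* Illustrative example, added for exposition: if the cluster holds
   bindings for an array of structs and REG is one element of that
   array, the concrete bindings overlapping that element are gathered
   and rebased so that their offsets are relative to the start of REG,
   with any remaining gaps filled from the region's default/initial
   value, yielding a compound_svalue for just that element.  */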
1861 /* Remove, truncate, and/or split any bindings within this map that
1862 could overlap REG.
1864 If REG's base region or this cluster is symbolic and they're different
1865 base regions, then remove everything in this cluster's map, on the
1866 grounds that REG could be referring to the same memory as anything
1867 in the map.
1869 If UNCERTAINTY is non-NULL, use it to record any svalues that
1870 were removed, as being maybe-bound.
1872 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1873 were removed, as being maybe-live. */
1875 void
1876 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1877 const region *reg,
1878 uncertainty_t *uncertainty,
1879 svalue_set *maybe_live_values)
1881 if (reg->empty_p ())
1882 return;
1883 const binding_key *reg_binding = binding_key::make (mgr, reg);
1885 const region *cluster_base_reg = get_base_region ();
1886 const region *other_base_reg = reg->get_base_region ();
1887 /* If at least one of the base regions involved is symbolic, and they're
1888 not the same base region, then consider everything in the map as
1889 potentially overlapping with reg_binding (even if it's a concrete
1890 binding and things in the map are concrete - they could be referring
1891 to the same memory when the symbolic base regions are taken into
1892 account). */
1893 bool always_overlap = (cluster_base_reg != other_base_reg
1894 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1895 || other_base_reg->get_kind () == RK_SYMBOLIC));
1896 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1897 maybe_live_values,
1898 always_overlap);
1901 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1902 MGR and MERGER.
1903 Return true if they can be merged, false otherwise. */
1905 bool
1906 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1907 const binding_cluster *cluster_b,
1908 binding_cluster *out_cluster,
1909 store *out_store,
1910 store_manager *mgr,
1911 model_merger *merger)
1913 gcc_assert (out_cluster);
1915 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1916 true if either of the inputs is true. */
1917 if ((cluster_a && cluster_a->m_escaped)
1918 || (cluster_b && cluster_b->m_escaped))
1919 out_cluster->m_escaped = true;
1920 if ((cluster_a && cluster_a->m_touched)
1921 || (cluster_b && cluster_b->m_touched))
1922 out_cluster->m_touched = true;
1924 /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1925 could be NULL. Handle these cases. */
1926 if (cluster_a == NULL)
1928 gcc_assert (cluster_b != NULL);
1929 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1930 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1931 return true;
1933 if (cluster_b == NULL)
1935 gcc_assert (cluster_a != NULL);
1936 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1937 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1938 return true;
1941 /* The "both inputs are non-NULL" case. */
1942 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1943 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1944 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1946 hash_set<const binding_key *> keys;
1947 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1948 iter_a != cluster_a->m_map.end (); ++iter_a)
1950 const binding_key *key_a = (*iter_a).first;
1951 keys.add (key_a);
1953 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1954 iter_b != cluster_b->m_map.end (); ++iter_b)
1956 const binding_key *key_b = (*iter_b).first;
1957 keys.add (key_b);
1959 int num_symbolic_keys = 0;
1960 int num_concrete_keys = 0;
1961 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1962 iter != keys.end (); ++iter)
1964 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1965 const binding_key *key = *iter;
1966 const svalue *sval_a = cluster_a->get_any_value (key);
1967 const svalue *sval_b = cluster_b->get_any_value (key);
1969 if (key->symbolic_p ())
1970 num_symbolic_keys++;
1971 else
1972 num_concrete_keys++;
1974 if (sval_a == sval_b)
1976 gcc_assert (sval_a);
1977 out_cluster->m_map.put (key, sval_a);
1978 continue;
1980 else if (sval_a && sval_b)
1982 if (const svalue *merged_sval
1983 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1985 out_cluster->m_map.put (key, merged_sval);
1986 continue;
1988 /* Merger of the svalues failed. Reject merger of the cluster. */
1989 return false;
1992 /* If we get here, then one cluster binds this key and the other
1993 doesn't; merge them as "UNKNOWN". */
1994 gcc_assert (sval_a || sval_b);
1996 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1997 tree type = bound_sval->get_type ();
1998 const svalue *unknown_sval
1999 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
2001 /* ...but reject the merger if this sval shouldn't be mergeable
2002 (e.g. reject merging svalues that have non-purgable sm-state,
2003 to avoid falsely reporting memory leaks by merging them
2004 with something else). */
2005 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
2006 return false;
2008 out_cluster->m_map.put (key, unknown_sval);
2011 /* A cluster can have at most one symbolic key, and if it has one,
2012 it can't also have any concrete keys.
2013 If the merged keys would violate this, mark the cluster as touched, with no keys. */
2014 if (num_symbolic_keys >= 2
2015 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
2017 out_cluster->m_touched = true;
2018 out_cluster->m_map.empty ();
2021 /* We don't handle other kinds of overlaps yet. */
2023 return true;
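/* Illustrative note (not part of the original source): a sketch of how
   the per-key rules above combine.  If clusters A and B bind the same
   key to the same svalue, the merged cluster keeps that svalue.  If they
   bind different svalues, the merge defers to svalue::can_merge_p, and
   the whole cluster merge is rejected if that fails.  If only one of A
   and B binds the key, the merged cluster binds it to an UNKNOWN svalue
   of the bound value's type, again subject to can_merge_p, so that
   e.g. svalues carrying non-purgable sm-state still block the merge.  */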
2026 /* Update this cluster to reflect an attempt to merge OTHER where there
2027 is no other cluster to merge with, and so we're notionally merging the
2028 bound values in OTHER with the initial value of the relevant regions.
2030 Any bound keys in OTHER should be bound to unknown in this. */
2032 void
2033 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
2034 store *out_store,
2035 store_manager *mgr)
2037 for (map_t::iterator iter = other->m_map.begin ();
2038 iter != other->m_map.end (); ++iter)
2040 const binding_key *iter_key = (*iter).first;
2041 const svalue *iter_sval = (*iter).second;
2042 const svalue *unknown_sval
2043 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
2044 (iter_sval->get_type ());
2045 m_map.put (iter_key, unknown_sval);
2047 /* For any pointers in OTHER, the merger means that the
2048 concrete pointer becomes an unknown value, which could
2049 show up as a false report of a leak when considering what
2050 pointers are live before vs after.
2051 Avoid this by marking the base regions they point to as having
2052 escaped. */
2053 if (const region_svalue *region_sval
2054 = iter_sval->dyn_cast_region_svalue ())
2056 const region *base_reg
2057 = region_sval->get_pointee ()->get_base_region ();
2058 if (base_reg->tracked_p ()
2059 && !base_reg->symbolic_for_unknown_ptr_p ())
2061 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
2062 c->mark_as_escaped ();
2068 /* Mark this cluster as having escaped. */
2070 void
2071 binding_cluster::mark_as_escaped ()
2073 m_escaped = true;
2076 /* If this cluster has escaped (by this call, or by an earlier one, or
2077 by being an external param), then unbind all values and mark it
2078 as "touched", so that it has a conjured value, rather than an
2079 initial_svalue.
2080 Use P to purge state involving conjured_svalues. */
2082 void
2083 binding_cluster::on_unknown_fncall (const gcall *call,
2084 store_manager *mgr,
2085 const conjured_purge &p)
2087 if (m_escaped)
2089 m_map.empty ();
2091 if (!m_base_region->empty_p ())
2093 /* Bind it to a new "conjured" value using CALL. */
2094 const svalue *sval
2095 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2096 (m_base_region->get_type (), call, m_base_region, p);
2097 bind (mgr, m_base_region, sval);
2100 m_touched = true;
2104 /* Mark this cluster as having been clobbered by STMT.
2105 Use P to purge state involving conjured_svalues. */
2107 void
2108 binding_cluster::on_asm (const gasm *stmt,
2109 store_manager *mgr,
2110 const conjured_purge &p)
2112 m_map.empty ();
2115 /* Bind it to a new "conjured" value using STMT. */
2115 const svalue *sval
2116 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2117 (m_base_region->get_type (), stmt, m_base_region, p);
2118 bind (mgr, m_base_region, sval);
2120 m_touched = true;
2123 /* Return true if this cluster has escaped. */
2125 bool
2126 binding_cluster::escaped_p () const
2128 /* Consider the "errno" region to always have escaped. */
2129 if (m_base_region->get_kind () == RK_ERRNO)
2130 return true;
2131 return m_escaped;
2134 /* Return true if this binding_cluster has no information,
2135 i.e. if there are no bindings, and it hasn't been marked as having
2136 escaped or been touched symbolically. */
2138 bool
2139 binding_cluster::redundant_p () const
2141 return (m_map.elements () == 0
2142 && !m_escaped
2143 && !m_touched);
2146 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2148 static void
2149 append_pathvar_with_type (path_var pv,
2150 tree type,
2151 auto_vec<path_var> *out_pvs)
2153 gcc_assert (pv.m_tree);
2155 if (TREE_TYPE (pv.m_tree) != type)
2156 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2158 out_pvs->safe_push (pv);
2161 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2162 appending the results to OUT_PVS. */
2164 void
2165 binding_cluster::get_representative_path_vars (const region_model *model,
2166 svalue_set *visited,
2167 const region *base_reg,
2168 const svalue *sval,
2169 auto_vec<path_var> *out_pvs)
2170 const
2172 sval = simplify_for_binding (sval);
2174 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2176 const binding_key *key = (*iter).first;
2177 const svalue *bound_sval = (*iter).second;
2178 if (bound_sval == sval)
2180 if (const concrete_binding *ckey
2181 = key->dyn_cast_concrete_binding ())
2183 auto_vec <const region *> subregions;
2184 base_reg->get_subregions_for_binding
2185 (model->get_manager (),
2186 ckey->get_start_bit_offset (),
2187 ckey->get_size_in_bits (),
2188 sval->get_type (),
2189 &subregions);
2190 unsigned i;
2191 const region *subregion;
2192 FOR_EACH_VEC_ELT (subregions, i, subregion)
2194 if (path_var pv
2195 = model->get_representative_path_var (subregion,
2196 visited))
2197 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2200 else
2202 const symbolic_binding *skey = (const symbolic_binding *)key;
2203 if (path_var pv
2204 = model->get_representative_path_var (skey->get_region (),
2205 visited))
2206 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2212 /* Get any svalue bound to KEY, or NULL. */
2214 const svalue *
2215 binding_cluster::get_any_value (const binding_key *key) const
2217 return m_map.get (key);
2220 /* If this cluster has a single direct binding for the whole of the region,
2221 return it.
2222 For use in simplifying dumps. */
2224 const svalue *
2225 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2227 /* Fail gracefully if MGR is NULL to make it easier to dump store
2228 instances in the debugger. */
2229 if (mgr == NULL)
2230 return NULL;
2232 if (m_map.elements () != 1)
2233 return NULL;
2235 if (m_base_region->empty_p ())
2236 return NULL;
2238 const binding_key *key = binding_key::make (mgr, m_base_region);
2239 return get_any_value (key);
2242 /* class store_manager. */
2244 logger *
2245 store_manager::get_logger () const
2247 return m_mgr->get_logger ();
2250 /* binding consolidation. */
2252 const concrete_binding *
2253 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2254 bit_offset_t size_in_bits)
2256 concrete_binding b (start_bit_offset, size_in_bits);
2257 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2258 return existing;
2260 concrete_binding *to_save = new concrete_binding (b);
2261 m_concrete_binding_key_mgr.put (b, to_save);
2262 return to_save;
2265 const symbolic_binding *
2266 store_manager::get_symbolic_binding (const region *reg)
2268 symbolic_binding b (reg);
2269 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2270 return existing;
2272 symbolic_binding *to_save = new symbolic_binding (b);
2273 m_symbolic_binding_key_mgr.put (b, to_save);
2274 return to_save;
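/* A minimal standalone sketch (not part of the original source) of the
   consolidation idiom used by the two getters above: map an
   equality-comparable key to a single canonical heap-allocated instance,
   so that pointer equality can stand in for value equality.  The names
   and the use of std::map (rather than the analyzer's key managers) are
   illustrative assumptions only; T is assumed copyable and ordered by
   operator<.  */

#include <map>

template <typename T>
class consolidator
{
public:
  ~consolidator ()
  {
    for (auto &kv : m_map)
      delete kv.second;
  }

  /* Return the canonical instance equal to K, creating it on first use.  */
  const T *get (const T &k)
  {
    auto it = m_map.find (k);
    if (it != m_map.end ())
      return it->second;
    T *canonical = new T (k);
    m_map.emplace (k, canonical);
    return canonical;
  }

private:
  std::map<T, T *> m_map;
};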
2277 /* class store. */
2279 /* store's default ctor. */
2281 store::store ()
2282 : m_called_unknown_fn (false)
2286 /* store's copy ctor. */
2288 store::store (const store &other)
2289 : m_cluster_map (other.m_cluster_map.elements ()),
2290 m_called_unknown_fn (other.m_called_unknown_fn)
2292 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2293 iter != other.m_cluster_map.end ();
2294 ++iter)
2296 const region *reg = (*iter).first;
2297 gcc_assert (reg);
2298 binding_cluster *c = (*iter).second;
2299 gcc_assert (c);
2300 m_cluster_map.put (reg, new binding_cluster (*c));
2304 /* store's dtor. */
2306 store::~store ()
2308 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2309 iter != m_cluster_map.end ();
2310 ++iter)
2311 delete (*iter).second;
2314 /* store's assignment operator. */
2316 store &
2317 store::operator= (const store &other)
2319 /* Delete existing cluster map. */
2320 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2321 iter != m_cluster_map.end ();
2322 ++iter)
2323 delete (*iter).second;
2324 m_cluster_map.empty ();
2326 m_called_unknown_fn = other.m_called_unknown_fn;
2328 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2329 iter != other.m_cluster_map.end ();
2330 ++iter)
2332 const region *reg = (*iter).first;
2333 gcc_assert (reg);
2334 binding_cluster *c = (*iter).second;
2335 gcc_assert (c);
2336 m_cluster_map.put (reg, new binding_cluster (*c));
2338 return *this;
2341 /* store's equality operator. */
2343 bool
2344 store::operator== (const store &other) const
2346 if (m_called_unknown_fn != other.m_called_unknown_fn)
2347 return false;
2349 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2350 return false;
2352 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2353 iter != m_cluster_map.end ();
2354 ++iter)
2356 const region *reg = (*iter).first;
2357 binding_cluster *c = (*iter).second;
2358 binding_cluster **other_slot
2359 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2360 if (other_slot == NULL)
2361 return false;
2362 if (*c != **other_slot)
2363 return false;
2366 gcc_checking_assert (hash () == other.hash ());
2368 return true;
2371 /* Get a hash value for this store. */
2373 hashval_t
2374 store::hash () const
2376 hashval_t result = 0;
2377 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2378 iter != m_cluster_map.end ();
2379 ++iter)
2380 result ^= (*iter).second->hash ();
2381 return result;
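/* Standalone sketch (not part of the original source): store::hash
   combines the per-cluster hashes with XOR, which is commutative and
   associative, so the result does not depend on the hash_map's iteration
   order.  The helper below just demonstrates that property; the names
   are illustrative assumptions.  */

#include <cassert>
#include <cstddef>
#include <vector>

static size_t
xor_combine (const std::vector<size_t> &hashes)
{
  size_t result = 0;
  for (size_t h : hashes)
    result ^= h;
  return result;
}

static void
demo_xor_combine_is_order_independent ()
{
  std::vector<size_t> a = {0x12, 0x34, 0x56};
  std::vector<size_t> b = {0x56, 0x12, 0x34};
  assert (xor_combine (a) == xor_combine (b));
}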
2384 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2385 removing duplicate parents. */
2387 static void
2388 get_sorted_parent_regions (auto_vec<const region *> *out,
2389 auto_vec<const region *> &in)
2391 /* Get the set of parent regions. */
2392 hash_set<const region *> parent_regions;
2393 const region *iter_reg;
2394 unsigned i;
2395 FOR_EACH_VEC_ELT (in, i, iter_reg)
2397 const region *parent_reg = iter_reg->get_parent_region ();
2398 gcc_assert (parent_reg);
2399 parent_regions.add (parent_reg);
2402 /* Write to OUT. */
2403 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2404 iter != parent_regions.end(); ++iter)
2405 out->safe_push (*iter);
2407 /* Sort OUT. */
2408 out->qsort (region::cmp_ptr_ptr);
2411 /* Dump a representation of this store to PP, using SIMPLE to control how
2412 svalues and regions are printed.
2413 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2414 (to make it easier to use from the debugger). */
2416 void
2417 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2418 store_manager *mgr) const
2420 /* Sort into some deterministic order. */
2421 auto_vec<const region *> base_regions;
2422 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2423 iter != m_cluster_map.end (); ++iter)
2425 const region *base_reg = (*iter).first;
2426 base_regions.safe_push (base_reg);
2428 base_regions.qsort (region::cmp_ptr_ptr);
2430 /* Gather clusters, organize by parent region, so that we can group
2431 together locals, globals, etc. */
2432 auto_vec<const region *> parent_regions;
2433 get_sorted_parent_regions (&parent_regions, base_regions);
2435 const region *parent_reg;
2436 unsigned i;
2437 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2439 gcc_assert (parent_reg);
2440 pp_string (pp, "clusters within ");
2441 parent_reg->dump_to_pp (pp, simple);
2442 if (multiline)
2443 pp_newline (pp);
2444 else
2445 pp_string (pp, " {");
2447 const region *base_reg;
2448 unsigned j;
2449 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2451 /* This is O(N * M), but N ought to be small. */
2452 if (base_reg->get_parent_region () != parent_reg)
2453 continue;
2454 binding_cluster *cluster
2455 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2456 if (!multiline)
2458 if (j > 0)
2459 pp_string (pp, ", ");
2461 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2463 /* Special-case to simplify dumps for the common case where
2464 we just have one value directly bound to the whole of a
2465 region. */
2466 if (multiline)
2468 pp_string (pp, " cluster for: ");
2469 base_reg->dump_to_pp (pp, simple);
2470 pp_string (pp, ": ");
2471 sval->dump_to_pp (pp, simple);
2472 if (cluster->escaped_p ())
2473 pp_string (pp, " (ESCAPED)");
2474 if (cluster->touched_p ())
2475 pp_string (pp, " (TOUCHED)");
2476 pp_newline (pp);
2478 else
2480 pp_string (pp, "region: {");
2481 base_reg->dump_to_pp (pp, simple);
2482 pp_string (pp, ", value: ");
2483 sval->dump_to_pp (pp, simple);
2484 if (cluster->escaped_p ())
2485 pp_string (pp, " (ESCAPED)");
2486 if (cluster->touched_p ())
2487 pp_string (pp, " (TOUCHED)");
2488 pp_string (pp, "}");
2491 else if (multiline)
2493 pp_string (pp, " cluster for: ");
2494 base_reg->dump_to_pp (pp, simple);
2495 pp_newline (pp);
2496 cluster->dump_to_pp (pp, simple, multiline);
2498 else
2500 pp_string (pp, "base region: {");
2501 base_reg->dump_to_pp (pp, simple);
2502 pp_string (pp, "} has cluster: {");
2503 cluster->dump_to_pp (pp, simple, multiline);
2504 pp_string (pp, "}");
2507 if (!multiline)
2508 pp_string (pp, "}");
2510 pp_printf (pp, "m_called_unknown_fn: %s",
2511 m_called_unknown_fn ? "TRUE" : "FALSE");
2512 if (multiline)
2513 pp_newline (pp);
2516 /* Dump a multiline representation of this store to stderr. */
2518 DEBUG_FUNCTION void
2519 store::dump (bool simple) const
2521 pretty_printer pp;
2522 pp_format_decoder (&pp) = default_tree_printer;
2523 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2524 pp.buffer->stream = stderr;
2525 dump_to_pp (&pp, simple, true, NULL);
2526 pp_newline (&pp);
2527 pp_flush (&pp);
2530 /* Assert that this object is valid. */
2532 void
2533 store::validate () const
2535 for (auto iter : m_cluster_map)
2536 iter.second->validate ();
2539 /* Return a new json::object of the form
2540 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2541 ... for each cluster within parent region},
2542 ...for each parent region,
2543 "called_unknown_fn": true/false}. */
2545 json::object *
2546 store::to_json () const
2548 json::object *store_obj = new json::object ();
2550 /* Sort into some deterministic order. */
2551 auto_vec<const region *> base_regions;
2552 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2553 iter != m_cluster_map.end (); ++iter)
2555 const region *base_reg = (*iter).first;
2556 base_regions.safe_push (base_reg);
2558 base_regions.qsort (region::cmp_ptr_ptr);
2560 /* Gather clusters, organize by parent region, so that we can group
2561 together locals, globals, etc. */
2562 auto_vec<const region *> parent_regions;
2563 get_sorted_parent_regions (&parent_regions, base_regions);
2565 const region *parent_reg;
2566 unsigned i;
2567 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2569 gcc_assert (parent_reg);
2571 json::object *clusters_in_parent_reg_obj = new json::object ();
2573 const region *base_reg;
2574 unsigned j;
2575 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2577 /* This is O(N * M), but N ought to be small. */
2578 if (base_reg->get_parent_region () != parent_reg)
2579 continue;
2580 binding_cluster *cluster
2581 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2582 label_text base_reg_desc = base_reg->get_desc ();
2583 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2584 cluster->to_json ());
2586 label_text parent_reg_desc = parent_reg->get_desc ();
2587 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2590 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2592 return store_obj;
2595 /* Get any svalue bound to REG, or NULL. */
2597 const svalue *
2598 store::get_any_binding (store_manager *mgr, const region *reg) const
2600 const region *base_reg = reg->get_base_region ();
2601 binding_cluster **cluster_slot
2602 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2603 if (!cluster_slot)
2604 return NULL;
2605 return (*cluster_slot)->get_any_binding (mgr, reg);
2608 /* Set the value of LHS_REG to RHS_SVAL. */
2610 void
2611 store::set_value (store_manager *mgr, const region *lhs_reg,
2612 const svalue *rhs_sval,
2613 uncertainty_t *uncertainty)
2615 logger *logger = mgr->get_logger ();
2616 LOG_SCOPE (logger);
2618 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2620 if (lhs_reg->get_type ())
2621 rhs_sval = simplify_for_binding (rhs_sval);
2622 /* ...but if we have no type for the region, retain any cast. */
2624 const region *lhs_base_reg = lhs_reg->get_base_region ();
2625 binding_cluster *lhs_cluster;
2626 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2628 /* Reject attempting to bind values into a symbolic region
2629 for an unknown ptr; merely invalidate values below. */
2630 lhs_cluster = NULL;
2632 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2633 then treat the region being pointed to as having escaped. */
2634 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2636 const region *ptr_dst = ptr_sval->get_pointee ();
2637 const region *ptr_base_reg = ptr_dst->get_base_region ();
2638 mark_as_escaped (ptr_base_reg);
2640 if (uncertainty)
2641 uncertainty->on_maybe_bound_sval (rhs_sval);
2643 else if (lhs_base_reg->tracked_p ())
2645 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2646 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2648 else
2650 /* Reject attempting to bind values into an untracked region;
2651 merely invalidate values below. */
2652 lhs_cluster = NULL;
2655 /* Bindings to a cluster can affect other clusters if a symbolic
2656 base region is involved.
2657 Writes to concrete clusters can't affect other concrete clusters,
2658 but can affect symbolic clusters.
2659 Writes to symbolic clusters can affect both concrete and symbolic
2660 clusters.
2661 Invalidate our knowledge of other clusters that might have been
2662 affected by the write.
2663 Gather the set of all svalues that might still be live even if
2664 the store doesn't refer to them. */
2665 svalue_set maybe_live_values;
2666 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2667 iter != m_cluster_map.end (); ++iter)
2669 const region *iter_base_reg = (*iter).first;
2670 binding_cluster *iter_cluster = (*iter).second;
2671 if (iter_base_reg != lhs_base_reg
2672 && (lhs_cluster == NULL
2673 || lhs_cluster->symbolic_p ()
2674 || iter_cluster->symbolic_p ()))
2676 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2677 switch (t_alias.get_value ())
2679 default:
2680 gcc_unreachable ();
2682 case tristate::TS_UNKNOWN:
2683 if (logger)
2685 pretty_printer *pp = logger->get_printer ();
2686 logger->start_log_line ();
2687 logger->log_partial ("possible aliasing of ");
2688 iter_base_reg->dump_to_pp (pp, true);
2689 logger->log_partial (" when writing SVAL: ");
2690 rhs_sval->dump_to_pp (pp, true);
2691 logger->log_partial (" to LHS_REG: ");
2692 lhs_reg->dump_to_pp (pp, true);
2693 logger->end_log_line ();
2695 /* Mark the whole of ITER_BASE_REG within ITER_CLUSTER as unknown,
2696 using LHS_REG when considering overlaps, to handle
2697 symbolic vs concrete issues. */
2698 iter_cluster->mark_region_as_unknown
2699 (mgr,
2700 iter_base_reg, /* reg_to_bind */
2701 lhs_reg, /* reg_for_overlap */
2702 uncertainty,
2703 &maybe_live_values);
2704 break;
2706 case tristate::TS_TRUE:
2707 gcc_unreachable ();
2708 break;
2710 case tristate::TS_FALSE:
2711 /* If they can't be aliases, then don't invalidate this
2712 cluster. */
2713 break;
2717 /* Given the set of svalues that might still be live, process them
2718 (e.g. marking regions as escaped).
2719 We do this after the iteration to avoid potentially changing
2720 m_cluster_map whilst iterating over it. */
2721 on_maybe_live_values (maybe_live_values);
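/* Illustrative note (not part of the original source): for analyzed code
   such as

     void f (int *p) { *p = 42; }

   the base region of *p is symbolic, so after the binding is made every
   other cluster is re-checked via eval_alias, and any cluster that can't
   be proven disjoint from the write (TS_UNKNOWN) has the potentially
   overlapping region marked as unknown.  A write to a concrete base
   region only forces re-checking of symbolic clusters, since two
   distinct concrete base regions can't overlap.  */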
2724 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2726 tristate
2727 store::eval_alias (const region *base_reg_a,
2728 const region *base_reg_b) const
2730 /* SSA names can't alias. */
2731 tree decl_a = base_reg_a->maybe_get_decl ();
2732 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2733 return tristate::TS_FALSE;
2734 tree decl_b = base_reg_b->maybe_get_decl ();
2735 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2736 return tristate::TS_FALSE;
2738 /* Try both ways, for symmetry. */
2739 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2740 if (ts_ab.is_false ())
2741 return tristate::TS_FALSE;
2742 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2743 if (ts_ba.is_false ())
2744 return tristate::TS_FALSE;
2745 return tristate::TS_UNKNOWN;
2748 /* Half of store::eval_alias; called twice for symmetry. */
2750 tristate
2751 store::eval_alias_1 (const region *base_reg_a,
2752 const region *base_reg_b) const
2754 /* If they're in different memory spaces, they can't alias. */
2756 enum memory_space memspace_a = base_reg_a->get_memory_space ();
2757 if (memspace_a != MEMSPACE_UNKNOWN)
2759 enum memory_space memspace_b = base_reg_b->get_memory_space ();
2760 if (memspace_b != MEMSPACE_UNKNOWN
2761 && memspace_a != memspace_b)
2762 return tristate::TS_FALSE;
2766 if (const symbolic_region *sym_reg_a
2767 = base_reg_a->dyn_cast_symbolic_region ())
2769 const svalue *sval_a = sym_reg_a->get_pointer ();
2770 if (tree decl_b = base_reg_b->maybe_get_decl ())
2772 if (!may_be_aliased (decl_b))
2773 return tristate::TS_FALSE;
2774 if (sval_a->get_kind () == SK_INITIAL)
2775 if (!is_global_var (decl_b))
2777 /* The initial value of a pointer can't point to a local. */
2778 return tristate::TS_FALSE;
2781 if (sval_a->get_kind () == SK_INITIAL
2782 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2784 /* The initial value of a pointer can't point to a
2785 region that was allocated on the heap after the beginning of the
2786 path. */
2787 return tristate::TS_FALSE;
2789 if (const widening_svalue *widening_sval_a
2790 = sval_a->dyn_cast_widening_svalue ())
2792 const svalue *base = widening_sval_a->get_base_svalue ();
2793 if (const region_svalue *region_sval
2794 = base->dyn_cast_region_svalue ())
2796 const region *pointee = region_sval->get_pointee ();
2797 /* If sval_a is WIDENING(&REGION, OP), and
2798 B can't alias REGION, then B can't alias A either.
2799 For example, A might arise from
2800 for (ptr = &REGION; ...; ptr++)
2801 where sval_a is ptr in the 2nd iteration of the loop.
2802 We want to ensure that "*ptr" can only clobber things
2803 within REGION's base region. */
2804 tristate ts = eval_alias (pointee->get_base_region (),
2805 base_reg_b);
2806 if (ts.is_false ())
2807 return tristate::TS_FALSE;
2811 return tristate::TS_UNKNOWN;
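/* Illustrative note (not part of the original source): some concrete
   consequences of the checks above.  Two distinct SSA names never alias;
   regions known to be in different memory spaces (e.g. stack vs globals)
   are disjoint; a symbolic dereference can be ruled out as aliasing a
   decl whose address is never taken (per may_be_aliased); and if the
   pointer's value is still INIT_VAL(...), it additionally can't alias a
   local decl or a region heap-allocated later along the path.  Anything
   not ruled out stays TS_UNKNOWN and is treated by the caller as a
   possible alias.  */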
2814 /* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
2816 void
2817 store::on_maybe_live_values (const svalue_set &maybe_live_values)
2819 for (auto sval : maybe_live_values)
2821 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
2823 const region *base_reg = ptr_sval->get_pointee ()->get_base_region ();
2824 mark_as_escaped (base_reg);
2829 /* Remove all bindings overlapping REG within this store. */
2831 void
2832 store::clobber_region (store_manager *mgr, const region *reg)
2834 const region *base_reg = reg->get_base_region ();
2835 binding_cluster **slot = m_cluster_map.get (base_reg);
2836 if (!slot)
2837 return;
2838 binding_cluster *cluster = *slot;
2839 cluster->clobber_region (mgr, reg);
2840 if (cluster->redundant_p ())
2842 delete cluster;
2843 m_cluster_map.remove (base_reg);
2847 /* Remove any bindings for REG within this store. */
2849 void
2850 store::purge_region (store_manager *mgr, const region *reg)
2852 const region *base_reg = reg->get_base_region ();
2853 binding_cluster **slot = m_cluster_map.get (base_reg);
2854 if (!slot)
2855 return;
2856 binding_cluster *cluster = *slot;
2857 cluster->purge_region (mgr, reg);
2858 if (cluster->redundant_p ())
2860 delete cluster;
2861 m_cluster_map.remove (base_reg);
2865 /* Fill REG with SVAL. */
2867 void
2868 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2870 /* Filling an empty region is a no-op. */
2871 if (reg->empty_p ())
2872 return;
2874 const region *base_reg = reg->get_base_region ();
2875 if (base_reg->symbolic_for_unknown_ptr_p ()
2876 || !base_reg->tracked_p ())
2877 return;
2878 binding_cluster *cluster = get_or_create_cluster (base_reg);
2879 cluster->fill_region (mgr, reg, sval);
2882 /* Zero-fill REG. */
2884 void
2885 store::zero_fill_region (store_manager *mgr, const region *reg)
2887 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2888 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2889 fill_region (mgr, reg, zero_sval);
2892 /* Mark REG as having unknown content. */
2894 void
2895 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2896 uncertainty_t *uncertainty,
2897 svalue_set *maybe_live_values)
2899 const region *base_reg = reg->get_base_region ();
2900 if (base_reg->symbolic_for_unknown_ptr_p ()
2901 || !base_reg->tracked_p ())
2902 return;
2903 binding_cluster *cluster = get_or_create_cluster (base_reg);
2904 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
2905 maybe_live_values);
2908 /* Purge state involving SVAL. */
2910 void
2911 store::purge_state_involving (const svalue *sval,
2912 region_model_manager *sval_mgr)
2914 auto_vec <const region *> base_regs_to_purge;
2915 for (auto iter : m_cluster_map)
2917 const region *base_reg = iter.first;
2918 if (base_reg->involves_p (sval))
2919 base_regs_to_purge.safe_push (base_reg);
2920 else
2922 binding_cluster *cluster = iter.second;
2923 cluster->purge_state_involving (sval, sval_mgr);
2927 for (auto iter : base_regs_to_purge)
2928 purge_cluster (iter);
2931 /* Get the cluster for BASE_REG, or NULL (const version). */
2933 const binding_cluster *
2934 store::get_cluster (const region *base_reg) const
2936 gcc_assert (base_reg);
2937 gcc_assert (base_reg->get_base_region () == base_reg);
2938 if (binding_cluster **slot
2939 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2940 return *slot;
2941 else
2942 return NULL;
2945 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2947 binding_cluster *
2948 store::get_cluster (const region *base_reg)
2950 gcc_assert (base_reg);
2951 gcc_assert (base_reg->get_base_region () == base_reg);
2952 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2953 return *slot;
2954 else
2955 return NULL;
2958 /* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
2960 binding_cluster *
2961 store::get_or_create_cluster (const region *base_reg)
2963 gcc_assert (base_reg);
2964 gcc_assert (base_reg->get_base_region () == base_reg);
2966 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2967 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2969 /* We shouldn't create clusters for base regions that aren't trackable. */
2970 gcc_assert (base_reg->tracked_p ());
2972 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2973 return *slot;
2975 binding_cluster *cluster = new binding_cluster (base_reg);
2976 m_cluster_map.put (base_reg, cluster);
2978 return cluster;
2981 /* Remove any cluster for BASE_REG, for use by
2982 region_model::unbind_region_and_descendents
2983 when popping stack frames and handling deleted heap regions. */
2985 void
2986 store::purge_cluster (const region *base_reg)
2988 gcc_assert (base_reg->get_base_region () == base_reg);
2989 binding_cluster **slot = m_cluster_map.get (base_reg);
2990 if (!slot)
2991 return;
2992 binding_cluster *cluster = *slot;
2993 delete cluster;
2994 m_cluster_map.remove (base_reg);
2997 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2998 Return true if successful, or false if the stores can't be merged. */
3000 bool
3001 store::can_merge_p (const store *store_a, const store *store_b,
3002 store *out_store, store_manager *mgr,
3003 model_merger *merger)
3005 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
3006 out_store->m_called_unknown_fn = true;
3008 /* Get the union of all base regions for STORE_A and STORE_B. */
3009 hash_set<const region *> base_regions;
3010 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
3011 iter_a != store_a->m_cluster_map.end (); ++iter_a)
3013 const region *base_reg_a = (*iter_a).first;
3014 base_regions.add (base_reg_a);
3016 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
3017 iter_b != store_b->m_cluster_map.end (); ++iter_b)
3019 const region *base_reg_b = (*iter_b).first;
3020 base_regions.add (base_reg_b);
3023 /* Sort the base regions before considering them. This ought not to
3024 affect the results, but can affect which types UNKNOWN_REGIONs are
3025 created for in a run; sorting them thus avoids minor differences
3026 in logfiles. */
3027 auto_vec<const region *> vec_base_regions (base_regions.elements ());
3028 for (hash_set<const region *>::iterator iter = base_regions.begin ();
3029 iter != base_regions.end (); ++iter)
3030 vec_base_regions.quick_push (*iter);
3031 vec_base_regions.qsort (region::cmp_ptr_ptr);
3032 unsigned i;
3033 const region *base_reg;
3034 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
3036 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
3037 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
3038 /* At least one of cluster_a and cluster_b must be non-NULL. */
3039 binding_cluster *out_cluster
3040 = out_store->get_or_create_cluster (base_reg);
3041 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
3042 out_cluster, out_store, mgr, merger))
3043 return false;
3045 return true;
3048 /* Mark the cluster for BASE_REG as having escaped.
3049 For use when handling an unrecognized function call, and
3050 for params to "top-level" calls.
3051 Further unknown function calls could touch it, even if the cluster
3052 isn't reachable from args of those calls. */
3054 void
3055 store::mark_as_escaped (const region *base_reg)
3057 gcc_assert (base_reg);
3058 gcc_assert (base_reg->get_base_region () == base_reg);
3060 if (base_reg->symbolic_for_unknown_ptr_p ()
3061 || !base_reg->tracked_p ())
3062 return;
3064 binding_cluster *cluster = get_or_create_cluster (base_reg);
3065 cluster->mark_as_escaped ();
3068 /* Handle an unknown fncall by updating any clusters that have escaped
3069 (either in this fncall, or in a prior one). */
3071 void
3072 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
3073 const conjured_purge &p)
3075 m_called_unknown_fn = true;
3077 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3078 iter != m_cluster_map.end (); ++iter)
3079 (*iter).second->on_unknown_fncall (call, mgr, p);
3082 /* Return true if a non-const pointer to BASE_REG (or something within it)
3083 has escaped to code outside of the TU being analyzed. */
3085 bool
3086 store::escaped_p (const region *base_reg) const
3088 gcc_assert (base_reg);
3089 gcc_assert (base_reg->get_base_region () == base_reg);
3091 /* "errno" can always be modified by external code. */
3092 if (base_reg->get_kind () == RK_ERRNO)
3093 return true;
3095 if (binding_cluster **cluster_slot
3096 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3097 return (*cluster_slot)->escaped_p ();
3098 return false;
3101 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3102 this store, using VISITED to ensure the traversal terminates. */
3104 void
3105 store::get_representative_path_vars (const region_model *model,
3106 svalue_set *visited,
3107 const svalue *sval,
3108 auto_vec<path_var> *out_pvs) const
3110 gcc_assert (sval);
3112 /* Find all bindings that reference SVAL. */
3113 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3114 iter != m_cluster_map.end (); ++iter)
3116 const region *base_reg = (*iter).first;
3117 binding_cluster *cluster = (*iter).second;
3118 cluster->get_representative_path_vars (model, visited, base_reg, sval,
3119 out_pvs);
3122 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
3124 const region *reg = init_sval->get_region ();
3125 if (path_var pv = model->get_representative_path_var (reg,
3126 visited))
3127 out_pvs->safe_push (pv);
3131 /* Remove all bindings overlapping REG within this store, removing
3132 any clusters that become redundant.
3134 If UNCERTAINTY is non-NULL, use it to record any svalues that
3135 were removed, as being maybe-bound. */
3137 void
3138 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3139 uncertainty_t *uncertainty)
3141 const region *base_reg = reg->get_base_region ();
3142 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3144 binding_cluster *cluster = *cluster_slot;
3145 if (reg == base_reg && !escaped_p (base_reg))
3147 /* Remove whole cluster. */
3148 m_cluster_map.remove (base_reg);
3149 delete cluster;
3150 return;
3152 /* Pass NULL for the maybe_live_values here, as we don't want to
3153 record the old svalues as being maybe-live. */
3154 cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL);
3158 /* Subclass of visitor that accumulates a hash_set of the regions that
3159 were visited. */
3161 struct region_finder : public visitor
3163 void visit_region (const region *reg) final override
3165 m_regs.add (reg);
3168 hash_set<const region *> m_regs;
3171 /* Canonicalize this store, to maximize the chance of equality between
3172 instances. */
3174 void
3175 store::canonicalize (store_manager *mgr)
3177 /* If we have e.g.:
3178 cluster for: HEAP_ALLOCATED_REGION(543)
3179 TOUCHED
3180 where the heap region is empty, unreferenced, and hasn't been marked
3181 as escaped, then purge that cluster, to avoid unbounded state chains
3182 involving these. */
3184 /* Find regions that are referenced by bound values in the store. */
3185 region_finder s;
3186 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3187 iter != m_cluster_map.end (); ++iter)
3189 binding_cluster *cluster = (*iter).second;
3190 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3191 bind_iter != cluster->m_map.end (); ++bind_iter)
3192 (*bind_iter).second->accept (&s);
3195 /* Locate heap-allocated regions that have empty bindings that weren't
3196 found above. */
3197 hash_set<const region *> purgeable_regions;
3198 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3199 iter != m_cluster_map.end (); ++iter)
3201 const region *base_reg = (*iter).first;
3202 binding_cluster *cluster = (*iter).second;
3203 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3205 /* Don't purge a heap-allocated region that's been marked as
3206 escaping, since this could be recording that a ptr to it
3207 was written to an unknown symbolic region along this
3208 path, and so we don't know whether it's referenced or
3209 not, and hence whether we should report it as leaking
3210 (PR analyzer/106473). */
3211 if (cluster->escaped_p ())
3212 continue;
3214 if (cluster->empty_p ())
3215 if (!s.m_regs.contains (base_reg))
3216 purgeable_regions.add (base_reg);
3218 /* Also cover the UNKNOWN case. */
3219 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3220 if (sval->get_kind () == SK_UNKNOWN)
3221 if (!s.m_regs.contains (base_reg))
3222 purgeable_regions.add (base_reg);
3226 /* Purge them. */
3227 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3228 iter != purgeable_regions.end (); ++iter)
3230 const region *base_reg = *iter;
3231 purge_cluster (base_reg);
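/* Illustrative note (not part of the original source): the net effect of
   the purge above is that a cluster for a HEAP_ALLOCATED_REGION with no
   bindings (or just a single UNKNOWN value) is dropped when no value in
   the store refers to that region and the cluster hasn't been marked as
   escaped; escaped heap clusters are kept, since a pointer to them may
   have been written somewhere the analyzer can't see.  */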
3235 /* Subroutine for use by exploded_path::feasible_p.
3237 We need to deal with state differences between:
3238 (a) when the exploded_graph is being initially constructed and
3239 (b) when replaying the state changes along a specific path in
3240 exploded_path::feasible_p.
3242 In (a), state merging happens, so when exploring a loop
3243 for (i = 0; i < 1024; i++)
3244 on successive iterations we have i == 0, then i == WIDENING.
3246 In (b), no state merging happens, so naively replaying the path
3247 that goes twice through the loop then exits it
3248 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3249 that exits the loop, which would be found to be infeasible as i == 1,
3250 and the path would be rejected.
3252 We need to fix up state during replay. This subroutine is
3253 called whenever we enter a supernode that we've already
3254 visited along this exploded_path, passing in OTHER_STORE
3255 from the destination enode's state.
3257 Find bindings to widening values in OTHER_STORE.
3258 For all that are found, update the binding in this store to UNKNOWN. */
3260 void
3261 store::loop_replay_fixup (const store *other_store,
3262 region_model_manager *mgr)
3264 gcc_assert (other_store);
3265 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3266 iter != other_store->m_cluster_map.end (); ++iter)
3268 const region *base_reg = (*iter).first;
3269 binding_cluster *cluster = (*iter).second;
3270 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3271 bind_iter != cluster->m_map.end (); ++bind_iter)
3273 const binding_key *key = (*bind_iter).first;
3274 const svalue *sval = (*bind_iter).second;
3275 if (sval->get_kind () == SK_WIDENING)
3277 binding_cluster *this_cluster
3278 = get_or_create_cluster (base_reg);
3279 const svalue *unknown
3280 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3281 this_cluster->bind_key (key, unknown);
3287 /* Use R to replay the bindings from SUMMARY into this object. */
3289 void
3290 store::replay_call_summary (call_summary_replay &r,
3291 const store &summary)
3293 if (summary.m_called_unknown_fn)
3295 /* A call to an external function occurred in the summary.
3296 Hence we need to invalidate our knowledge of globals,
3297 escaped regions, etc. */
3298 on_unknown_fncall (r.get_call_stmt (),
3299 r.get_store_manager (),
3300 conjured_purge (r.get_caller_model (),
3301 r.get_ctxt ()));
3304 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3305 for (auto kv : summary.m_cluster_map)
3306 keys.quick_push (kv.first);
3307 keys.qsort (region::cmp_ptr_ptr);
3308 for (auto base_reg : keys)
3309 replay_call_summary_cluster (r, summary, base_reg);
3312 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3313 into this object. */
3315 void
3316 store::replay_call_summary_cluster (call_summary_replay &r,
3317 const store &summary,
3318 const region *summary_base_reg)
3320 const call_details &cd = r.get_call_details ();
3321 region_model_manager *reg_mgr = r.get_manager ();
3322 store_manager *mgr = reg_mgr->get_store_manager ();
3323 const binding_cluster *summary_cluster
3324 = summary.get_cluster (summary_base_reg);
3326 /* Handle "ESCAPED" and "TOUCHED" flags. */
3327 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3328 if (const region *caller_reg
3329 = r.convert_region_from_summary (summary_base_reg))
3331 const region *caller_base_reg = caller_reg->get_base_region ();
3332 if (caller_base_reg->tracked_p ()
3333 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3335 binding_cluster *caller_cluster
3336 = get_or_create_cluster (caller_base_reg);
3337 if (summary_cluster->escaped_p ())
3338 caller_cluster->mark_as_escaped ();
3339 if (summary_cluster->touched_p ())
3340 caller_cluster->m_touched = true;
3344 switch (summary_base_reg->get_kind ())
3346 /* Top-level regions. */
3347 case RK_FRAME:
3348 case RK_GLOBALS:
3349 case RK_CODE:
3350 case RK_STACK:
3351 case RK_HEAP:
3352 case RK_THREAD_LOCAL:
3353 case RK_ROOT:
3354 /* Child regions. */
3355 case RK_FIELD:
3356 case RK_ELEMENT:
3357 case RK_OFFSET:
3358 case RK_SIZED:
3359 case RK_CAST:
3360 case RK_BIT_RANGE:
3361 /* Other regions. */
3362 case RK_VAR_ARG:
3363 case RK_UNKNOWN:
3364 /* These should never be the base region of a binding cluster. */
3365 gcc_unreachable ();
3366 break;
3368 case RK_FUNCTION:
3369 case RK_LABEL:
3370 case RK_STRING:
3371 /* These can be marked as escaping. */
3372 break;
3374 case RK_SYMBOLIC:
3376 const symbolic_region *summary_symbolic_reg
3377 = as_a <const symbolic_region *> (summary_base_reg);
3378 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3379 const svalue *caller_ptr_sval
3380 = r.convert_svalue_from_summary (summary_ptr_sval);
3381 if (!caller_ptr_sval)
3382 return;
3383 const region *caller_dest_reg
3384 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3385 NULL_TREE,
3386 cd.get_ctxt ());
3387 const svalue *summary_sval
3388 = summary.get_any_binding (mgr, summary_base_reg);
3389 if (!summary_sval)
3390 return;
3391 const svalue *caller_sval
3392 = r.convert_svalue_from_summary (summary_sval);
3393 if (!caller_sval)
3394 caller_sval =
3395 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3396 set_value (mgr, caller_dest_reg,
3397 caller_sval, NULL /* uncertainty_t * */);
3399 break;
3401 case RK_HEAP_ALLOCATED:
3402 case RK_DECL:
3403 case RK_ERRNO:
3404 case RK_PRIVATE:
3406 const region *caller_dest_reg
3407 = r.convert_region_from_summary (summary_base_reg);
3408 if (!caller_dest_reg)
3409 return;
3410 const svalue *summary_sval
3411 = summary.get_any_binding (mgr, summary_base_reg);
3412 if (!summary_sval)
3413 summary_sval = reg_mgr->get_or_create_compound_svalue
3414 (summary_base_reg->get_type (),
3415 summary_cluster->get_map ());
3416 const svalue *caller_sval
3417 = r.convert_svalue_from_summary (summary_sval);
3418 if (!caller_sval)
3419 caller_sval =
3420 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3421 set_value (mgr, caller_dest_reg,
3422 caller_sval, NULL /* uncertainty_t * */);
3424 break;
3426 case RK_ALLOCA:
3427 /* Ignore bindings of alloca regions in the summary. */
3428 break;
3432 #if CHECKING_P
3434 namespace selftest {
3436 /* Verify that bit_range::intersects_p works as expected. */
3438 static void
3439 test_bit_range_intersects_p ()
3441 bit_range b0 (0, 1);
3442 bit_range b1 (1, 1);
3443 bit_range b2 (2, 1);
3444 bit_range b3 (3, 1);
3445 bit_range b4 (4, 1);
3446 bit_range b5 (5, 1);
3447 bit_range b6 (6, 1);
3448 bit_range b7 (7, 1);
3449 bit_range b1_to_6 (1, 6);
3450 bit_range b0_to_7 (0, 8);
3451 bit_range b3_to_5 (3, 3);
3452 bit_range b6_to_7 (6, 2);
3454 /* self-intersection is true. */
3455 ASSERT_TRUE (b0.intersects_p (b0));
3456 ASSERT_TRUE (b7.intersects_p (b7));
3457 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3458 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3460 ASSERT_FALSE (b0.intersects_p (b1));
3461 ASSERT_FALSE (b1.intersects_p (b0));
3462 ASSERT_FALSE (b0.intersects_p (b7));
3463 ASSERT_FALSE (b7.intersects_p (b0));
3465 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3466 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3467 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3468 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3470 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3471 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3472 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3473 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3474 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3475 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3477 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3478 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3480 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3481 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3483 bit_range r1 (0,0);
3484 bit_range r2 (0,0);
3485 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3486 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3487 ASSERT_EQ (r1.m_size_in_bits, 6);
3488 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3489 ASSERT_EQ (r2.m_size_in_bits, 6);
3491 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3492 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3493 ASSERT_EQ (r1.m_size_in_bits, 6);
3494 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3495 ASSERT_EQ (r2.m_size_in_bits, 6);
3498 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3500 static void
3501 assert_bit_range_from_mask_eq (const location &loc,
3502 unsigned HOST_WIDE_INT mask,
3503 const bit_range &expected)
3505 bit_range actual (0, 0);
3506 bool ok = bit_range::from_mask (mask, &actual);
3507 ASSERT_TRUE_AT (loc, ok);
3508 ASSERT_EQ_AT (loc, actual, expected);
3511 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3512 out EXPECTED_BIT_RANGE. */
3514 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3515 SELFTEST_BEGIN_STMT \
3516 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3517 EXPECTED_BIT_RANGE); \
3518 SELFTEST_END_STMT
3520 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3522 static void
3523 assert_no_bit_range_from_mask_eq (const location &loc,
3524 unsigned HOST_WIDE_INT mask)
3526 bit_range actual (0, 0);
3527 bool ok = bit_range::from_mask (mask, &actual);
3528 ASSERT_FALSE_AT (loc, ok);
3531 /* Assert that bit_range::from_mask (MASK) returns false. */
3533 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3534 SELFTEST_BEGIN_STMT \
3535 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3536 SELFTEST_END_STMT
3538 /* Verify that bit_range::from_mask works as expected. */
3540 static void
3541 test_bit_range_from_mask ()
3543 /* Should fail on zero. */
3544 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3546 /* Verify 1-bit masks. */
3547 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3548 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3549 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3550 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3551 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3552 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3553 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3554 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3556 /* Verify N-bit masks starting at bit 0. */
3557 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3558 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3559 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3560 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3561 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3562 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3563 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3564 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3566 /* Various other tests. */
3567 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3568 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3569 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3571 /* Multiple ranges of set bits should fail. */
3572 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3573 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
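/* Standalone sketch (not part of the original source) of the behaviour
   the assertions above exercise: a mask with a single contiguous run of
   set bits maps to (start = number of trailing zero bits,
   size = number of set bits), e.g. 0x30 maps to (4, 2) as asserted
   above; zero and masks with multiple runs of set bits are rejected.
   The helper below is an illustrative re-implementation, not the
   analyzer's bit_range::from_mask.  */

static bool
sketch_range_from_mask (unsigned long long mask,
			unsigned *out_start, unsigned *out_size)
{
  if (mask == 0)
    return false;
  unsigned start = 0;
  while (!(mask & 1))
    {
      mask >>= 1;
      start++;
    }
  /* With the trailing zeros shifted out, a single contiguous run means
     mask + 1 is a power of two.  */
  if (mask & (mask + 1))
    return false;
  unsigned size = 0;
  while (mask)
    {
      size++;
      mask >>= 1;
    }
  *out_start = start;
  *out_size = size;
  return true;
}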
3576 /* Implementation detail of ASSERT_OVERLAP. */
3578 static void
3579 assert_overlap (const location &loc,
3580 const concrete_binding *b1,
3581 const concrete_binding *b2)
3583 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3584 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3587 /* Implementation detail of ASSERT_DISJOINT. */
3589 static void
3590 assert_disjoint (const location &loc,
3591 const concrete_binding *b1,
3592 const concrete_binding *b2)
3594 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3595 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3598 /* Assert that B1 and B2 overlap, checking both ways. */
3600 #define ASSERT_OVERLAP(B1, B2) \
3601 SELFTEST_BEGIN_STMT \
3602 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3603 SELFTEST_END_STMT
3605 /* Assert that B1 and B2 do not overlap, checking both ways. */
3607 #define ASSERT_DISJOINT(B1, B2) \
3608 SELFTEST_BEGIN_STMT \
3609 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3610 SELFTEST_END_STMT
3612 /* Verify that concrete_binding::overlaps_p works as expected. */
3614 static void
3615 test_binding_key_overlap ()
3617 store_manager mgr (NULL);
3619 /* Various 8-bit bindings. */
3620 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3621 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3622 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3623 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3625 /* 16-bit bindings. */
3626 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3627 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3628 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3630 /* 32-bit binding. */
3631 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3633 /* Everything should self-overlap. */
3634 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3635 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3636 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3637 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3638 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3639 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3640 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3641 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3643 /* Verify the 8-bit bindings that don't overlap each other. */
3644 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3645 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3647 /* Check for overlap of differently-sized bindings. */
3648 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3649 /* ...and with differing start points. */
3650 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3651 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3652 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3653 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3655 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3656 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3657 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3658 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3661 /* Run all of the selftests within this file. */
3663 void
3664 analyzer_store_cc_tests ()
3666 test_bit_range_intersects_p ();
3667 test_bit_range_from_mask ();
3668 test_binding_key_overlap ();
3671 } // namespace selftest
3673 #endif /* CHECKING_P */
3675 } // namespace ana
3677 #endif /* #if ENABLE_ANALYZER */