gcc/analyzer/store.cc
1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2023 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tree.h"
26 #include "function.h"
27 #include "basic-block.h"
28 #include "gimple.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
31 #include "graphviz.h"
32 #include "options.h"
33 #include "cgraph.h"
34 #include "tree-dfa.h"
35 #include "stringpool.h"
36 #include "convert.h"
37 #include "target.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
41 #include "diagnostic-metadata.h"
42 #include "bitmap.h"
43 #include "selftest.h"
44 #include "analyzer/analyzer.h"
45 #include "analyzer/analyzer-logging.h"
46 #include "ordered-hash-map.h"
47 #include "options.h"
48 #include "cfg.h"
49 #include "analyzer/supergraph.h"
50 #include "sbitmap.h"
51 #include "analyzer/call-string.h"
52 #include "analyzer/program-point.h"
53 #include "analyzer/store.h"
54 #include "analyzer/region-model.h"
55 #include "analyzer/call-summary.h"
56 #include "analyzer/analyzer-selftests.h"
57 #include "stor-layout.h"
59 #if ENABLE_ANALYZER
61 namespace ana {
63 /* Dump SVALS to PP, sorting them to ensure determinism. */
65 static void
66 dump_svalue_set (const hash_set <const svalue *> &svals,
67 pretty_printer *pp, bool simple)
69 auto_vec <const svalue *> v;
70 for (hash_set<const svalue *>::iterator iter = svals.begin ();
71 iter != svals.end (); ++iter)
73 v.safe_push (*iter);
75 v.qsort (svalue::cmp_ptr_ptr);
77 pp_character (pp, '{');
78 const svalue *sval;
79 unsigned i;
80 FOR_EACH_VEC_ELT (v, i, sval)
82 if (i > 0)
83 pp_string (pp, ", ");
84 sval->dump_to_pp (pp, simple);
86 pp_character (pp, '}');
89 /* class uncertainty_t. */
91 /* Dump this object to PP. */
93 void
94 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
96 pp_string (pp, "{m_maybe_bound_svals: ");
97 dump_svalue_set (m_maybe_bound_svals, pp, simple);
99 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
100 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
101 pp_string (pp, "}");
104 /* Dump this object to stderr. */
106 DEBUG_FUNCTION void
107 uncertainty_t::dump (bool simple) const
109 pretty_printer pp;
110 pp_format_decoder (&pp) = default_tree_printer;
111 pp_show_color (&pp) = pp_show_color (global_dc->printer);
112 pp.buffer->stream = stderr;
113 dump_to_pp (&pp, simple);
114 pp_newline (&pp);
115 pp_flush (&pp);
118 /* class binding_key. */
120 const binding_key *
121 binding_key::make (store_manager *mgr, const region *r)
123 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
124 if (offset.symbolic_p ())
125 return mgr->get_symbolic_binding (r);
126 else
128 bit_size_t bit_size;
129 if (r->get_bit_size (&bit_size))
131 /* Must be non-empty. */
132 gcc_assert (bit_size > 0);
133 return mgr->get_concrete_binding (offset.get_bit_offset (),
134 bit_size);
136 else
137 return mgr->get_symbolic_binding (r);
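/* For example (illustrative, assuming a 32-bit "int"): a region for a local
   "int x" has a concrete offset and size, giving a concrete_binding such as
   {bits 0-31}; an element region like "arr[i]" with an unknown "i" has a
   symbolic offset, so we fall back to a symbolic_binding keyed on the
   region itself.  */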
141 /* Dump this binding_key to stderr. */
143 DEBUG_FUNCTION void
144 binding_key::dump (bool simple) const
146 pretty_printer pp;
147 pp_format_decoder (&pp) = default_tree_printer;
148 pp_show_color (&pp) = pp_show_color (global_dc->printer);
149 pp.buffer->stream = stderr;
150 dump_to_pp (&pp, simple);
151 pp_newline (&pp);
152 pp_flush (&pp);
155 /* Get a description of this binding_key. */
157 label_text
158 binding_key::get_desc (bool simple) const
160 pretty_printer pp;
161 pp_format_decoder (&pp) = default_tree_printer;
162 dump_to_pp (&pp, simple);
163 return label_text::take (xstrdup (pp_formatted_text (&pp)));
166 /* qsort callback. */
169 binding_key::cmp_ptrs (const void *p1, const void *p2)
171 const binding_key * const *pk1 = (const binding_key * const *)p1;
172 const binding_key * const *pk2 = (const binding_key * const *)p2;
173 return cmp (*pk1, *pk2);
176 /* Comparator for binding_keys. */
179 binding_key::cmp (const binding_key *k1, const binding_key *k2)
181 int concrete1 = k1->concrete_p ();
182 int concrete2 = k2->concrete_p ();
183 if (int concrete_cmp = concrete1 - concrete2)
184 return concrete_cmp;
185 if (concrete1)
187 const concrete_binding *b1 = (const concrete_binding *)k1;
188 const concrete_binding *b2 = (const concrete_binding *)k2;
189 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
190 b2->get_start_bit_offset (),
191 SIGNED))
192 return start_cmp;
193 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
194 SIGNED);
196 else
198 const symbolic_binding *s1 = (const symbolic_binding *)k1;
199 const symbolic_binding *s2 = (const symbolic_binding *)k2;
200 if (s1 > s2)
201 return 1;
202 if (s1 < s2)
203 return -1;
204 return 0;
208 /* struct bit_range. */
210 void
211 bit_range::dump_to_pp (pretty_printer *pp) const
213 byte_range bytes (0, 0);
214 if (as_byte_range (&bytes))
215 bytes.dump_to_pp (pp);
216 else
218 pp_string (pp, "start: ");
219 pp_wide_int (pp, m_start_bit_offset, SIGNED);
220 pp_string (pp, ", size: ");
221 pp_wide_int (pp, m_size_in_bits, SIGNED);
222 pp_string (pp, ", next: ");
223 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
227 /* Dump this object to stderr. */
229 DEBUG_FUNCTION void
230 bit_range::dump () const
232 pretty_printer pp;
233 pp.buffer->stream = stderr;
234 dump_to_pp (&pp);
235 pp_newline (&pp);
236 pp_flush (&pp);
239 /* If OTHER is a subset of this, return true and, if OUT is
240 non-null, write to *OUT the relative range of OTHER within this.
241 Otherwise return false. */
243 bool
244 bit_range::contains_p (const bit_range &other, bit_range *out) const
246 if (contains_p (other.get_start_bit_offset ())
247 && contains_p (other.get_last_bit_offset ()))
249 if (out)
251 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
252 out->m_size_in_bits = other.m_size_in_bits;
254 return true;
256 else
257 return false;
260 /* If OTHER intersects this, return true and write
261 the relative range of OTHER within THIS to *OUT_THIS,
262 and the relative range of THIS within OTHER to *OUT_OTHER.
263 Otherwise return false. */
265 bool
266 bit_range::intersects_p (const bit_range &other,
267 bit_range *out_this,
268 bit_range *out_other) const
270 if (get_start_bit_offset () < other.get_next_bit_offset ()
271 && other.get_start_bit_offset () < get_next_bit_offset ())
273 bit_offset_t overlap_start
274 = MAX (get_start_bit_offset (),
275 other.get_start_bit_offset ());
276 bit_offset_t overlap_next
277 = MIN (get_next_bit_offset (),
278 other.get_next_bit_offset ());
279 gcc_assert (overlap_next > overlap_start);
280 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
281 *out_this = abs_overlap_bits - get_start_bit_offset ();
282 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
283 return true;
285 else
286 return false;
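/* Worked example (illustrative values): if THIS is bits 8-23 and OTHER is
   bits 16-31, the overlap is bits 16-23, so *OUT_THIS is set to bits 8-15
   (the overlap expressed relative to the start of THIS) and *OUT_OTHER to
   bits 0-7 (relative to the start of OTHER).  */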
290 bit_range::cmp (const bit_range &br1, const bit_range &br2)
292 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
293 br2.m_start_bit_offset))
294 return start_cmp;
296 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
299 /* Offset this range by OFFSET. */
301 bit_range
302 bit_range::operator- (bit_offset_t offset) const
304 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
307 /* If MASK is a contiguous range of set bits, write them
308 to *OUT and return true.
309 Otherwise return false. */
311 bool
312 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
314 unsigned iter_bit_idx = 0;
315 unsigned HOST_WIDE_INT iter_bit_mask = 1;
317 /* Find the first contiguous run of set bits in MASK. */
319 /* Find first set bit in MASK. */
320 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
322 if (mask & iter_bit_mask)
323 break;
324 iter_bit_idx++;
325 iter_bit_mask <<= 1;
327 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
328 /* MASK is zero. */
329 return false;
331 unsigned first_set_iter_bit_idx = iter_bit_idx;
332 unsigned num_set_bits = 1;
333 iter_bit_idx++;
334 iter_bit_mask <<= 1;
336 /* Find next unset bit in MASK. */
337 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
339 if (!(mask & iter_bit_mask))
340 break;
341 num_set_bits++;
342 iter_bit_idx++;
343 iter_bit_mask <<= 1;
345 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
347 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
348 return true;
351 /* We now have the first contiguous run of set bits in MASK.
352 Fail if any other bits are set. */
353 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
355 if (mask & iter_bit_mask)
356 return false;
357 iter_bit_idx++;
358 iter_bit_mask <<= 1;
361 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
362 return true;
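/* Worked examples (illustrative): mask 0x0ff0 has a single contiguous run
   of set bits, so *OUT becomes the range starting at bit 4 with size 8 and
   the function returns true; mask 0x5 has two separate runs (bits 0 and 2),
   so it returns false, as does a mask of zero.  */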
365 /* Attempt to convert this bit_range to a byte_range.
366 Return true if it is possible, writing the result to *OUT.
367 Otherwise return false. */
369 bool
370 bit_range::as_byte_range (byte_range *out) const
372 if (m_start_bit_offset % BITS_PER_UNIT == 0
373 && m_size_in_bits % BITS_PER_UNIT == 0)
375 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
376 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
377 return true;
379 return false;
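/* For example (assuming BITS_PER_UNIT == 8): a bit_range starting at bit 16
   with size 32 converts to bytes 2-5, whereas a bit_range starting at bit 3
   is not byte-aligned and the conversion fails.  */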
382 /* Dump this object to PP. */
384 void
385 byte_range::dump_to_pp (pretty_printer *pp) const
387 if (m_size_in_bytes == 0)
389 pp_string (pp, "empty");
391 else if (m_size_in_bytes == 1)
393 pp_string (pp, "byte ");
394 pp_wide_int (pp, m_start_byte_offset, SIGNED);
396 else
398 pp_string (pp, "bytes ");
399 pp_wide_int (pp, m_start_byte_offset, SIGNED);
400 pp_string (pp, "-");
401 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
405 /* Dump this object to stderr. */
407 DEBUG_FUNCTION void
408 byte_range::dump () const
410 pretty_printer pp;
411 pp.buffer->stream = stderr;
412 dump_to_pp (&pp);
413 pp_newline (&pp);
414 pp_flush (&pp);
417 /* If OTHER is a subset of this, return true and write
418 to *OUT the relative range of OTHER within this.
419 Otherwise return false. */
421 bool
422 byte_range::contains_p (const byte_range &other, byte_range *out) const
424 if (contains_p (other.get_start_byte_offset ())
425 && contains_p (other.get_last_byte_offset ()))
427 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
428 out->m_size_in_bytes = other.m_size_in_bytes;
429 return true;
431 else
432 return false;
435 /* Return true if THIS and OTHER intersect and write the number
436 of bytes both buffers overlap to *OUT_NUM_OVERLAP_BYTES.
438 Otherwise return false. */
440 bool
441 byte_range::intersects_p (const byte_range &other,
442 byte_size_t *out_num_overlap_bytes) const
444 if (get_start_byte_offset () < other.get_next_byte_offset ()
445 && other.get_start_byte_offset () < get_next_byte_offset ())
447 byte_offset_t overlap_start = MAX (get_start_byte_offset (),
448 other.get_start_byte_offset ());
449 byte_offset_t overlap_next = MIN (get_next_byte_offset (),
450 other.get_next_byte_offset ());
451 gcc_assert (overlap_next > overlap_start);
452 *out_num_overlap_bytes = overlap_next - overlap_start;
453 return true;
455 else
456 return false;
459 /* Return true if THIS exceeds OTHER and write the overhanging
460 byte range to OUT_OVERHANGING_BYTE_RANGE. */
462 bool
463 byte_range::exceeds_p (const byte_range &other,
464 byte_range *out_overhanging_byte_range) const
466 gcc_assert (!empty_p ());
468 if (other.get_next_byte_offset () < get_next_byte_offset ())
470 /* THIS definitely exceeds OTHER. */
471 byte_offset_t start = MAX (get_start_byte_offset (),
472 other.get_next_byte_offset ());
473 byte_offset_t size = get_next_byte_offset () - start;
474 gcc_assert (size > 0);
475 out_overhanging_byte_range->m_start_byte_offset = start;
476 out_overhanging_byte_range->m_size_in_bytes = size;
477 return true;
479 else
480 return false;
483 /* Return true if THIS falls short of OFFSET and write the
484 byte range fallen short to OUT_FALL_SHORT_BYTES. */
486 bool
487 byte_range::falls_short_of_p (byte_offset_t offset,
488 byte_range *out_fall_short_bytes) const
490 gcc_assert (!empty_p ());
492 if (get_start_byte_offset () < offset)
494 /* THIS falls short of OFFSET. */
495 byte_offset_t start = get_start_byte_offset ();
496 byte_offset_t size = MIN (offset, get_next_byte_offset ()) - start;
497 gcc_assert (size > 0);
498 out_fall_short_bytes->m_start_byte_offset = start;
499 out_fall_short_bytes->m_size_in_bytes = size;
500 return true;
502 else
503 return false;
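/* Worked example for falls_short_of_p (illustrative values): if THIS is
   bytes 2-9 and OFFSET is 4, then THIS falls short of OFFSET and the range
   written to OUT_FALL_SHORT_BYTES is bytes 2-3.  */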
506 /* qsort comparator for byte ranges. */
509 byte_range::cmp (const byte_range &br1, const byte_range &br2)
511 /* Order first by offset. */
512 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
513 br2.m_start_byte_offset))
514 return start_cmp;
516 /* ...then by size. */
517 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
520 /* class concrete_binding : public binding_key. */
522 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
524 void
525 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
527 m_bit_range.dump_to_pp (pp);
530 /* Return true if this binding overlaps with OTHER. */
532 bool
533 concrete_binding::overlaps_p (const concrete_binding &other) const
535 if (get_start_bit_offset () < other.get_next_bit_offset ()
536 && get_next_bit_offset () > other.get_start_bit_offset ())
537 return true;
538 return false;
541 /* Comparator for use by vec<const concrete_binding *>::qsort. */
544 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
546 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
547 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
549 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
552 /* class symbolic_binding : public binding_key. */
554 void
555 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
557 //binding_key::dump_to_pp (pp, simple);
558 pp_string (pp, "region: ");
559 m_region->dump_to_pp (pp, simple);
562 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
565 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
567 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
568 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
570 return region::cmp_ids (b1->get_region (), b2->get_region ());
573 /* The store is oblivious to the types of the svalues bound within
574 it: any type can get bound at any location.
575 Simplify any casts before binding.
577 For example, if we have:
578 struct big { int ia[1024]; };
579 struct big src, dst;
580 memcpy (&dst, &src, sizeof (struct big));
581 this reaches us in gimple form as:
582 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
583 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
584 Using cast_region when handling the MEM_REF would give us:
585 INIT_VAL(CAST_REG(unsigned char[4096], src))
586 as rhs_sval, but we can fold that into a cast svalue:
587 CAST(unsigned char[4096], INIT_VAL(src))
588 We can discard that cast from the svalue when binding it in
589 the store for "dst", and simply store:
590 cluster for: dst
591 key: {kind: direct, start: 0, size: 32768, next: 32768}
592 value: ‘struct big’ {INIT_VAL(src)}. */
594 static const svalue *
595 simplify_for_binding (const svalue *sval)
597 if (const svalue *cast_sval = sval->maybe_undo_cast ())
598 sval = cast_sval;
599 return sval;
602 /* class binding_map. */
604 /* binding_map's copy ctor. */
606 binding_map::binding_map (const binding_map &other)
607 : m_map (other.m_map)
611 /* binding_map's assignment operator. */
613 binding_map&
614 binding_map::operator=(const binding_map &other)
616 /* For now, assume we only ever copy to an empty cluster. */
617 gcc_assert (m_map.elements () == 0);
618 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
619 ++iter)
621 const binding_key *key = (*iter).first;
622 const svalue *sval = (*iter).second;
623 m_map.put (key, sval);
625 return *this;
628 /* binding_map's equality operator. */
630 bool
631 binding_map::operator== (const binding_map &other) const
633 if (m_map.elements () != other.m_map.elements ())
634 return false;
636 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
638 const binding_key *key = (*iter).first;
639 const svalue *sval = (*iter).second;
640 const svalue **other_slot
641 = const_cast <map_t &> (other.m_map).get (key);
642 if (other_slot == NULL)
643 return false;
644 if (sval != *other_slot)
645 return false;
647 gcc_checking_assert (hash () == other.hash ());
648 return true;
651 /* Generate a hash value for this binding_map. */
653 hashval_t
654 binding_map::hash () const
656 hashval_t result = 0;
657 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
659 /* Use a new hasher for each key to avoid depending on the ordering
660 of keys when accumulating the result. */
661 inchash::hash hstate;
662 hstate.add_ptr ((*iter).first);
663 hstate.add_ptr ((*iter).second);
664 result ^= hstate.end ();
666 return result;
669 /* Dump a representation of this binding_map to PP.
670 SIMPLE controls how values and regions are to be printed.
671 If MULTILINE, then split the dump over multiple lines and
672 use whitespace for readability, otherwise put all on one line. */
674 void
675 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
676 bool multiline) const
678 auto_vec <const binding_key *> binding_keys;
679 for (map_t::iterator iter = m_map.begin ();
680 iter != m_map.end (); ++iter)
682 const binding_key *key = (*iter).first;
683 binding_keys.safe_push (key);
685 binding_keys.qsort (binding_key::cmp_ptrs);
687 const binding_key *key;
688 unsigned i;
689 FOR_EACH_VEC_ELT (binding_keys, i, key)
691 const svalue *value = *const_cast <map_t &> (m_map).get (key);
692 if (multiline)
694 pp_string (pp, " key: {");
695 key->dump_to_pp (pp, simple);
696 pp_string (pp, "}");
697 pp_newline (pp);
698 pp_string (pp, " value: ");
699 if (tree t = value->get_type ())
700 dump_quoted_tree (pp, t);
701 pp_string (pp, " {");
702 value->dump_to_pp (pp, simple);
703 pp_string (pp, "}");
704 pp_newline (pp);
706 else
708 if (i > 0)
709 pp_string (pp, ", ");
710 pp_string (pp, "binding key: {");
711 key->dump_to_pp (pp, simple);
712 pp_string (pp, "}, value: {");
713 value->dump_to_pp (pp, simple);
714 pp_string (pp, "}");
719 /* Dump a multiline representation of this binding_map to stderr. */
721 DEBUG_FUNCTION void
722 binding_map::dump (bool simple) const
724 pretty_printer pp;
725 pp_format_decoder (&pp) = default_tree_printer;
726 pp_show_color (&pp) = pp_show_color (global_dc->printer);
727 pp.buffer->stream = stderr;
728 dump_to_pp (&pp, simple, true);
729 pp_newline (&pp);
730 pp_flush (&pp);
733 /* Return a new json::object of the form
734 {KEY_DESC : SVALUE_DESC,
735 ...for the various key/value pairs in this binding_map}. */
737 json::object *
738 binding_map::to_json () const
740 json::object *map_obj = new json::object ();
742 auto_vec <const binding_key *> binding_keys;
743 for (map_t::iterator iter = m_map.begin ();
744 iter != m_map.end (); ++iter)
746 const binding_key *key = (*iter).first;
747 binding_keys.safe_push (key);
749 binding_keys.qsort (binding_key::cmp_ptrs);
751 const binding_key *key;
752 unsigned i;
753 FOR_EACH_VEC_ELT (binding_keys, i, key)
755 const svalue *value = *const_cast <map_t &> (m_map).get (key);
756 label_text key_desc = key->get_desc ();
757 map_obj->set (key_desc.get (), value->to_json ());
760 return map_obj;
763 /* Comparator for imposing an order on binding_maps. */
766 binding_map::cmp (const binding_map &map1, const binding_map &map2)
768 if (int count_cmp = map1.elements () - map2.elements ())
769 return count_cmp;
771 auto_vec <const binding_key *> keys1 (map1.elements ());
772 for (map_t::iterator iter = map1.begin ();
773 iter != map1.end (); ++iter)
774 keys1.quick_push ((*iter).first);
775 keys1.qsort (binding_key::cmp_ptrs);
777 auto_vec <const binding_key *> keys2 (map2.elements ());
778 for (map_t::iterator iter = map2.begin ();
779 iter != map2.end (); ++iter)
780 keys2.quick_push ((*iter).first);
781 keys2.qsort (binding_key::cmp_ptrs);
783 for (size_t i = 0; i < keys1.length (); i++)
785 const binding_key *k1 = keys1[i];
786 const binding_key *k2 = keys2[i];
787 if (int key_cmp = binding_key::cmp (k1, k2))
788 return key_cmp;
789 gcc_assert (k1 == k2);
790 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
791 return sval_cmp;
794 return 0;
797 /* Get the child region of PARENT_REG based upon INDEX within a
798 CONSTRUCTOR. */
800 static const region *
801 get_subregion_within_ctor (const region *parent_reg, tree index,
802 region_model_manager *mgr)
804 switch (TREE_CODE (index))
806 default:
807 gcc_unreachable ();
808 case INTEGER_CST:
810 const svalue *index_sval
811 = mgr->get_or_create_constant_svalue (index);
812 return mgr->get_element_region (parent_reg,
813 TREE_TYPE (parent_reg->get_type ()),
814 index_sval);
816 break;
817 case FIELD_DECL:
818 return mgr->get_field_region (parent_reg, index);
822 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
824 static const svalue *
825 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
827 /* Reuse the get_rvalue logic from region_model. */
828 region_model m (mgr);
829 return m.get_rvalue (path_var (val, 0), NULL);
832 /* Bind values from CONSTRUCTOR to this map, relative to
833 PARENT_REG's relationship to its base region.
834 Return true if successful, false if there was a problem (e.g. due
835 to hitting a complexity limit). */
837 bool
838 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
839 region_model_manager *mgr)
841 gcc_assert (parent_reg);
842 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
844 unsigned ix;
845 tree index;
846 tree val;
847 tree parent_type = parent_reg->get_type ();
848 tree field;
849 if (TREE_CODE (parent_type) == RECORD_TYPE)
850 field = TYPE_FIELDS (parent_type);
851 else
852 field = NULL_TREE;
853 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
855 if (!index)
857 /* If index is NULL, then iterate through the fields for
858 a RECORD_TYPE, or use an INTEGER_CST otherwise.
859 Compare with similar logic in output_constructor. */
860 if (field)
862 index = field;
863 field = DECL_CHAIN (field);
865 else
866 index = build_int_cst (integer_type_node, ix);
868 else if (TREE_CODE (index) == RANGE_EXPR)
870 tree min_index = TREE_OPERAND (index, 0);
871 tree max_index = TREE_OPERAND (index, 1);
872 if (min_index == max_index)
874 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
875 min_index, val))
876 return false;
878 else
880 if (!apply_ctor_val_to_range (parent_reg, mgr,
881 min_index, max_index, val))
882 return false;
884 continue;
886 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
887 return false;
889 return true;
 892 /* Bind the value VAL into the range of elements within PARENT_REG
893 from MIN_INDEX to MAX_INDEX (including endpoints).
894 For use in handling RANGE_EXPR within a CONSTRUCTOR.
895 Return true if successful, false if there was a problem (e.g. due
896 to hitting a complexity limit). */
898 bool
899 binding_map::apply_ctor_val_to_range (const region *parent_reg,
900 region_model_manager *mgr,
901 tree min_index, tree max_index,
902 tree val)
904 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
905 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
907 /* Generate a binding key for the range. */
908 const region *min_element
909 = get_subregion_within_ctor (parent_reg, min_index, mgr);
910 const region *max_element
911 = get_subregion_within_ctor (parent_reg, max_index, mgr);
912 region_offset min_offset = min_element->get_offset (mgr);
913 if (min_offset.symbolic_p ())
914 return false;
915 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
916 store_manager *smgr = mgr->get_store_manager ();
917 if (max_element->empty_p ())
918 return false;
919 const binding_key *max_element_key = binding_key::make (smgr, max_element);
920 if (max_element_key->symbolic_p ())
921 return false;
922 const concrete_binding *max_element_ckey
923 = max_element_key->dyn_cast_concrete_binding ();
924 bit_size_t range_size_in_bits
925 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
926 const concrete_binding *range_key
927 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
928 if (range_key->symbolic_p ())
929 return false;
931 /* Get the value. */
932 if (TREE_CODE (val) == CONSTRUCTOR)
933 return false;
934 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
936 /* Bind the value to the range. */
937 put (range_key, sval);
938 return true;
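/* For example (illustrative, assuming a 32-bit "int"), the GNU C
   designated range initializer:
     int arr[8] = { [2 ... 5] = 42 };
   reaches us as a CONSTRUCTOR with a RANGE_EXPR index from 2 to 5; the
   code above binds a single concrete key covering bits 64-191 (bytes 8-23
   of the array, i.e. arr[2] through arr[5]) to the constant svalue
   for 42.  */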
 941 /* Bind the value VAL into INDEX within PARENT_REG.
942 For use in handling a pair of entries within a CONSTRUCTOR.
943 Return true if successful, false if there was a problem (e.g. due
944 to hitting a complexity limit). */
946 bool
947 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
948 region_model_manager *mgr,
949 tree index, tree val)
951 const region *child_reg
952 = get_subregion_within_ctor (parent_reg, index, mgr);
953 if (TREE_CODE (val) == CONSTRUCTOR)
954 return apply_ctor_to_region (child_reg, val, mgr);
955 else
957 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
958 if (child_reg->empty_p ())
959 return false;
960 const binding_key *k
961 = binding_key::make (mgr->get_store_manager (), child_reg);
962 /* Handle the case where we have an unknown size for child_reg
963 (e.g. due to it being a trailing field with incomplete array
 964 type). */
965 if (!k->concrete_p ())
967 /* Assume that sval has a well-defined size for this case. */
968 tree sval_type = sval->get_type ();
969 gcc_assert (sval_type);
970 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
971 gcc_assert (sval_byte_size != -1);
972 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
973 /* Get offset of child relative to base region. */
974 region_offset child_base_offset = child_reg->get_offset (mgr);
975 if (child_base_offset.symbolic_p ())
976 return false;
977 /* Convert to an offset relative to the parent region. */
978 region_offset parent_base_offset = parent_reg->get_offset (mgr);
979 gcc_assert (!parent_base_offset.symbolic_p ());
980 bit_offset_t child_parent_offset
981 = (child_base_offset.get_bit_offset ()
982 - parent_base_offset.get_bit_offset ());
983 /* Create a concrete key for the child within the parent. */
984 k = mgr->get_store_manager ()->get_concrete_binding
985 (child_parent_offset, sval_bit_size);
987 gcc_assert (k->concrete_p ());
988 put (k, sval);
989 return true;
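/* An example of the non-concrete-key fallback above (illustrative): with a
   trailing field of incomplete array type, such as
     struct str { int len; char data[]; };
     struct str s = { 3, "abc" };
   the "data" field has no known size, so its key would be symbolic; the
   code instead uses the size of the initializer's type (char[4] here) to
   build a concrete key at the field's offset within the parent.  */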
993 /* Populate OUT with all bindings within this map that overlap KEY. */
995 void
996 binding_map::get_overlapping_bindings (const binding_key *key,
997 auto_vec<const binding_key *> *out)
999 for (auto iter : *this)
1001 const binding_key *iter_key = iter.first;
1002 if (const concrete_binding *ckey
1003 = key->dyn_cast_concrete_binding ())
1005 if (const concrete_binding *iter_ckey
1006 = iter_key->dyn_cast_concrete_binding ())
1008 if (ckey->overlaps_p (*iter_ckey))
1009 out->safe_push (iter_key);
1011 else
1013 /* Assume overlap. */
1014 out->safe_push (iter_key);
1017 else
1019 /* Assume overlap. */
1020 out->safe_push (iter_key);
1025 /* Remove, truncate, and/or split any bindings within this map that
1026 overlap DROP_KEY.
1028 For example, if we have:
1030 +------------------------------------+
1031 | old binding |
1032 +------------------------------------+
1034 which is to be overwritten with:
1036 .......+----------------------+.......
1037 .......| new binding |.......
1038 .......+----------------------+.......
1040 this function "cuts a hole" out of the old binding:
1042 +------+......................+------+
1043 |prefix| hole for new binding |suffix|
1044 +------+......................+------+
1046 into which the new binding can be added without
1047 overlapping the prefix or suffix.
1049 The prefix and suffix (if added) will be bound to the pertinent
1050 parts of the value of the old binding.
1052 For example, given:
1053 struct s5
1055 char arr[8];
1057 void test_5 (struct s5 *p)
1059 struct s5 f = *p;
1060 f.arr[3] = 42;
1062 then after the "f = *p;" we have:
1063 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1064 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1065 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1066 giving:
1067 cluster for: f
1068 key: {bytes 0-2}
1069 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1070 key: {bytes 4-7}
1071 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1072 punching a hole into which the new value can be written at byte 3:
1073 cluster for: f
1074 key: {bytes 0-2}
1075 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1076 key: {byte 3}
1077 value: 'char' {(char)42}
1078 key: {bytes 4-7}
1079 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1081 If UNCERTAINTY is non-NULL, use it to record any svalues that
1082 were removed, as being maybe-bound.
1084 If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
1085 were removed as being maybe-live.
1087 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1088 in the map, due to one or both of the underlying clusters being
1089 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1090 concrete binding it could actually be referring to the same memory as
1091 distinct concrete bindings in the map. Remove all bindings, but
1092 register any svalues with *UNCERTAINTY. */
1094 void
1095 binding_map::remove_overlapping_bindings (store_manager *mgr,
1096 const binding_key *drop_key,
1097 uncertainty_t *uncertainty,
1098 svalue_set *maybe_live_values,
1099 bool always_overlap)
1101 /* Get the bindings of interest within this map. */
1102 auto_vec<const binding_key *> bindings;
1103 if (always_overlap)
1104 for (auto iter : *this)
1105 bindings.safe_push (iter.first); /* Add all bindings. */
1106 else
1107 /* Just add overlapping bindings. */
1108 get_overlapping_bindings (drop_key, &bindings);
1110 unsigned i;
1111 const binding_key *iter_binding;
1112 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1114 /* Record any svalues that were removed to *UNCERTAINTY as being
1115 maybe-bound, provided at least some part of the binding is symbolic.
1117 Specifically, if at least one of the bindings is symbolic, or we
1118 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1119 regions, then we don't know that the svalue has been overwritten,
1120 and should record that to *UNCERTAINTY.
1122 However, if we have concrete keys accessing within the same symbolic
1123 region, then we *know* that the symbolic region has been overwritten,
1124 so we don't record it to *UNCERTAINTY, as this could be a genuine
1125 leak. */
1126 const svalue *old_sval = get (iter_binding);
1127 if (uncertainty
1128 && (drop_key->symbolic_p ()
1129 || iter_binding->symbolic_p ()
1130 || always_overlap))
1131 uncertainty->on_maybe_bound_sval (old_sval);
1133 /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
1134 maybe-live. */
1135 if (maybe_live_values)
1136 maybe_live_values->add (old_sval);
1138 /* Begin by removing the old binding. */
1139 m_map.remove (iter_binding);
1141 /* Don't attempt to handle prefixes/suffixes for the
1142 "always_overlap" case; everything's being removed. */
1143 if (always_overlap)
1144 continue;
1146 /* Now potentially add the prefix and suffix. */
1147 if (const concrete_binding *drop_ckey
1148 = drop_key->dyn_cast_concrete_binding ())
1149 if (const concrete_binding *iter_ckey
1150 = iter_binding->dyn_cast_concrete_binding ())
1152 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1154 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1155 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1157 if (iter_bits.get_start_bit_offset ()
1158 < drop_bits.get_start_bit_offset ())
1160 /* We have a truncated prefix. */
1161 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1162 (drop_bits.get_start_bit_offset ()
1163 - iter_bits.get_start_bit_offset ()));
1164 const concrete_binding *prefix_key
1165 = mgr->get_concrete_binding (prefix_bits);
1166 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1167 const svalue *prefix_sval
1168 = old_sval->extract_bit_range (NULL_TREE,
1169 rel_prefix,
1170 mgr->get_svalue_manager ());
1171 m_map.put (prefix_key, prefix_sval);
1174 if (iter_bits.get_next_bit_offset ()
1175 > drop_bits.get_next_bit_offset ())
1177 /* We have a truncated suffix. */
1178 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1179 (iter_bits.get_next_bit_offset ()
1180 - drop_bits.get_next_bit_offset ()));
1181 const concrete_binding *suffix_key
1182 = mgr->get_concrete_binding (suffix_bits);
1183 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1184 - iter_bits.get_start_bit_offset (),
1185 suffix_bits.m_size_in_bits);
1186 const svalue *suffix_sval
1187 = old_sval->extract_bit_range (NULL_TREE,
1188 rel_suffix,
1189 mgr->get_svalue_manager ());
1190 m_map.put (suffix_key, suffix_sval);
1196 /* class binding_cluster. */
1198 binding_cluster::binding_cluster (const region *base_region)
1199 : m_base_region (base_region), m_map (),
1200 m_escaped (false), m_touched (false)
1204 /* binding_cluster's copy ctor. */
1206 binding_cluster::binding_cluster (const binding_cluster &other)
1207 : m_base_region (other.m_base_region), m_map (other.m_map),
1208 m_escaped (other.m_escaped), m_touched (other.m_touched)
1212 /* binding_cluster's assignment operator. */
1214 binding_cluster&
1215 binding_cluster::operator= (const binding_cluster &other)
1217 gcc_assert (m_base_region == other.m_base_region);
1218 m_map = other.m_map;
1219 m_escaped = other.m_escaped;
1220 m_touched = other.m_touched;
1221 return *this;
1224 /* binding_cluster's equality operator. */
1226 bool
1227 binding_cluster::operator== (const binding_cluster &other) const
1229 if (m_map != other.m_map)
1230 return false;
1232 if (m_base_region != other.m_base_region)
1233 return false;
1235 if (m_escaped != other.m_escaped)
1236 return false;
1238 if (m_touched != other.m_touched)
1239 return false;
1241 gcc_checking_assert (hash () == other.hash ());
1243 return true;
1246 /* Generate a hash value for this binding_cluster. */
1248 hashval_t
1249 binding_cluster::hash () const
1251 return m_map.hash ();
1254 /* Return true if this binding_cluster is symbolic
1255 i.e. its base region is symbolic. */
1257 bool
1258 binding_cluster::symbolic_p () const
1260 return m_base_region->get_kind () == RK_SYMBOLIC;
1263 /* Dump a representation of this binding_cluster to PP.
1264 SIMPLE controls how values and regions are to be printed.
1265 If MULTILINE, then split the dump over multiple lines and
1266 use whitespace for readability, otherwise put all on one line. */
1268 void
1269 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1270 bool multiline) const
1272 if (m_escaped)
1274 if (multiline)
1276 pp_string (pp, " ESCAPED");
1277 pp_newline (pp);
1279 else
1280 pp_string (pp, "(ESCAPED)");
1282 if (m_touched)
1284 if (multiline)
1286 pp_string (pp, " TOUCHED");
1287 pp_newline (pp);
1289 else
1290 pp_string (pp, "(TOUCHED)");
1293 m_map.dump_to_pp (pp, simple, multiline);
1296 /* Dump a multiline representation of this binding_cluster to stderr. */
1298 DEBUG_FUNCTION void
1299 binding_cluster::dump (bool simple) const
1301 pretty_printer pp;
1302 pp_format_decoder (&pp) = default_tree_printer;
1303 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1304 pp.buffer->stream = stderr;
1305 pp_string (&pp, " cluster for: ");
1306 m_base_region->dump_to_pp (&pp, simple);
1307 pp_string (&pp, ": ");
1308 pp_newline (&pp);
1309 dump_to_pp (&pp, simple, true);
1310 pp_newline (&pp);
1311 pp_flush (&pp);
1314 /* Assert that this object is valid. */
1316 void
1317 binding_cluster::validate () const
1319 int num_symbolic = 0;
1320 int num_concrete = 0;
1321 for (auto iter : m_map)
1323 if (iter.first->symbolic_p ())
1324 num_symbolic++;
1325 else
1326 num_concrete++;
1328 /* We shouldn't have more than one symbolic key per cluster
1329 (or one would have clobbered the other). */
1330 gcc_assert (num_symbolic < 2);
1331 /* We can't have both concrete and symbolic keys. */
1332 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1335 /* Return a new json::object of the form
1336 {"escaped": true/false,
1337 "touched": true/false,
1338 "map" : object for the binding_map. */
1340 json::object *
1341 binding_cluster::to_json () const
1343 json::object *cluster_obj = new json::object ();
1345 cluster_obj->set ("escaped", new json::literal (m_escaped));
1346 cluster_obj->set ("touched", new json::literal (m_touched));
1347 cluster_obj->set ("map", m_map.to_json ());
1349 return cluster_obj;
 1352 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1353 compound_sval. */
1355 void
1356 binding_cluster::bind (store_manager *mgr,
1357 const region *reg, const svalue *sval)
1359 if (const compound_svalue *compound_sval
1360 = sval->dyn_cast_compound_svalue ())
1362 bind_compound_sval (mgr, reg, compound_sval);
1363 return;
1366 if (reg->empty_p ())
1367 return;
1368 const binding_key *binding = binding_key::make (mgr, reg);
1369 bind_key (binding, sval);
1372 /* Bind SVAL to KEY.
1373 Unpacking of compound_svalues should already have been done by the
1374 time this is called. */
1376 void
1377 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1379 gcc_assert (sval->get_kind () != SK_COMPOUND);
1381 m_map.put (key, sval);
1382 if (key->symbolic_p ())
1383 m_touched = true;
1386 /* Subroutine of binding_cluster::bind.
1387 Unpack compound_svals when binding them, so that we bind them
1388 element-wise. */
1390 void
1391 binding_cluster::bind_compound_sval (store_manager *mgr,
1392 const region *reg,
1393 const compound_svalue *compound_sval)
1395 region_offset reg_offset
1396 = reg->get_offset (mgr->get_svalue_manager ());
1397 if (reg_offset.symbolic_p ())
1399 m_touched = true;
1400 clobber_region (mgr, reg);
1401 return;
1404 for (map_t::iterator iter = compound_sval->begin ();
1405 iter != compound_sval->end (); ++iter)
1407 const binding_key *iter_key = (*iter).first;
1408 const svalue *iter_sval = (*iter).second;
1410 if (const concrete_binding *concrete_key
1411 = iter_key->dyn_cast_concrete_binding ())
1413 bit_offset_t effective_start
1414 = (concrete_key->get_start_bit_offset ()
1415 + reg_offset.get_bit_offset ());
1416 const concrete_binding *effective_concrete_key
1417 = mgr->get_concrete_binding (effective_start,
1418 concrete_key->get_size_in_bits ());
1419 bind_key (effective_concrete_key, iter_sval);
1421 else
1422 gcc_unreachable ();
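/* For example (illustrative), binding a compound value whose own map is
   {bits 0-31: A, bits 32-63: B} into a field at bit offset 64 of this
   cluster results in the element-wise bindings {bits 64-95: A,
   bits 96-127: B}, each key shifted by the field's offset.  */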
1426 /* Remove all bindings overlapping REG within this cluster. */
1428 void
1429 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1431 remove_overlapping_bindings (mgr, reg, NULL, NULL);
1434 /* Remove any bindings for REG within this cluster. */
1436 void
1437 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1439 gcc_assert (reg->get_kind () == RK_DECL);
1440 if (reg->empty_p ())
1441 return;
1442 const binding_key *binding
1443 = binding_key::make (mgr, const_cast<region *> (reg));
1444 m_map.remove (binding);
1447 /* Clobber REG and fill it with repeated copies of SVAL. */
1449 void
1450 binding_cluster::fill_region (store_manager *mgr,
1451 const region *reg,
1452 const svalue *sval)
1454 clobber_region (mgr, reg);
1456 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1457 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1458 const svalue *fill_sval
1459 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1460 byte_size_sval, sval);
1461 bind (mgr, reg, fill_sval);
1464 /* Clobber REG within this cluster and fill it with zeroes. */
1466 void
1467 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1469 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1470 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1471 fill_region (mgr, reg, zero_sval);
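/* For example (illustrative), zero-filling a "char buf[16]" region (as
   might happen when modeling something like "memset (buf, 0, 16)") binds
   the whole region to a repeated svalue: sixteen copies of (char)0.  */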
1474 /* Mark REG_TO_BIND within this cluster as being unknown.
1476 Remove any bindings overlapping REG_FOR_OVERLAP.
1477 If UNCERTAINTY is non-NULL, use it to record any svalues that
1478 had bindings to them removed, as being maybe-bound.
1479 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1480 had bindings to them removed, as being maybe-live.
1482 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1483 store::mark_region_as_unknown, but are different in
1484 store::set_value's alias handling, for handling the case where
1485 we have a write to a symbolic REG_FOR_OVERLAP. */
1487 void
1488 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1489 const region *reg_to_bind,
1490 const region *reg_for_overlap,
1491 uncertainty_t *uncertainty,
1492 svalue_set *maybe_live_values)
1494 if (reg_to_bind->empty_p ())
1495 return;
1497 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty,
1498 maybe_live_values);
1500 /* Add a default binding to "unknown". */
1501 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1502 const svalue *sval
1503 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1504 bind (mgr, reg_to_bind, sval);
1507 /* Purge state involving SVAL. */
1509 void
1510 binding_cluster::purge_state_involving (const svalue *sval,
1511 region_model_manager *sval_mgr)
1513 auto_vec<const binding_key *> to_remove;
1514 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1515 for (auto iter : m_map)
1517 const binding_key *iter_key = iter.first;
1518 if (const symbolic_binding *symbolic_key
1519 = iter_key->dyn_cast_symbolic_binding ())
1521 const region *reg = symbolic_key->get_region ();
1522 if (reg->involves_p (sval))
1523 to_remove.safe_push (iter_key);
1525 const svalue *iter_sval = iter.second;
1526 if (iter_sval->involves_p (sval))
1527 to_make_unknown.safe_push (std::make_pair(iter_key,
1528 iter_sval->get_type ()));
1530 for (auto iter : to_remove)
1532 m_map.remove (iter);
1533 m_touched = true;
1535 for (auto iter : to_make_unknown)
1537 const svalue *new_sval
1538 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1539 m_map.put (iter.first, new_sval);
 1543 /* Get any SVAL bound to REG within this cluster,
1544 without checking parent regions of REG. */
1546 const svalue *
1547 binding_cluster::get_binding (store_manager *mgr,
1548 const region *reg) const
1550 if (reg->empty_p ())
1551 return NULL;
1552 const binding_key *reg_binding = binding_key::make (mgr, reg);
1553 const svalue *sval = m_map.get (reg_binding);
1554 if (sval)
1556 /* If we have a struct with a single field, then the binding of
1557 the field will equal that of the struct, and looking up e.g.
1558 PARENT_REG.field within:
1559 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1560 will erroneously return INIT_VAL(OTHER_REG), rather than
1561 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1562 Fix this issue by iterating upwards whilst the bindings are equal,
1563 expressing the lookups as subvalues.
1564 We have to gather a list of subregion accesses, then walk it
1565 in reverse to get the subvalues. */
1566 auto_vec<const region *> regions;
1567 while (const region *parent_reg = reg->get_parent_region ())
1569 const binding_key *parent_reg_binding
1570 = binding_key::make (mgr, parent_reg);
1571 if (parent_reg_binding == reg_binding
1572 && sval->get_type ()
1573 && reg->get_type ()
1574 && sval->get_type () != reg->get_type ())
1576 regions.safe_push (reg);
1577 reg = parent_reg;
1579 else
1580 break;
1582 if (sval->get_type ()
1583 && reg->get_type ()
1584 && sval->get_type () == reg->get_type ())
1586 unsigned i;
1587 const region *iter_reg;
1588 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1590 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1591 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1592 sval, iter_reg);
1596 return sval;
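/* Worked example of the single-field case handled above (illustrative):
     struct t { int field; };
     struct t a, b;
     ...
     b = a;
   gives "cluster for b: {bits 0-31}: INIT_VAL(a)".  A subsequent read of
   "b.field" finds the same key {bits 0-31}, but since the bound value has
   type "struct t" rather than "int", the loop above walks up to the parent
   and re-expresses the result as a sub_svalue of INIT_VAL(a) for "field",
   i.e. INIT_VAL(a.field).  */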
1599 /* Get any SVAL bound to REG within this cluster,
1600 either directly for REG, or recursively checking for bindings within
1601 parent regions and extracting subvalues if need be. */
1603 const svalue *
1604 binding_cluster::get_binding_recursive (store_manager *mgr,
1605 const region *reg) const
1607 if (const svalue *sval = get_binding (mgr, reg))
1608 return sval;
1609 if (reg != m_base_region)
1610 if (const region *parent_reg = reg->get_parent_region ())
1611 if (const svalue *parent_sval
1612 = get_binding_recursive (mgr, parent_reg))
1614 /* Extract child svalue from parent svalue. */
1615 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1616 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1617 parent_sval, reg);
1619 return NULL;
1622 /* Get any value bound for REG within this cluster. */
1624 const svalue *
1625 binding_cluster::get_any_binding (store_manager *mgr,
1626 const region *reg) const
1628 /* Look for a direct binding. */
1629 if (const svalue *direct_sval
1630 = get_binding_recursive (mgr, reg))
1631 return direct_sval;
1633 /* If we had a write to a cluster of unknown size, we might
1634 have a self-binding of the whole base region with an svalue,
1635 where the base region is symbolic.
1636 Handle such cases by returning sub_svalue instances. */
1637 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1639 /* Extract child svalue from parent svalue. */
1640 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1641 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1642 cluster_sval, reg);
1645 /* If this cluster has been touched by a symbolic write, then the content
1646 of any subregion not currently specifically bound is "UNKNOWN". */
1647 if (m_touched)
1649 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1650 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1653 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1654 then we don't know if we're reading those values or not, so the result
1655 is also "UNKNOWN". */
1656 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1657 && m_map.elements () > 0)
1659 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1660 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1663 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1664 return compound_sval;
1666 /* Otherwise, the initial value, or uninitialized. */
1667 return NULL;
1670 /* Attempt to get a compound_svalue for the bindings within the cluster
1671 affecting REG (which could be the base region itself).
 1673 Create a compound_svalue with the subset of bindings that affect REG,
1674 offsetting them so that the offsets are relative to the start of REG
1675 within the cluster.
1677 For example, REG could be one element within an array of structs.
1679 Return the resulting compound_svalue, or NULL if there's a problem. */
1681 const svalue *
1682 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1683 const region *reg) const
1685 region_offset cluster_offset
1686 = m_base_region->get_offset (mgr->get_svalue_manager ());
1687 if (cluster_offset.symbolic_p ())
1688 return NULL;
1689 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1690 if (reg_offset.symbolic_p ())
1691 return NULL;
1693 if (reg->empty_p ())
1694 return NULL;
1696 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
 1698 /* We will build the result map in two parts:
1699 (a) result_map, holding the concrete keys from this cluster,
1701 (b) default_map, holding the initial values for the region
1702 (e.g. uninitialized, initializer values, or zero), unless this
1703 cluster has been touched.
1705 We will populate (a), and as we do, clobber (b), trimming and
1706 splitting its bindings as necessary.
1707 Finally, we will merge (b) into (a), giving a concrete map
1708 that merges both the initial values and the bound values from
1709 the binding_cluster.
 1710 Doing it this way reduces N for the O(N^2) intersection-finding;
 1711 perhaps we should have a spatially-organized data structure for
 1712 concrete keys, though. */
1714 binding_map result_map;
1715 binding_map default_map;
1717 /* Set up default values in default_map. */
1718 const svalue *default_sval;
1719 if (m_touched)
1720 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1721 else
1722 default_sval = sval_mgr->get_or_create_initial_value (reg);
1723 const binding_key *default_key = binding_key::make (mgr, reg);
1724 default_map.put (default_key, default_sval);
1726 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1728 const binding_key *key = (*iter).first;
1729 const svalue *sval = (*iter).second;
1731 if (const concrete_binding *concrete_key
1732 = key->dyn_cast_concrete_binding ())
1734 const bit_range &bound_range = concrete_key->get_bit_range ();
1736 bit_size_t reg_bit_size;
1737 if (!reg->get_bit_size (&reg_bit_size))
1738 return NULL;
1740 bit_range reg_range (reg_offset.get_bit_offset (),
1741 reg_bit_size);
1743 /* Skip bindings that are outside the bit range of REG. */
1744 if (!bound_range.intersects_p (reg_range))
1745 continue;
1747 /* We shouldn't have an exact match; that should have been
1748 handled already. */
1749 gcc_assert (!(reg_range == bound_range));
1751 bit_range subrange (0, 0);
1752 if (reg_range.contains_p (bound_range, &subrange))
1754 /* We have a bound range fully within REG.
1755 Add it to map, offsetting accordingly. */
1757 /* Get offset of KEY relative to REG, rather than to
1758 the cluster. */
1759 const concrete_binding *offset_concrete_key
1760 = mgr->get_concrete_binding (subrange);
1761 result_map.put (offset_concrete_key, sval);
 1763 /* Clobber default_map, removing/trimming/splitting where
1764 it overlaps with offset_concrete_key. */
1765 default_map.remove_overlapping_bindings (mgr,
1766 offset_concrete_key,
1767 NULL, NULL, false);
1769 else if (bound_range.contains_p (reg_range, &subrange))
1771 /* REG is fully within the bound range, but
1772 is not equal to it; we're extracting a subvalue. */
1773 return sval->extract_bit_range (reg->get_type (),
1774 subrange,
1775 mgr->get_svalue_manager ());
1777 else
1779 /* REG and the bound range partially overlap. */
1780 bit_range reg_subrange (0, 0);
1781 bit_range bound_subrange (0, 0);
1782 reg_range.intersects_p (bound_range,
1783 &reg_subrange, &bound_subrange);
1785 /* Get the bits from the bound value for the bits at the
1786 intersection (relative to the bound value). */
1787 const svalue *overlap_sval
1788 = sval->extract_bit_range (NULL_TREE,
1789 bound_subrange,
1790 mgr->get_svalue_manager ());
1792 /* Get key for overlap, relative to the REG. */
1793 const concrete_binding *overlap_concrete_key
1794 = mgr->get_concrete_binding (reg_subrange);
1795 result_map.put (overlap_concrete_key, overlap_sval);
 1797 /* Clobber default_map, removing/trimming/splitting where
1798 it overlaps with overlap_concrete_key. */
1799 default_map.remove_overlapping_bindings (mgr,
1800 overlap_concrete_key,
1801 NULL, NULL, false);
1804 else
1805 /* Can't handle symbolic bindings. */
1806 return NULL;
1809 if (result_map.elements () == 0)
1810 return NULL;
1812 /* Merge any bindings from default_map into result_map. */
1813 for (auto iter : default_map)
1815 const binding_key *key = iter.first;
1816 const svalue *sval = iter.second;
1817 result_map.put (key, sval);
1820 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
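/* Worked example (illustrative, assuming a 32-bit "int"): given
     struct pt { int x; int y; };
     struct pt arr[4];
     arr[1].x = 3;
     arr[1].y = 4;
   a read of "arr[1]" has REG occupying bits 64-127 of the cluster.  The
   bindings at bits 64-95 and 96-127 fall fully within REG, so result_map
   gets {bits 0-31: (int)3, bits 32-63: (int)4} (offsets now relative to
   REG), default_map supplies initial values for any remaining bits, and
   the two are combined into a compound_svalue of type "struct pt".  */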
1823 /* Remove, truncate, and/or split any bindings within this map that
1824 could overlap REG.
1826 If REG's base region or this cluster is symbolic and they're different
1827 base regions, then remove everything in this cluster's map, on the
1828 grounds that REG could be referring to the same memory as anything
1829 in the map.
1831 If UNCERTAINTY is non-NULL, use it to record any svalues that
1832 were removed, as being maybe-bound.
1834 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1835 were removed, as being maybe-live. */
1837 void
1838 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1839 const region *reg,
1840 uncertainty_t *uncertainty,
1841 svalue_set *maybe_live_values)
1843 if (reg->empty_p ())
1844 return;
1845 const binding_key *reg_binding = binding_key::make (mgr, reg);
1847 const region *cluster_base_reg = get_base_region ();
1848 const region *other_base_reg = reg->get_base_region ();
1849 /* If at least one of the base regions involved is symbolic, and they're
1850 not the same base region, then consider everything in the map as
1851 potentially overlapping with reg_binding (even if it's a concrete
1852 binding and things in the map are concrete - they could be referring
1853 to the same memory when the symbolic base regions are taken into
1854 account). */
1855 bool always_overlap = (cluster_base_reg != other_base_reg
1856 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1857 || other_base_reg->get_kind () == RK_SYMBOLIC));
1858 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1859 maybe_live_values,
1860 always_overlap);
1863 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1864 MGR and MERGER.
1865 Return true if they can be merged, false otherwise. */
1867 bool
1868 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1869 const binding_cluster *cluster_b,
1870 binding_cluster *out_cluster,
1871 store *out_store,
1872 store_manager *mgr,
1873 model_merger *merger)
1875 gcc_assert (out_cluster);
1877 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1878 true if either of the inputs is true. */
1879 if ((cluster_a && cluster_a->m_escaped)
1880 || (cluster_b && cluster_b->m_escaped))
1881 out_cluster->m_escaped = true;
1882 if ((cluster_a && cluster_a->m_touched)
1883 || (cluster_b && cluster_b->m_touched))
1884 out_cluster->m_touched = true;
 1886 /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1887 could be NULL. Handle these cases. */
1888 if (cluster_a == NULL)
1890 gcc_assert (cluster_b != NULL);
1891 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1892 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1893 return true;
1895 if (cluster_b == NULL)
1897 gcc_assert (cluster_a != NULL);
1898 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1899 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1900 return true;
1903 /* The "both inputs are non-NULL" case. */
1904 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1905 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1906 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1908 hash_set<const binding_key *> keys;
1909 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1910 iter_a != cluster_a->m_map.end (); ++iter_a)
1912 const binding_key *key_a = (*iter_a).first;
1913 keys.add (key_a);
1915 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1916 iter_b != cluster_b->m_map.end (); ++iter_b)
1918 const binding_key *key_b = (*iter_b).first;
1919 keys.add (key_b);
1921 int num_symbolic_keys = 0;
1922 int num_concrete_keys = 0;
1923 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1924 iter != keys.end (); ++iter)
1926 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1927 const binding_key *key = *iter;
1928 const svalue *sval_a = cluster_a->get_any_value (key);
1929 const svalue *sval_b = cluster_b->get_any_value (key);
1931 if (key->symbolic_p ())
1932 num_symbolic_keys++;
1933 else
1934 num_concrete_keys++;
1936 if (sval_a == sval_b)
1938 gcc_assert (sval_a);
1939 out_cluster->m_map.put (key, sval_a);
1940 continue;
1942 else if (sval_a && sval_b)
1944 if (const svalue *merged_sval
1945 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1947 out_cluster->m_map.put (key, merged_sval);
1948 continue;
1950 /* Merger of the svalues failed. Reject merger of the cluster. */
1951 return false;
1954 /* If we get here, then one cluster binds this key and the other
1955 doesn't; merge them as "UNKNOWN". */
1956 gcc_assert (sval_a || sval_b);
1958 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1959 tree type = bound_sval->get_type ();
1960 const svalue *unknown_sval
1961 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1963 /* ...but reject the merger if this sval shouldn't be mergeable
1964 (e.g. reject merging svalues that have non-purgeable sm-state,
1965 to avoid falsely reporting memory leaks by merging them
1966 with something else). */
1967 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1968 return false;
1970 out_cluster->m_map.put (key, unknown_sval);
1973 /* We can have at most one symbolic key per cluster,
1974 and if we do, we can't have any concrete keys.
1975 If these limits are exceeded, mark the cluster as touched, with no keys. */
1976 if (num_symbolic_keys >= 2
1977 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1979 out_cluster->m_touched = true;
1980 out_cluster->m_map.empty ();
1983 /* We don't handle other kinds of overlaps yet. */
1985 return true;
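/* A worked example of the rules above (illustrative only): if cluster A
   binds key K to the constant 42 and cluster B binds K to 43, the merged
   value for K is whatever svalue::can_merge_p yields for those two svalues
   (typically a widening or unknown svalue); if only one of A and B binds K,
   then K is rebound to an UNKNOWN svalue of the same type, provided the
   bound svalue is itself allowed to merge with UNKNOWN; and if the union
   of keys mixes concrete and symbolic bindings (or has two or more
   symbolic ones), the merged cluster is emptied and marked as touched.  */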
1988 /* Update this cluster to reflect an attempt to merge OTHER where there
1989 is no other cluster to merge with, and so we're notionally merging the
1990 bound values in OTHER with the initial value of the relevant regions.
1992 Any bound keys in OTHER should be bound to unknown in this. */
1994 void
1995 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1996 store *out_store,
1997 store_manager *mgr)
1999 for (map_t::iterator iter = other->m_map.begin ();
2000 iter != other->m_map.end (); ++iter)
2002 const binding_key *iter_key = (*iter).first;
2003 const svalue *iter_sval = (*iter).second;
2004 const svalue *unknown_sval
2005 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
2006 (iter_sval->get_type ());
2007 m_map.put (iter_key, unknown_sval);
2009 /* For any pointers in OTHER, the merger means that the
2010 concrete pointer becomes an unknown value, which could
2011 show up as a false report of a leak when considering what
2012 pointers are live before vs after.
2013 Avoid this by marking the base regions they point to as having
2014 escaped. */
2015 if (const region_svalue *region_sval
2016 = iter_sval->dyn_cast_region_svalue ())
2018 const region *base_reg
2019 = region_sval->get_pointee ()->get_base_region ();
2020 if (base_reg->tracked_p ()
2021 && !base_reg->symbolic_for_unknown_ptr_p ())
2023 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
2024 c->mark_as_escaped ();
2030 /* Mark this cluster as having escaped. */
2032 void
2033 binding_cluster::mark_as_escaped ()
2035 m_escaped = true;
2038 /* If this cluster has escaped (by this call, or by an earlier one, or
2039 by being an external param), then unbind all values and mark it
2040 as "touched", so that it has a conjured value, rather than an
2041 initial_svalue.
2042 Use P to purge state involving conjured_svalues. */
2044 void
2045 binding_cluster::on_unknown_fncall (const gcall *call,
2046 store_manager *mgr,
2047 const conjured_purge &p)
2049 if (m_escaped)
2051 m_map.empty ();
2053 if (!m_base_region->empty_p ())
2055 /* Bind it to a new "conjured" value using CALL. */
2056 const svalue *sval
2057 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2058 (m_base_region->get_type (), call, m_base_region, p);
2059 bind (mgr, m_base_region, sval);
2062 m_touched = true;
2066 /* Mark this cluster as having been clobbered by STMT.
2067 Use P to purge state involving conjured_svalues. */
2069 void
2070 binding_cluster::on_asm (const gasm *stmt,
2071 store_manager *mgr,
2072 const conjured_purge &p)
2074 m_map.empty ();
2076 /* Bind it to a new "conjured" value using STMT. */
2077 const svalue *sval
2078 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2079 (m_base_region->get_type (), stmt, m_base_region, p);
2080 bind (mgr, m_base_region, sval);
2082 m_touched = true;
2085 /* Return true if this cluster has escaped. */
2087 bool
2088 binding_cluster::escaped_p () const
2090 /* Consider the "errno" region to always have escaped. */
2091 if (m_base_region->get_kind () == RK_ERRNO)
2092 return true;
2093 return m_escaped;
2096 /* Return true if this binding_cluster has no information,
2097 i.e. if there are no bindings, and it hasn't been marked as having
2098 escaped or been touched symbolically. */
2100 bool
2101 binding_cluster::redundant_p () const
2103 return (m_map.elements () == 0
2104 && !m_escaped
2105 && !m_touched);
2108 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2110 static void
2111 append_pathvar_with_type (path_var pv,
2112 tree type,
2113 auto_vec<path_var> *out_pvs)
2115 gcc_assert (pv.m_tree);
2117 if (TREE_TYPE (pv.m_tree) != type)
2118 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2120 out_pvs->safe_push (pv);
2123 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2124 appending the results to OUT_PVS. */
2126 void
2127 binding_cluster::get_representative_path_vars (const region_model *model,
2128 svalue_set *visited,
2129 const region *base_reg,
2130 const svalue *sval,
2131 auto_vec<path_var> *out_pvs)
2132 const
2134 sval = simplify_for_binding (sval);
2136 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2138 const binding_key *key = (*iter).first;
2139 const svalue *bound_sval = (*iter).second;
2140 if (bound_sval == sval)
2142 if (const concrete_binding *ckey
2143 = key->dyn_cast_concrete_binding ())
2145 auto_vec <const region *> subregions;
2146 base_reg->get_subregions_for_binding
2147 (model->get_manager (),
2148 ckey->get_start_bit_offset (),
2149 ckey->get_size_in_bits (),
2150 sval->get_type (),
2151 &subregions);
2152 unsigned i;
2153 const region *subregion;
2154 FOR_EACH_VEC_ELT (subregions, i, subregion)
2156 if (path_var pv
2157 = model->get_representative_path_var (subregion,
2158 visited))
2159 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2162 else
2164 const symbolic_binding *skey = (const symbolic_binding *)key;
2165 if (path_var pv
2166 = model->get_representative_path_var (skey->get_region (),
2167 visited))
2168 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2174 /* Get any svalue bound to KEY, or NULL. */
2176 const svalue *
2177 binding_cluster::get_any_value (const binding_key *key) const
2179 return m_map.get (key);
2182 /* If this cluster has a single direct binding for the whole of the region,
2183 return it.
2184 For use in simplifying dumps. */
2186 const svalue *
2187 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2189 /* Fail gracefully if MGR is NULL to make it easier to dump store
2190 instances in the debugger. */
2191 if (mgr == NULL)
2192 return NULL;
2194 if (m_map.elements () != 1)
2195 return NULL;
2197 if (m_base_region->empty_p ())
2198 return NULL;
2200 const binding_key *key = binding_key::make (mgr, m_base_region);
2201 return get_any_value (key);
2204 /* class store_manager. */
2206 logger *
2207 store_manager::get_logger () const
2209 return m_mgr->get_logger ();
2212 /* binding consolidation. */
2214 const concrete_binding *
2215 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2216 bit_offset_t size_in_bits)
2218 concrete_binding b (start_bit_offset, size_in_bits);
2219 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2220 return existing;
2222 concrete_binding *to_save = new concrete_binding (b);
2223 m_concrete_binding_key_mgr.put (b, to_save);
2224 return to_save;
2227 const symbolic_binding *
2228 store_manager::get_symbolic_binding (const region *reg)
2230 symbolic_binding b (reg);
2231 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2232 return existing;
2234 symbolic_binding *to_save = new symbolic_binding (b);
2235 m_symbolic_binding_key_mgr.put (b, to_save);
2236 return to_save;
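/* Both getters above consolidate keys: requesting the same concrete range
   (or the same region) twice returns the same interned object, so equal
   keys can be recognized by pointer identity.  For example (a hypothetical
   usage sketch):

     store_manager mgr (NULL);
     const concrete_binding *k0 = mgr.get_concrete_binding (0, 8);
     const concrete_binding *k1 = mgr.get_concrete_binding (0, 8);
     gcc_assert (k0 == k1);  */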
2239 /* class store. */
2241 /* store's default ctor. */
2243 store::store ()
2244 : m_called_unknown_fn (false)
2248 /* store's copy ctor. */
2250 store::store (const store &other)
2251 : m_cluster_map (other.m_cluster_map.elements ()),
2252 m_called_unknown_fn (other.m_called_unknown_fn)
2254 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2255 iter != other.m_cluster_map.end ();
2256 ++iter)
2258 const region *reg = (*iter).first;
2259 gcc_assert (reg);
2260 binding_cluster *c = (*iter).second;
2261 gcc_assert (c);
2262 m_cluster_map.put (reg, new binding_cluster (*c));
2266 /* store's dtor. */
2268 store::~store ()
2270 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2271 iter != m_cluster_map.end ();
2272 ++iter)
2273 delete (*iter).second;
2276 /* store's assignment operator. */
2278 store &
2279 store::operator= (const store &other)
2281 /* Delete existing cluster map. */
2282 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2283 iter != m_cluster_map.end ();
2284 ++iter)
2285 delete (*iter).second;
2286 m_cluster_map.empty ();
2288 m_called_unknown_fn = other.m_called_unknown_fn;
2290 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2291 iter != other.m_cluster_map.end ();
2292 ++iter)
2294 const region *reg = (*iter).first;
2295 gcc_assert (reg);
2296 binding_cluster *c = (*iter).second;
2297 gcc_assert (c);
2298 m_cluster_map.put (reg, new binding_cluster (*c));
2300 return *this;
2303 /* store's equality operator. */
2305 bool
2306 store::operator== (const store &other) const
2308 if (m_called_unknown_fn != other.m_called_unknown_fn)
2309 return false;
2311 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2312 return false;
2314 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2315 iter != m_cluster_map.end ();
2316 ++iter)
2318 const region *reg = (*iter).first;
2319 binding_cluster *c = (*iter).second;
2320 binding_cluster **other_slot
2321 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2322 if (other_slot == NULL)
2323 return false;
2324 if (*c != **other_slot)
2325 return false;
2328 gcc_checking_assert (hash () == other.hash ());
2330 return true;
2333 /* Get a hash value for this store. */
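/* The per-cluster hashes are combined with XOR below, which is commutative,
   so the result does not depend on the traversal order of the unordered
   m_cluster_map.  */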
2335 hashval_t
2336 store::hash () const
2338 hashval_t result = 0;
2339 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2340 iter != m_cluster_map.end ();
2341 ++iter)
2342 result ^= (*iter).second->hash ();
2343 return result;
2346 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2347 removing duplicate parents. */
2349 static void
2350 get_sorted_parent_regions (auto_vec<const region *> *out,
2351 auto_vec<const region *> &in)
2353 /* Get the set of parent regions. */
2354 hash_set<const region *> parent_regions;
2355 const region *iter_reg;
2356 unsigned i;
2357 FOR_EACH_VEC_ELT (in, i, iter_reg)
2359 const region *parent_reg = iter_reg->get_parent_region ();
2360 gcc_assert (parent_reg);
2361 parent_regions.add (parent_reg);
2364 /* Write to OUT. */
2365 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2366 iter != parent_regions.end(); ++iter)
2367 out->safe_push (*iter);
2369 /* Sort OUT. */
2370 out->qsort (region::cmp_ptr_ptr);
2373 /* Dump a representation of this store to PP, using SIMPLE to control how
2374 svalues and regions are printed.
2375 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2376 (to make it easier to use from the debugger). */
2378 void
2379 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2380 store_manager *mgr) const
2382 /* Sort into some deterministic order. */
2383 auto_vec<const region *> base_regions;
2384 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2385 iter != m_cluster_map.end (); ++iter)
2387 const region *base_reg = (*iter).first;
2388 base_regions.safe_push (base_reg);
2390 base_regions.qsort (region::cmp_ptr_ptr);
2392 /* Gather clusters, organize by parent region, so that we can group
2393 together locals, globals, etc. */
2394 auto_vec<const region *> parent_regions;
2395 get_sorted_parent_regions (&parent_regions, base_regions);
2397 const region *parent_reg;
2398 unsigned i;
2399 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2401 gcc_assert (parent_reg);
2402 pp_string (pp, "clusters within ");
2403 parent_reg->dump_to_pp (pp, simple);
2404 if (multiline)
2405 pp_newline (pp);
2406 else
2407 pp_string (pp, " {");
2409 const region *base_reg;
2410 unsigned j;
2411 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2413 /* This is O(N * M), but N ought to be small. */
2414 if (base_reg->get_parent_region () != parent_reg)
2415 continue;
2416 binding_cluster *cluster
2417 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2418 if (!multiline)
2420 if (j > 0)
2421 pp_string (pp, ", ");
2423 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2425 /* Special-case to simplify dumps for the common case where
2426 we just have one value directly bound to the whole of a
2427 region. */
2428 if (multiline)
2430 pp_string (pp, " cluster for: ");
2431 base_reg->dump_to_pp (pp, simple);
2432 pp_string (pp, ": ");
2433 sval->dump_to_pp (pp, simple);
2434 if (cluster->escaped_p ())
2435 pp_string (pp, " (ESCAPED)");
2436 if (cluster->touched_p ())
2437 pp_string (pp, " (TOUCHED)");
2438 pp_newline (pp);
2440 else
2442 pp_string (pp, "region: {");
2443 base_reg->dump_to_pp (pp, simple);
2444 pp_string (pp, ", value: ");
2445 sval->dump_to_pp (pp, simple);
2446 if (cluster->escaped_p ())
2447 pp_string (pp, " (ESCAPED)");
2448 if (cluster->touched_p ())
2449 pp_string (pp, " (TOUCHED)");
2450 pp_string (pp, "}");
2453 else if (multiline)
2455 pp_string (pp, " cluster for: ");
2456 base_reg->dump_to_pp (pp, simple);
2457 pp_newline (pp);
2458 cluster->dump_to_pp (pp, simple, multiline);
2460 else
2462 pp_string (pp, "base region: {");
2463 base_reg->dump_to_pp (pp, simple);
2464 pp_string (pp, "} has cluster: {");
2465 cluster->dump_to_pp (pp, simple, multiline);
2466 pp_string (pp, "}");
2469 if (!multiline)
2470 pp_string (pp, "}");
2472 pp_printf (pp, "m_called_unknown_fn: %s",
2473 m_called_unknown_fn ? "TRUE" : "FALSE");
2474 if (multiline)
2475 pp_newline (pp);
2478 /* Dump a multiline representation of this store to stderr. */
2480 DEBUG_FUNCTION void
2481 store::dump (bool simple) const
2483 pretty_printer pp;
2484 pp_format_decoder (&pp) = default_tree_printer;
2485 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2486 pp.buffer->stream = stderr;
2487 dump_to_pp (&pp, simple, true, NULL);
2488 pp_newline (&pp);
2489 pp_flush (&pp);
2492 /* Assert that this object is valid. */
2494 void
2495 store::validate () const
2497 for (auto iter : m_cluster_map)
2498 iter.second->validate ();
2501 /* Return a new json::object of the form
2502 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2503 ... for each cluster within parent region},
2504 ...for each parent region,
2505 "called_unknown_fn": true/false}. */
2507 json::object *
2508 store::to_json () const
2510 json::object *store_obj = new json::object ();
2512 /* Sort into some deterministic order. */
2513 auto_vec<const region *> base_regions;
2514 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2515 iter != m_cluster_map.end (); ++iter)
2517 const region *base_reg = (*iter).first;
2518 base_regions.safe_push (base_reg);
2520 base_regions.qsort (region::cmp_ptr_ptr);
2522 /* Gather clusters, organize by parent region, so that we can group
2523 together locals, globals, etc. */
2524 auto_vec<const region *> parent_regions;
2525 get_sorted_parent_regions (&parent_regions, base_regions);
2527 const region *parent_reg;
2528 unsigned i;
2529 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2531 gcc_assert (parent_reg);
2533 json::object *clusters_in_parent_reg_obj = new json::object ();
2535 const region *base_reg;
2536 unsigned j;
2537 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2539 /* This is O(N * M), but N ought to be small. */
2540 if (base_reg->get_parent_region () != parent_reg)
2541 continue;
2542 binding_cluster *cluster
2543 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2544 label_text base_reg_desc = base_reg->get_desc ();
2545 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2546 cluster->to_json ());
2548 label_text parent_reg_desc = parent_reg->get_desc ();
2549 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2552 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2554 return store_obj;
2557 /* Get any svalue bound to REG, or NULL. */
2559 const svalue *
2560 store::get_any_binding (store_manager *mgr, const region *reg) const
2562 const region *base_reg = reg->get_base_region ();
2563 binding_cluster **cluster_slot
2564 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2565 if (!cluster_slot)
2566 return NULL;
2567 return (*cluster_slot)->get_any_binding (mgr, reg);
2570 /* Set the value of LHS_REG to RHS_SVAL. */
2572 void
2573 store::set_value (store_manager *mgr, const region *lhs_reg,
2574 const svalue *rhs_sval,
2575 uncertainty_t *uncertainty)
2577 logger *logger = mgr->get_logger ();
2578 LOG_SCOPE (logger);
2580 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2582 if (lhs_reg->get_type ())
2583 rhs_sval = simplify_for_binding (rhs_sval);
2584 /* ...but if we have no type for the region, retain any cast. */
2586 const region *lhs_base_reg = lhs_reg->get_base_region ();
2587 binding_cluster *lhs_cluster;
2588 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2590 /* Reject attempting to bind values into a symbolic region
2591 for an unknown ptr; merely invalidate values below. */
2592 lhs_cluster = NULL;
2594 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2595 then treat the region being pointed to as having escaped. */
2596 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2598 const region *ptr_dst = ptr_sval->get_pointee ();
2599 const region *ptr_base_reg = ptr_dst->get_base_region ();
2600 mark_as_escaped (ptr_base_reg);
2602 if (uncertainty)
2603 uncertainty->on_maybe_bound_sval (rhs_sval);
2605 else if (lhs_base_reg->tracked_p ())
2607 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2608 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2610 else
2612 /* Reject attempting to bind values into an untracked region;
2613 merely invalidate values below. */
2614 lhs_cluster = NULL;
2617 /* Bindings to a cluster can affect other clusters if a symbolic
2618 base region is involved.
2619 Writes to concrete clusters can't affect other concrete clusters,
2620 but can affect symbolic clusters.
2621 Writes to symbolic clusters can affect both concrete and symbolic
2622 clusters.
2623 Invalidate our knowledge of other clusters that might have been
2624 affected by the write.
2625 Gather the set of all svalues that might still be live even if
2626 the store doesn't refer to them. */
2627 svalue_set maybe_live_values;
2628 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2629 iter != m_cluster_map.end (); ++iter)
2631 const region *iter_base_reg = (*iter).first;
2632 binding_cluster *iter_cluster = (*iter).second;
2633 if (iter_base_reg != lhs_base_reg
2634 && (lhs_cluster == NULL
2635 || lhs_cluster->symbolic_p ()
2636 || iter_cluster->symbolic_p ()))
2638 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2639 switch (t_alias.get_value ())
2641 default:
2642 gcc_unreachable ();
2644 case tristate::TS_UNKNOWN:
2645 if (logger)
2647 pretty_printer *pp = logger->get_printer ();
2648 logger->start_log_line ();
2649 logger->log_partial ("possible aliasing of ");
2650 iter_base_reg->dump_to_pp (pp, true);
2651 logger->log_partial (" when writing SVAL: ");
2652 rhs_sval->dump_to_pp (pp, true);
2653 logger->log_partial (" to LHS_REG: ");
2654 lhs_reg->dump_to_pp (pp, true);
2655 logger->end_log_line ();
2657 /* Mark all of iter_cluster's iter_base_reg as unknown,
2658 using LHS_REG when considering overlaps, to handle
2659 symbolic vs concrete issues. */
2660 iter_cluster->mark_region_as_unknown
2661 (mgr,
2662 iter_base_reg, /* reg_to_bind */
2663 lhs_reg, /* reg_for_overlap */
2664 uncertainty,
2665 &maybe_live_values);
2666 break;
2668 case tristate::TS_TRUE:
2669 gcc_unreachable ();
2670 break;
2672 case tristate::TS_FALSE:
2673 /* If they can't be aliases, then don't invalidate this
2674 cluster. */
2675 break;
2679 /* Given the set of svalues that might still be live, process them
2680 (e.g. marking regions as escaped).
2681 We do this after the iteration to avoid potentially changing
2682 m_cluster_map whilst iterating over it. */
2683 on_maybe_live_values (maybe_live_values);
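/* For instance (an illustrative scenario, not from a real run): writing
   through "*p" where "p" is an unknown pointer leaves lhs_cluster NULL, so
   every cluster whose base region might alias the write (e.g. a global "g",
   but not an SSA name) has its overlapping bindings invalidated, and any
   pointer values that might still be live are then treated as escaped via
   on_maybe_live_values.  */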
2686 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2688 tristate
2689 store::eval_alias (const region *base_reg_a,
2690 const region *base_reg_b) const
2692 /* SSA names can't alias. */
2693 tree decl_a = base_reg_a->maybe_get_decl ();
2694 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2695 return tristate::TS_FALSE;
2696 tree decl_b = base_reg_b->maybe_get_decl ();
2697 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2698 return tristate::TS_FALSE;
2700 /* Try both ways, for symmetry. */
2701 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2702 if (ts_ab.is_false ())
2703 return tristate::TS_FALSE;
2704 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2705 if (ts_ba.is_false ())
2706 return tristate::TS_FALSE;
2707 return tristate::TS_UNKNOWN;
2710 /* Half of store::eval_alias; called twice for symmetry. */
2712 tristate
2713 store::eval_alias_1 (const region *base_reg_a,
2714 const region *base_reg_b) const
2716 /* If they're in different memory spaces, they can't alias. */
2718 enum memory_space memspace_a = base_reg_a->get_memory_space ();
2719 if (memspace_a != MEMSPACE_UNKNOWN)
2721 enum memory_space memspace_b = base_reg_b->get_memory_space ();
2722 if (memspace_b != MEMSPACE_UNKNOWN
2723 && memspace_a != memspace_b)
2724 return tristate::TS_FALSE;
2728 if (const symbolic_region *sym_reg_a
2729 = base_reg_a->dyn_cast_symbolic_region ())
2731 const svalue *sval_a = sym_reg_a->get_pointer ();
2732 if (tree decl_b = base_reg_b->maybe_get_decl ())
2734 if (!may_be_aliased (decl_b))
2735 return tristate::TS_FALSE;
2736 if (sval_a->get_kind () == SK_INITIAL)
2737 if (!is_global_var (decl_b))
2739 /* The initial value of a pointer can't point to a local. */
2740 return tristate::TS_FALSE;
2743 if (sval_a->get_kind () == SK_INITIAL
2744 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2746 /* The initial value of a pointer can't point to a
2747 region that was allocated on the heap after the beginning of the
2748 path. */
2749 return tristate::TS_FALSE;
2751 if (const widening_svalue *widening_sval_a
2752 = sval_a->dyn_cast_widening_svalue ())
2754 const svalue *base = widening_sval_a->get_base_svalue ();
2755 if (const region_svalue *region_sval
2756 = base->dyn_cast_region_svalue ())
2758 const region *pointee = region_sval->get_pointee ();
2759 /* If we have sval_a is WIDENING(&REGION, OP), and
2760 B can't alias REGION, then B can't alias A either.
2761 For example, A might arise from
2762 for (ptr = &REGION; ...; ptr++)
2763 where sval_a is ptr in the 2nd iteration of the loop.
2764 We want to ensure that "*ptr" can only clobber things
2765 within REGION's base region. */
2766 tristate ts = eval_alias (pointee->get_base_region (),
2767 base_reg_b);
2768 if (ts.is_false ())
2769 return tristate::TS_FALSE;
2773 return tristate::TS_UNKNOWN;
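/* Concretely (an illustrative example):
     int g;
     void f (int *p) { *p = 1; }
   the initial value of "p" can't point to one of f's locals, nor to a
   region heap-allocated later along the path, so eval_alias_1 returns
   TS_FALSE for such pairs, and the write through "*p" is only treated as
   possibly clobbering regions like "g".  */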
2776 /* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
2778 void
2779 store::on_maybe_live_values (const svalue_set &maybe_live_values)
2781 for (auto sval : maybe_live_values)
2783 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
2785 const region *base_reg = ptr_sval->get_pointee ()->get_base_region ();
2786 mark_as_escaped (base_reg);
2791 /* Remove all bindings overlapping REG within this store. */
2793 void
2794 store::clobber_region (store_manager *mgr, const region *reg)
2796 const region *base_reg = reg->get_base_region ();
2797 binding_cluster **slot = m_cluster_map.get (base_reg);
2798 if (!slot)
2799 return;
2800 binding_cluster *cluster = *slot;
2801 cluster->clobber_region (mgr, reg);
2802 if (cluster->redundant_p ())
2804 delete cluster;
2805 m_cluster_map.remove (base_reg);
2809 /* Remove any bindings for REG within this store. */
2811 void
2812 store::purge_region (store_manager *mgr, const region *reg)
2814 const region *base_reg = reg->get_base_region ();
2815 binding_cluster **slot = m_cluster_map.get (base_reg);
2816 if (!slot)
2817 return;
2818 binding_cluster *cluster = *slot;
2819 cluster->purge_region (mgr, reg);
2820 if (cluster->redundant_p ())
2822 delete cluster;
2823 m_cluster_map.remove (base_reg);
2827 /* Fill REG with SVAL. */
2829 void
2830 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2832 /* Filling an empty region is a no-op. */
2833 if (reg->empty_p ())
2834 return;
2836 const region *base_reg = reg->get_base_region ();
2837 if (base_reg->symbolic_for_unknown_ptr_p ()
2838 || !base_reg->tracked_p ())
2839 return;
2840 binding_cluster *cluster = get_or_create_cluster (base_reg);
2841 cluster->fill_region (mgr, reg, sval);
2844 /* Zero-fill REG. */
2846 void
2847 store::zero_fill_region (store_manager *mgr, const region *reg)
2849 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2850 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2851 fill_region (mgr, reg, zero_sval);
2854 /* Mark REG as having unknown content. */
2856 void
2857 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2858 uncertainty_t *uncertainty,
2859 svalue_set *maybe_live_values)
2861 const region *base_reg = reg->get_base_region ();
2862 if (base_reg->symbolic_for_unknown_ptr_p ()
2863 || !base_reg->tracked_p ())
2864 return;
2865 binding_cluster *cluster = get_or_create_cluster (base_reg);
2866 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
2867 maybe_live_values);
2870 /* Purge state involving SVAL. */
2872 void
2873 store::purge_state_involving (const svalue *sval,
2874 region_model_manager *sval_mgr)
2876 auto_vec <const region *> base_regs_to_purge;
2877 for (auto iter : m_cluster_map)
2879 const region *base_reg = iter.first;
2880 if (base_reg->involves_p (sval))
2881 base_regs_to_purge.safe_push (base_reg);
2882 else
2884 binding_cluster *cluster = iter.second;
2885 cluster->purge_state_involving (sval, sval_mgr);
2889 for (auto iter : base_regs_to_purge)
2890 purge_cluster (iter);
2893 /* Get the cluster for BASE_REG, or NULL (const version). */
2895 const binding_cluster *
2896 store::get_cluster (const region *base_reg) const
2898 gcc_assert (base_reg);
2899 gcc_assert (base_reg->get_base_region () == base_reg);
2900 if (binding_cluster **slot
2901 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2902 return *slot;
2903 else
2904 return NULL;
2907 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2909 binding_cluster *
2910 store::get_cluster (const region *base_reg)
2912 gcc_assert (base_reg);
2913 gcc_assert (base_reg->get_base_region () == base_reg);
2914 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2915 return *slot;
2916 else
2917 return NULL;
2920 /* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
2922 binding_cluster *
2923 store::get_or_create_cluster (const region *base_reg)
2925 gcc_assert (base_reg);
2926 gcc_assert (base_reg->get_base_region () == base_reg);
2928 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2929 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2931 /* We shouldn't create clusters for base regions that aren't trackable. */
2932 gcc_assert (base_reg->tracked_p ());
2934 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2935 return *slot;
2937 binding_cluster *cluster = new binding_cluster (base_reg);
2938 m_cluster_map.put (base_reg, cluster);
2940 return cluster;
2943 /* Remove any cluster for BASE_REG, for use by
2944 region_model::unbind_region_and_descendents
2945 when popping stack frames and handling deleted heap regions. */
2947 void
2948 store::purge_cluster (const region *base_reg)
2950 gcc_assert (base_reg->get_base_region () == base_reg);
2951 binding_cluster **slot = m_cluster_map.get (base_reg);
2952 if (!slot)
2953 return;
2954 binding_cluster *cluster = *slot;
2955 delete cluster;
2956 m_cluster_map.remove (base_reg);
2959 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2960 Return true if successful, or false if the stores can't be merged. */
2962 bool
2963 store::can_merge_p (const store *store_a, const store *store_b,
2964 store *out_store, store_manager *mgr,
2965 model_merger *merger)
2967 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2968 out_store->m_called_unknown_fn = true;
2970 /* Get the union of all base regions for STORE_A and STORE_B. */
2971 hash_set<const region *> base_regions;
2972 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2973 iter_a != store_a->m_cluster_map.end (); ++iter_a)
2975 const region *base_reg_a = (*iter_a).first;
2976 base_regions.add (base_reg_a);
2978 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2979 iter_b != store_b->m_cluster_map.end (); ++iter_b)
2981 const region *base_reg_b = (*iter_b).first;
2982 base_regions.add (base_reg_b);
2985 /* Sort the base regions before considering them. This ought not to
2986 affect the results, but can affect which types UNKNOWN_REGIONs are
2987 created for in a run; sorting them thus avoids minor differences
2988 in logfiles. */
2989 auto_vec<const region *> vec_base_regions (base_regions.elements ());
2990 for (hash_set<const region *>::iterator iter = base_regions.begin ();
2991 iter != base_regions.end (); ++iter)
2992 vec_base_regions.quick_push (*iter);
2993 vec_base_regions.qsort (region::cmp_ptr_ptr);
2994 unsigned i;
2995 const region *base_reg;
2996 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2998 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2999 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
3000 /* At least one of cluster_a and cluster_b must be non-NULL. */
3001 binding_cluster *out_cluster
3002 = out_store->get_or_create_cluster (base_reg);
3003 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
3004 out_cluster, out_store, mgr, merger))
3005 return false;
3007 return true;
3010 /* Mark the cluster for BASE_REG as having escaped.
3011 For use when handling an unrecognized function call, and
3012 for params to "top-level" calls.
3013 Further unknown function calls could touch it, even if the cluster
3014 isn't reachable from args of those calls. */
3016 void
3017 store::mark_as_escaped (const region *base_reg)
3019 gcc_assert (base_reg);
3020 gcc_assert (base_reg->get_base_region () == base_reg);
3022 if (base_reg->symbolic_for_unknown_ptr_p ()
3023 || !base_reg->tracked_p ())
3024 return;
3026 binding_cluster *cluster = get_or_create_cluster (base_reg);
3027 cluster->mark_as_escaped ();
3030 /* Handle an unknown fncall by updating any clusters that have escaped
3031 (either in this fncall, or in a prior one). */
3033 void
3034 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
3035 const conjured_purge &p)
3037 m_called_unknown_fn = true;
3039 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3040 iter != m_cluster_map.end (); ++iter)
3041 (*iter).second->on_unknown_fncall (call, mgr, p);
3044 /* Return true if a non-const pointer to BASE_REG (or something within it)
3045 has escaped to code outside of the TU being analyzed. */
3047 bool
3048 store::escaped_p (const region *base_reg) const
3050 gcc_assert (base_reg);
3051 gcc_assert (base_reg->get_base_region () == base_reg);
3053 /* "errno" can always be modified by external code. */
3054 if (base_reg->get_kind () == RK_ERRNO)
3055 return true;
3057 if (binding_cluster **cluster_slot
3058 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3059 return (*cluster_slot)->escaped_p ();
3060 return false;
3063 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3064 this store, using VISITED to ensure the traversal terminates. */
3066 void
3067 store::get_representative_path_vars (const region_model *model,
3068 svalue_set *visited,
3069 const svalue *sval,
3070 auto_vec<path_var> *out_pvs) const
3072 gcc_assert (sval);
3074 /* Find all bindings that reference SVAL. */
3075 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3076 iter != m_cluster_map.end (); ++iter)
3078 const region *base_reg = (*iter).first;
3079 binding_cluster *cluster = (*iter).second;
3080 cluster->get_representative_path_vars (model, visited, base_reg, sval,
3081 out_pvs);
3084 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
3086 const region *reg = init_sval->get_region ();
3087 if (path_var pv = model->get_representative_path_var (reg,
3088 visited))
3089 out_pvs->safe_push (pv);
3093 /* Remove all bindings overlapping REG within this store, removing
3094 any clusters that become redundant.
3096 If UNCERTAINTY is non-NULL, use it to record any svalues that
3097 were removed, as being maybe-bound. */
3099 void
3100 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3101 uncertainty_t *uncertainty)
3103 const region *base_reg = reg->get_base_region ();
3104 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3106 binding_cluster *cluster = *cluster_slot;
3107 if (reg == base_reg && !escaped_p (base_reg))
3109 /* Remove whole cluster. */
3110 m_cluster_map.remove (base_reg);
3111 delete cluster;
3112 return;
3114 /* Pass NULL for the maybe_live_values here, as we don't want to
3115 record the old svalues as being maybe-bound. */
3116 cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL);
3120 /* Subclass of visitor that accumulates a hash_set of the regions that
3121 were visited. */
3123 struct region_finder : public visitor
3125 void visit_region (const region *reg) final override
3127 m_regs.add (reg);
3130 hash_set<const region *> m_regs;
3133 /* Canonicalize this store, to maximize the chance of equality between
3134 instances. */
3136 void
3137 store::canonicalize (store_manager *mgr)
3139 /* If we have e.g.:
3140 cluster for: HEAP_ALLOCATED_REGION(543)
3141 ESCAPED
3142 TOUCHED
3143 where the heap region is empty and unreferenced, then purge that
3144 cluster, to avoid unbounded state chains involving these. */
3146 /* Find regions that are referenced by bound values in the store. */
3147 region_finder s;
3148 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3149 iter != m_cluster_map.end (); ++iter)
3151 binding_cluster *cluster = (*iter).second;
3152 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3153 bind_iter != cluster->m_map.end (); ++bind_iter)
3154 (*bind_iter).second->accept (&s);
3157 /* Locate heap-allocated regions that have empty bindings that weren't
3158 found above. */
3159 hash_set<const region *> purgeable_regions;
3160 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3161 iter != m_cluster_map.end (); ++iter)
3163 const region *base_reg = (*iter).first;
3164 binding_cluster *cluster = (*iter).second;
3165 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3167 /* Don't purge a heap-allocated region that's been marked as
3168 escaping, since this could be recording that a ptr to it
3169 was written to an unknown symbolic region along this
3170 path, and so we don't know whether it's referenced or
3171 not, and hence whether we should report it as leaking
3172 (PR analyzer/106473). */
3173 if (cluster->escaped_p ())
3174 continue;
3176 if (cluster->empty_p ())
3177 if (!s.m_regs.contains (base_reg))
3178 purgeable_regions.add (base_reg);
3180 /* Also cover the UNKNOWN case. */
3181 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3182 if (sval->get_kind () == SK_UNKNOWN)
3183 if (!s.m_regs.contains (base_reg))
3184 purgeable_regions.add (base_reg);
3188 /* Purge them. */
3189 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3190 iter != purgeable_regions.end (); ++iter)
3192 const region *base_reg = *iter;
3193 purge_cluster (base_reg);
3197 /* Subroutine for use by exploded_path::feasible_p.
3199 We need to deal with state differences between:
3200 (a) when the exploded_graph is being initially constructed and
3201 (b) when replaying the state changes along a specific path in
3202 exploded_path::feasible_p.
3204 In (a), state merging happens, so when exploring a loop
3205 for (i = 0; i < 1024; i++)
3206 on successive iterations we have i == 0, then i == WIDENING.
3208 In (b), no state merging happens, so naively replaying the path
3209 that goes twice through the loop then exits it
3210 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3211 that exits the loop, which would be found to be infeasible as i == 1,
3212 and the path would be rejected.
3214 We need to fix up state during replay. This subroutine is
3215 called whenever we enter a supernode that we've already
3216 visited along this exploded_path, passing in OTHER_STORE
3217 from the destination enode's state.
3219 Find bindings to widening values in OTHER_STORE.
3220 For all that are found, update the binding in this store to UNKNOWN. */
3222 void
3223 store::loop_replay_fixup (const store *other_store,
3224 region_model_manager *mgr)
3226 gcc_assert (other_store);
3227 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3228 iter != other_store->m_cluster_map.end (); ++iter)
3230 const region *base_reg = (*iter).first;
3231 binding_cluster *cluster = (*iter).second;
3232 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3233 bind_iter != cluster->m_map.end (); ++bind_iter)
3235 const binding_key *key = (*bind_iter).first;
3236 const svalue *sval = (*bind_iter).second;
3237 if (sval->get_kind () == SK_WIDENING)
3239 binding_cluster *this_cluster
3240 = get_or_create_cluster (base_reg);
3241 const svalue *unknown
3242 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3243 this_cluster->bind_key (key, unknown);
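/* E.g. (illustrative): when replaying a path through the loop in the
   comment above, the destination enode's store may bind "i" to a widening
   svalue while this store has "i" bound to 1; rebinding "i" to UNKNOWN
   here keeps the loop-exiting (i >= 1024) edge feasible during replay.  */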
3249 /* Use R to replay the bindings from SUMMARY into this object. */
3251 void
3252 store::replay_call_summary (call_summary_replay &r,
3253 const store &summary)
3255 if (summary.m_called_unknown_fn)
3257 /* A call to an external function occurred in the summary.
3258 Hence we need to invalidate our knowledge of globals,
3259 escaped regions, etc. */
3260 on_unknown_fncall (r.get_call_stmt (),
3261 r.get_store_manager (),
3262 conjured_purge (r.get_caller_model (),
3263 r.get_ctxt ()));
3266 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3267 for (auto kv : summary.m_cluster_map)
3268 keys.quick_push (kv.first);
3269 keys.qsort (region::cmp_ptr_ptr);
3270 for (auto base_reg : keys)
3271 replay_call_summary_cluster (r, summary, base_reg);
3274 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3275 into this object. */
3277 void
3278 store::replay_call_summary_cluster (call_summary_replay &r,
3279 const store &summary,
3280 const region *summary_base_reg)
3282 const call_details &cd = r.get_call_details ();
3283 region_model_manager *reg_mgr = r.get_manager ();
3284 store_manager *mgr = reg_mgr->get_store_manager ();
3285 const binding_cluster *summary_cluster
3286 = summary.get_cluster (summary_base_reg);
3288 /* Handle "ESCAPED" and "TOUCHED" flags. */
3289 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3290 if (const region *caller_reg
3291 = r.convert_region_from_summary (summary_base_reg))
3293 const region *caller_base_reg = caller_reg->get_base_region ();
3294 if (caller_base_reg->tracked_p ()
3295 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3297 binding_cluster *caller_cluster
3298 = get_or_create_cluster (caller_base_reg);
3299 if (summary_cluster->escaped_p ())
3300 caller_cluster->mark_as_escaped ();
3301 if (summary_cluster->touched_p ())
3302 caller_cluster->m_touched = true;
3306 switch (summary_base_reg->get_kind ())
3308 /* Top-level regions. */
3309 case RK_FRAME:
3310 case RK_GLOBALS:
3311 case RK_CODE:
3312 case RK_STACK:
3313 case RK_HEAP:
3314 case RK_THREAD_LOCAL:
3315 case RK_ROOT:
3316 /* Child regions. */
3317 case RK_FIELD:
3318 case RK_ELEMENT:
3319 case RK_OFFSET:
3320 case RK_SIZED:
3321 case RK_CAST:
3322 case RK_BIT_RANGE:
3323 /* Other regions. */
3324 case RK_VAR_ARG:
3325 case RK_UNKNOWN:
3326 /* These should never be the base region of a binding cluster. */
3327 gcc_unreachable ();
3328 break;
3330 case RK_FUNCTION:
3331 case RK_LABEL:
3332 case RK_STRING:
3333 /* These can be marked as escaping. */
3334 break;
3336 case RK_SYMBOLIC:
3338 const symbolic_region *summary_symbolic_reg
3339 = as_a <const symbolic_region *> (summary_base_reg);
3340 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3341 const svalue *caller_ptr_sval
3342 = r.convert_svalue_from_summary (summary_ptr_sval);
3343 if (!caller_ptr_sval)
3344 return;
3345 const region *caller_dest_reg
3346 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3347 NULL_TREE,
3348 cd.get_ctxt ());
3349 const svalue *summary_sval
3350 = summary.get_any_binding (mgr, summary_base_reg);
3351 if (!summary_sval)
3352 return;
3353 const svalue *caller_sval
3354 = r.convert_svalue_from_summary (summary_sval);
3355 if (!caller_sval)
3356 caller_sval =
3357 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3358 set_value (mgr, caller_dest_reg,
3359 caller_sval, NULL /* uncertainty_t * */);
3361 break;
3363 case RK_HEAP_ALLOCATED:
3364 case RK_DECL:
3365 case RK_ERRNO:
3367 const region *caller_dest_reg
3368 = r.convert_region_from_summary (summary_base_reg);
3369 if (!caller_dest_reg)
3370 return;
3371 const svalue *summary_sval
3372 = summary.get_any_binding (mgr, summary_base_reg);
3373 if (!summary_sval)
3374 summary_sval = reg_mgr->get_or_create_compound_svalue
3375 (summary_base_reg->get_type (),
3376 summary_cluster->get_map ());
3377 const svalue *caller_sval
3378 = r.convert_svalue_from_summary (summary_sval);
3379 if (!caller_sval)
3380 caller_sval =
3381 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3382 set_value (mgr, caller_dest_reg,
3383 caller_sval, NULL /* uncertainty_t * */);
3385 break;
3387 case RK_ALLOCA:
3388 /* Ignore bindings of alloca regions in the summary. */
3389 break;
3393 #if CHECKING_P
3395 namespace selftest {
3397 /* Verify that bit_range::intersects_p works as expected. */
3399 static void
3400 test_bit_range_intersects_p ()
3402 bit_range b0 (0, 1);
3403 bit_range b1 (1, 1);
3404 bit_range b2 (2, 1);
3405 bit_range b3 (3, 1);
3406 bit_range b4 (4, 1);
3407 bit_range b5 (5, 1);
3408 bit_range b6 (6, 1);
3409 bit_range b7 (7, 1);
3410 bit_range b1_to_6 (1, 6);
3411 bit_range b0_to_7 (0, 8);
3412 bit_range b3_to_5 (3, 3);
3413 bit_range b6_to_7 (6, 2);
3415 /* self-intersection is true. */
3416 ASSERT_TRUE (b0.intersects_p (b0));
3417 ASSERT_TRUE (b7.intersects_p (b7));
3418 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3419 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3421 ASSERT_FALSE (b0.intersects_p (b1));
3422 ASSERT_FALSE (b1.intersects_p (b0));
3423 ASSERT_FALSE (b0.intersects_p (b7));
3424 ASSERT_FALSE (b7.intersects_p (b0));
3426 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3427 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3428 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3429 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3431 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3432 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3433 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3434 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3435 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3436 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3438 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3439 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3441 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3442 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3444 bit_range r1 (0,0);
3445 bit_range r2 (0,0);
3446 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3447 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3448 ASSERT_EQ (r1.m_size_in_bits, 6);
3449 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3450 ASSERT_EQ (r2.m_size_in_bits, 6);
3452 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3453 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3454 ASSERT_EQ (r1.m_size_in_bits, 6);
3455 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3456 ASSERT_EQ (r2.m_size_in_bits, 6);
3459 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3461 static void
3462 assert_bit_range_from_mask_eq (const location &loc,
3463 unsigned HOST_WIDE_INT mask,
3464 const bit_range &expected)
3466 bit_range actual (0, 0);
3467 bool ok = bit_range::from_mask (mask, &actual);
3468 ASSERT_TRUE_AT (loc, ok);
3469 ASSERT_EQ_AT (loc, actual, expected);
3472 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3473 out EXPECTED_BIT_RANGE. */
3475 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3476 SELFTEST_BEGIN_STMT \
3477 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3478 EXPECTED_BIT_RANGE); \
3479 SELFTEST_END_STMT
3481 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3483 static void
3484 assert_no_bit_range_from_mask_eq (const location &loc,
3485 unsigned HOST_WIDE_INT mask)
3487 bit_range actual (0, 0);
3488 bool ok = bit_range::from_mask (mask, &actual);
3489 ASSERT_FALSE_AT (loc, ok);
3492 /* Assert that bit_range::from_mask (MASK) returns false. */
3494 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3495 SELFTEST_BEGIN_STMT \
3496 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3497 SELFTEST_END_STMT
3499 /* Verify that bit_range::from_mask works as expected. */
3501 static void
3502 test_bit_range_from_mask ()
3504 /* Should fail on zero. */
3505 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3507 /* Verify 1-bit masks. */
3508 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3509 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3510 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3511 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3512 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3513 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3514 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3515 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3517 /* Verify N-bit masks starting at bit 0. */
3518 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3519 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3520 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3521 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3522 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3523 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3524 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3525 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3527 /* Various other tests. */
3528 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3529 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3530 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3532 /* Multiple ranges of set bits should fail. */
3533 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3534 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3537 /* Implementation detail of ASSERT_OVERLAP. */
3539 static void
3540 assert_overlap (const location &loc,
3541 const concrete_binding *b1,
3542 const concrete_binding *b2)
3544 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3545 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3548 /* Implementation detail of ASSERT_DISJOINT. */
3550 static void
3551 assert_disjoint (const location &loc,
3552 const concrete_binding *b1,
3553 const concrete_binding *b2)
3555 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3556 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3559 /* Assert that B1 and B2 overlap, checking both ways. */
3561 #define ASSERT_OVERLAP(B1, B2) \
3562 SELFTEST_BEGIN_STMT \
3563 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3564 SELFTEST_END_STMT
3566 /* Assert that B1 and B2 do not overlap, checking both ways. */
3568 #define ASSERT_DISJOINT(B1, B2) \
3569 SELFTEST_BEGIN_STMT \
3570 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3571 SELFTEST_END_STMT
3573 /* Verify that concrete_binding::overlaps_p works as expected. */
3575 static void
3576 test_binding_key_overlap ()
3578 store_manager mgr (NULL);
3580 /* Various 8-bit bindings. */
3581 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3582 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3583 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3584 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3586 /* 16-bit bindings. */
3587 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3588 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3589 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3591 /* 32-bit binding. */
3592 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3594 /* Everything should self-overlap. */
3595 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3596 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3597 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3598 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3599 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3600 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3601 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3602 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3604 /* Verify the 8-bit bindings that don't overlap each other. */
3605 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3606 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3608 /* Check for overlap of differently-sized bindings. */
3609 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3610 /* ...and with differing start points. */
3611 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3612 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3613 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3614 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3616 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3617 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3618 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3619 ASSERT_DISJOINT (cb_8_23, cb_24_31);
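/* Sketch of an extra selftest (an assumed addition, not one of the original
   tests in this file): verify that store_manager consolidates concrete
   binding keys, so that equal (start, size) requests yield the same
   interned object.  */

static void
test_binding_key_consolidation ()
{
  store_manager mgr (NULL);

  const concrete_binding *k0 = mgr.get_concrete_binding (0, 8);
  const concrete_binding *k1 = mgr.get_concrete_binding (0, 8);
  const concrete_binding *k2 = mgr.get_concrete_binding (8, 8);

  /* Equal offset/size requests are interned to the same object.  */
  ASSERT_EQ (k0, k1);

  /* A different bit range yields a different key.  */
  ASSERT_NE (k0, k2);
}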
3622 /* Run all of the selftests within this file. */
3624 void
3625 analyzer_store_cc_tests ()
3627 test_bit_range_intersects_p ();
3628 test_bit_range_from_mask ();
3629 test_binding_key_overlap ();
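  test_binding_key_consolidation ();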
3632 } // namespace selftest
3634 #endif /* CHECKING_P */
3636 } // namespace ana
3638 #endif /* #if ENABLE_ANALYZER */