gcc/analyzer/store.cc
1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2024 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #define INCLUDE_VECTOR
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tree.h"
27 #include "function.h"
28 #include "basic-block.h"
29 #include "gimple.h"
30 #include "gimple-iterator.h"
31 #include "diagnostic-core.h"
32 #include "graphviz.h"
33 #include "options.h"
34 #include "cgraph.h"
35 #include "tree-dfa.h"
36 #include "stringpool.h"
37 #include "convert.h"
38 #include "target.h"
39 #include "fold-const.h"
40 #include "tree-pretty-print.h"
41 #include "diagnostic-color.h"
42 #include "bitmap.h"
43 #include "selftest.h"
44 #include "analyzer/analyzer.h"
45 #include "analyzer/analyzer-logging.h"
46 #include "ordered-hash-map.h"
47 #include "options.h"
48 #include "cfg.h"
49 #include "analyzer/supergraph.h"
50 #include "sbitmap.h"
51 #include "analyzer/call-string.h"
52 #include "analyzer/program-point.h"
53 #include "analyzer/store.h"
54 #include "analyzer/region-model.h"
55 #include "analyzer/call-summary.h"
56 #include "analyzer/analyzer-selftests.h"
57 #include "stor-layout.h"
58 #include "text-art/tree-widget.h"
60 #if ENABLE_ANALYZER
62 namespace ana {
64 /* Dump SVALS to PP, sorting them to ensure determinism. */
66 static void
67 dump_svalue_set (const hash_set <const svalue *> &svals,
68 pretty_printer *pp, bool simple)
70 auto_vec <const svalue *> v;
71 for (hash_set<const svalue *>::iterator iter = svals.begin ();
72 iter != svals.end (); ++iter)
74 v.safe_push (*iter);
76 v.qsort (svalue::cmp_ptr_ptr);
78 pp_character (pp, '{');
79 const svalue *sval;
80 unsigned i;
81 FOR_EACH_VEC_ELT (v, i, sval)
83 if (i > 0)
84 pp_string (pp, ", ");
85 sval->dump_to_pp (pp, simple);
87 pp_character (pp, '}');
90 /* class uncertainty_t. */
92 /* Dump this object to PP. */
94 void
95 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
97 pp_string (pp, "{m_maybe_bound_svals: ");
98 dump_svalue_set (m_maybe_bound_svals, pp, simple);
100 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
101 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
102 pp_string (pp, "}");
105 /* Dump this object to stderr. */
107 DEBUG_FUNCTION void
108 uncertainty_t::dump (bool simple) const
110 pretty_printer pp;
111 pp_format_decoder (&pp) = default_tree_printer;
112 pp_show_color (&pp) = pp_show_color (global_dc->printer);
113 pp.set_output_stream (stderr);
114 dump_to_pp (&pp, simple);
115 pp_newline (&pp);
116 pp_flush (&pp);
119 /* class binding_key. */
121 const binding_key *
122 binding_key::make (store_manager *mgr, const region *r)
124 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
125 if (offset.symbolic_p ())
126 return mgr->get_symbolic_binding (r);
127 else
129 bit_size_t bit_size;
130 if (r->get_bit_size (&bit_size))
132 /* Must be non-empty. */
133 gcc_assert (bit_size > 0);
134 return mgr->get_concrete_binding (offset.get_bit_offset (),
135 bit_size);
137 else
138 return mgr->get_symbolic_binding (r);
142 /* Dump this binding_key to stderr. */
144 DEBUG_FUNCTION void
145 binding_key::dump (bool simple) const
147 pretty_printer pp;
148 pp_format_decoder (&pp) = default_tree_printer;
149 pp_show_color (&pp) = pp_show_color (global_dc->printer);
150 pp.set_output_stream (stderr);
151 dump_to_pp (&pp, simple);
152 pp_newline (&pp);
153 pp_flush (&pp);
156 /* Get a description of this binding_key. */
158 label_text
159 binding_key::get_desc (bool simple) const
161 pretty_printer pp;
162 pp_format_decoder (&pp) = default_tree_printer;
163 dump_to_pp (&pp, simple);
164 return label_text::take (xstrdup (pp_formatted_text (&pp)));
167 /* qsort callback. */
170 binding_key::cmp_ptrs (const void *p1, const void *p2)
172 const binding_key * const *pk1 = (const binding_key * const *)p1;
173 const binding_key * const *pk2 = (const binding_key * const *)p2;
174 return cmp (*pk1, *pk2);
177 /* Comparator for binding_keys. */
180 binding_key::cmp (const binding_key *k1, const binding_key *k2)
182 int concrete1 = k1->concrete_p ();
183 int concrete2 = k2->concrete_p ();
184 if (int concrete_cmp = concrete1 - concrete2)
185 return concrete_cmp;
186 if (concrete1)
188 const concrete_binding *b1 = (const concrete_binding *)k1;
189 const concrete_binding *b2 = (const concrete_binding *)k2;
190 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
191 b2->get_start_bit_offset (),
192 SIGNED))
193 return start_cmp;
194 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
195 SIGNED);
197 else
199 const symbolic_binding *s1 = (const symbolic_binding *)k1;
200 const symbolic_binding *s2 = (const symbolic_binding *)k2;
201 if (s1 > s2)
202 return 1;
203 if (s1 < s2)
204 return -1;
205 return 0;
209 /* struct bit_range. */
211 void
212 bit_range::dump_to_pp (pretty_printer *pp) const
214 byte_range bytes (0, 0);
215 if (as_byte_range (&bytes))
216 bytes.dump_to_pp (pp);
217 else
219 pp_string (pp, "start: ");
220 pp_wide_int (pp, m_start_bit_offset, SIGNED);
221 pp_string (pp, ", size: ");
222 pp_wide_int (pp, m_size_in_bits, SIGNED);
223 pp_string (pp, ", next: ");
224 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
228 /* Dump this object to stderr. */
230 DEBUG_FUNCTION void
231 bit_range::dump () const
233 pretty_printer pp;
234 pp.set_output_stream (stderr);
235 dump_to_pp (&pp);
236 pp_newline (&pp);
237 pp_flush (&pp);
240 /* Generate a JSON value for this bit_range.
241 This is intended for debugging the analyzer rather
242 than serialization. */
244 json::object *
245 bit_range::to_json () const
247 json::object *obj = new json::object ();
248 obj->set ("start_bit_offset",
249 bit_offset_to_json (m_start_bit_offset));
250 obj->set ("size_in_bits",
251 bit_offset_to_json (m_size_in_bits));
252 return obj;
255 /* If OTHER is a subset of this, return true and, if OUT is
256 non-null, write to *OUT the relative range of OTHER within this.
257 Otherwise return false. */
259 bool
260 bit_range::contains_p (const bit_range &other, bit_range *out) const
262 if (contains_p (other.get_start_bit_offset ())
263 && contains_p (other.get_last_bit_offset ()))
265 if (out)
267 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
268 out->m_size_in_bits = other.m_size_in_bits;
270 return true;
272 else
273 return false;
276 /* If OTHER intersects this, return true and write
277 the relative range of OTHER within THIS to *OUT_THIS,
278 and the relative range of THIS within OTHER to *OUT_OTHER.
279 Otherwise return false. */
281 bool
282 bit_range::intersects_p (const bit_range &other,
283 bit_range *out_this,
284 bit_range *out_other) const
286 if (get_start_bit_offset () < other.get_next_bit_offset ()
287 && other.get_start_bit_offset () < get_next_bit_offset ())
289 bit_offset_t overlap_start
290 = MAX (get_start_bit_offset (),
291 other.get_start_bit_offset ());
292 bit_offset_t overlap_next
293 = MIN (get_next_bit_offset (),
294 other.get_next_bit_offset ());
295 if (overlap_next <= overlap_start)
296 /* If this has happened, some kind of overflow has occurred in
297 our arithmetic. For now, reject such cases. */
298 return false;
299 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
300 *out_this = abs_overlap_bits - get_start_bit_offset ();
301 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
302 return true;
304 else
305 return false;
308 /* Return true if THIS and OTHER intersect and write the number
309 of bits both buffers overlap to *OUT_NUM_OVERLAP_BITS.
311 Otherwise return false. */
313 bool
314 bit_range::intersects_p (const bit_range &other,
315 bit_size_t *out_num_overlap_bits) const
317 if (get_start_bit_offset () < other.get_next_bit_offset ()
318 && other.get_start_bit_offset () < get_next_bit_offset ())
320 bit_offset_t overlap_start = MAX (get_start_bit_offset (),
321 other.get_start_bit_offset ());
322 bit_offset_t overlap_next = MIN (get_next_bit_offset (),
323 other.get_next_bit_offset ());
324 if (overlap_next <= overlap_start)
325 /* If this has happened, some kind of overflow has occurred in
326 our arithmetic. For now, reject such cases. */
327 return false;
328 *out_num_overlap_bits = overlap_next - overlap_start;
329 return true;
331 else
332 return false;
335 /* Return true if THIS exceeds OTHER and write the overhanging
336 bit range to OUT_OVERHANGING_BIT_RANGE. */
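/* An illustrative example (hypothetical values): if THIS covers bits 0-39
   (say, a 5-byte write) and OTHER covers bits 0-31 (a 4-byte destination),
   then OTHER's next bit offset (32) is below THIS's next (40), so this
   returns true and writes {start: 32, size: 8}, i.e. bits 32-39, to
   *OUT_OVERHANGING_BIT_RANGE.  */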
338 bool
339 bit_range::exceeds_p (const bit_range &other,
340 bit_range *out_overhanging_bit_range) const
342 gcc_assert (!empty_p ());
344 if (other.get_next_bit_offset () < get_next_bit_offset ())
346 /* THIS definitely exceeds OTHER. */
347 bit_offset_t start = MAX (get_start_bit_offset (),
348 other.get_next_bit_offset ());
349 bit_offset_t size = get_next_bit_offset () - start;
350 if (size <= 0)
351 /* If this has happened, some kind of overflow has occurred in
352 our arithmetic. For now, reject such cases. */
353 return false;
354 out_overhanging_bit_range->m_start_bit_offset = start;
355 out_overhanging_bit_range->m_size_in_bits = size;
356 return true;
358 else
359 return false;
362 /* Return true if THIS falls short of OFFSET and write the
363 bit range fallen short to OUT_FALL_SHORT_BITS. */
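/* An illustrative example (hypothetical values): if THIS covers bits -8..7
   (start -8, size 16) and OFFSET is 0 (the start of the buffer), then THIS
   starts before OFFSET, so this returns true and writes {start: -8, size: 8}
   to *OUT_FALL_SHORT_BITS, i.e. the bits written before the buffer begins.  */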
365 bool
366 bit_range::falls_short_of_p (bit_offset_t offset,
367 bit_range *out_fall_short_bits) const
369 gcc_assert (!empty_p ());
371 if (get_start_bit_offset () < offset)
373 /* THIS falls short of OFFSET. */
374 bit_offset_t start = get_start_bit_offset ();
375 bit_offset_t size = MIN (offset, get_next_bit_offset ()) - start;
376 if (size <= 0)
377 /* If this has happened, some kind of overflow has occurred in
378 our arithmetic. For now, reject such cases. */
379 return false;
380 out_fall_short_bits->m_start_bit_offset = start;
381 out_fall_short_bits->m_size_in_bits = size;
382 return true;
384 else
385 return false;
389 bit_range::cmp (const bit_range &br1, const bit_range &br2)
391 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
392 br2.m_start_bit_offset))
393 return start_cmp;
395 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
398 /* Offset this range by OFFSET. */
400 bit_range
401 bit_range::operator- (bit_offset_t offset) const
403 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
406 /* If MASK is a contiguous range of set bits, write them
407 to *OUT and return true.
408 Otherwise return false. */
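/* Worked examples (hypothetical mask values): 0x38 (binary 111000) is a
   single run of three set bits starting at bit 3, so *OUT becomes
   {start: 3, size: 3} and the function returns true; 0x5 (binary 101) has
   two separate runs, and 0x0 has none, so both return false.  */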
410 bool
411 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
413 unsigned iter_bit_idx = 0;
414 unsigned HOST_WIDE_INT iter_bit_mask = 1;
416 /* Find the first contiguous run of set bits in MASK. */
418 /* Find first set bit in MASK. */
419 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
421 if (mask & iter_bit_mask)
422 break;
423 iter_bit_idx++;
424 iter_bit_mask <<= 1;
426 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
427 /* MASK is zero. */
428 return false;
430 unsigned first_set_iter_bit_idx = iter_bit_idx;
431 unsigned num_set_bits = 1;
432 iter_bit_idx++;
433 iter_bit_mask <<= 1;
435 /* Find next unset bit in MASK. */
436 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
438 if (!(mask & iter_bit_mask))
439 break;
440 num_set_bits++;
441 iter_bit_idx++;
442 iter_bit_mask <<= 1;
444 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
446 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
447 return true;
450 /* We now have the first contiguous run of set bits in MASK.
451 Fail if any other bits are set. */
452 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
454 if (mask & iter_bit_mask)
455 return false;
456 iter_bit_idx++;
457 iter_bit_mask <<= 1;
460 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
461 return true;
464 /* Attempt to convert this bit_range to a byte_range.
465 Return true if it is possible, writing the result to *OUT.
466 Otherwise return false. */
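/* Worked example, assuming the usual BITS_PER_UNIT of 8: a bit_range of
   {start: 16, size: 32} becomes the byte_range {start: 2, size: 4}, whereas
   {start: 4, size: 8} is rejected since its start is not byte-aligned.  */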
468 bool
469 bit_range::as_byte_range (byte_range *out) const
471 if (m_start_bit_offset % BITS_PER_UNIT == 0
472 && m_size_in_bits % BITS_PER_UNIT == 0)
474 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
475 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
476 return true;
478 return false;
481 /* Dump this object to PP. */
483 void
484 byte_range::dump_to_pp (pretty_printer *pp) const
486 if (m_size_in_bytes == 0)
488 pp_string (pp, "empty");
490 else if (m_size_in_bytes == 1)
492 pp_string (pp, "byte ");
493 pp_wide_int (pp, m_start_byte_offset, SIGNED);
495 else
497 pp_string (pp, "bytes ");
498 pp_wide_int (pp, m_start_byte_offset, SIGNED);
499 pp_string (pp, "-");
500 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
504 /* Dump this object to stderr. */
506 DEBUG_FUNCTION void
507 byte_range::dump () const
509 pretty_printer pp;
510 pp.set_output_stream (stderr);
511 dump_to_pp (&pp);
512 pp_newline (&pp);
513 pp_flush (&pp);
516 /* Generate a JSON value for this byte_range.
517 This is intended for debugging the analyzer rather
518 than serialization. */
520 json::object *
521 byte_range::to_json () const
523 json::object *obj = new json::object ();
524 obj->set ("start_byte_offset",
525 byte_offset_to_json (m_start_byte_offset));
526 obj->set ("size_in_bytes",
527 byte_offset_to_json (m_size_in_bytes));
528 return obj;
531 /* If OTHER is a subset of this, return true and write
532 to *OUT the relative range of OTHER within this.
533 Otherwise return false. */
535 bool
536 byte_range::contains_p (const byte_range &other, byte_range *out) const
538 if (contains_p (other.get_start_byte_offset ())
539 && contains_p (other.get_last_byte_offset ()))
541 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
542 out->m_size_in_bytes = other.m_size_in_bytes;
543 return true;
545 else
546 return false;
549 /* qsort comparator for byte ranges. */
552 byte_range::cmp (const byte_range &br1, const byte_range &br2)
554 /* Order first by offset. */
555 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
556 br2.m_start_byte_offset))
557 return start_cmp;
559 /* ...then by size. */
560 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
563 /* class concrete_binding : public binding_key. */
565 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
567 void
568 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
570 m_bit_range.dump_to_pp (pp);
573 /* Return true if this binding overlaps with OTHER. */
575 bool
576 concrete_binding::overlaps_p (const concrete_binding &other) const
578 if (get_start_bit_offset () < other.get_next_bit_offset ()
579 && get_next_bit_offset () > other.get_start_bit_offset ())
580 return true;
581 return false;
584 /* If this is expressible as a concrete byte range, return true
585 and write it to *OUT. Otherwise return false. */
587 bool
588 concrete_binding::get_byte_range (byte_range *out) const
590 return m_bit_range.as_byte_range (out);
593 /* Comparator for use by vec<const concrete_binding *>::qsort. */
596 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
598 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
599 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
601 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
604 /* class symbolic_binding : public binding_key. */
606 void
607 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
609 //binding_key::dump_to_pp (pp, simple);
610 pp_string (pp, "region: ");
611 m_region->dump_to_pp (pp, simple);
614 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
617 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
619 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
620 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
622 return region::cmp_ids (b1->get_region (), b2->get_region ());
625 /* The store is oblivious to the types of the svalues bound within
626 it: any type can get bound at any location.
627 Simplify any casts before binding.
629 For example, if we have:
630 struct big { int ia[1024]; };
631 struct big src, dst;
632 memcpy (&dst, &src, sizeof (struct big));
633 this reaches us in gimple form as:
634 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
635 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
636 Using cast_region when handling the MEM_REF would give us:
637 INIT_VAL(CAST_REG(unsigned char[4096], src))
638 as rhs_sval, but we can fold that into a cast svalue:
639 CAST(unsigned char[4096], INIT_VAL(src))
640 We can discard that cast from the svalue when binding it in
641 the store for "dst", and simply store:
642 cluster for: dst
643 key: {kind: direct, start: 0, size: 32768, next: 32768}
644 value: ‘struct big’ {INIT_VAL(src)}. */
646 static const svalue *
647 simplify_for_binding (const svalue *sval)
649 if (const svalue *cast_sval = sval->maybe_undo_cast ())
650 sval = cast_sval;
651 return sval;
654 /* class binding_map. */
656 /* binding_map's copy ctor. */
658 binding_map::binding_map (const binding_map &other)
659 : m_map (other.m_map)
663 /* binding_map's assignment operator. */
665 binding_map&
666 binding_map::operator=(const binding_map &other)
668 /* For now, assume we only ever copy to an empty cluster. */
669 gcc_assert (m_map.elements () == 0);
670 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
671 ++iter)
673 const binding_key *key = (*iter).first;
674 const svalue *sval = (*iter).second;
675 m_map.put (key, sval);
677 return *this;
680 /* binding_map's equality operator. */
682 bool
683 binding_map::operator== (const binding_map &other) const
685 if (m_map.elements () != other.m_map.elements ())
686 return false;
688 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
690 const binding_key *key = (*iter).first;
691 const svalue *sval = (*iter).second;
692 const svalue **other_slot
693 = const_cast <map_t &> (other.m_map).get (key);
694 if (other_slot == NULL)
695 return false;
696 if (sval != *other_slot)
697 return false;
699 gcc_checking_assert (hash () == other.hash ());
700 return true;
703 /* Generate a hash value for this binding_map. */
705 hashval_t
706 binding_map::hash () const
708 hashval_t result = 0;
709 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
711 /* Use a new hasher for each key to avoid depending on the ordering
712 of keys when accumulating the result. */
713 inchash::hash hstate;
714 hstate.add_ptr ((*iter).first);
715 hstate.add_ptr ((*iter).second);
716 result ^= hstate.end ();
718 return result;
721 /* Dump a representation of this binding_map to PP.
722 SIMPLE controls how values and regions are to be printed.
723 If MULTILINE, then split the dump over multiple lines and
724 use whitespace for readability, otherwise put all on one line. */
726 void
727 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
728 bool multiline) const
730 auto_vec <const binding_key *> binding_keys;
731 for (map_t::iterator iter = m_map.begin ();
732 iter != m_map.end (); ++iter)
734 const binding_key *key = (*iter).first;
735 binding_keys.safe_push (key);
737 binding_keys.qsort (binding_key::cmp_ptrs);
739 const binding_key *key;
740 unsigned i;
741 FOR_EACH_VEC_ELT (binding_keys, i, key)
743 const svalue *value = *const_cast <map_t &> (m_map).get (key);
744 if (multiline)
746 pp_string (pp, " key: {");
747 key->dump_to_pp (pp, simple);
748 pp_string (pp, "}");
749 pp_newline (pp);
750 pp_string (pp, " value: ");
751 if (tree t = value->get_type ())
752 dump_quoted_tree (pp, t);
753 pp_string (pp, " {");
754 value->dump_to_pp (pp, simple);
755 pp_string (pp, "}");
756 pp_newline (pp);
758 else
760 if (i > 0)
761 pp_string (pp, ", ");
762 pp_string (pp, "binding key: {");
763 key->dump_to_pp (pp, simple);
764 pp_string (pp, "}, value: {");
765 value->dump_to_pp (pp, simple);
766 pp_string (pp, "}");
771 /* Dump a multiline representation of this binding_map to stderr. */
773 DEBUG_FUNCTION void
774 binding_map::dump (bool simple) const
776 pretty_printer pp;
777 pp_format_decoder (&pp) = default_tree_printer;
778 pp_show_color (&pp) = pp_show_color (global_dc->printer);
779 pp.set_output_stream (stderr);
780 dump_to_pp (&pp, simple, true);
781 pp_newline (&pp);
782 pp_flush (&pp);
785 /* Return a new json::object of the form
786 {KEY_DESC : SVALUE_DESC,
787 ...for the various key/value pairs in this binding_map}. */
789 json::object *
790 binding_map::to_json () const
792 json::object *map_obj = new json::object ();
794 auto_vec <const binding_key *> binding_keys;
795 for (map_t::iterator iter = m_map.begin ();
796 iter != m_map.end (); ++iter)
798 const binding_key *key = (*iter).first;
799 binding_keys.safe_push (key);
801 binding_keys.qsort (binding_key::cmp_ptrs);
803 const binding_key *key;
804 unsigned i;
805 FOR_EACH_VEC_ELT (binding_keys, i, key)
807 const svalue *value = *const_cast <map_t &> (m_map).get (key);
808 label_text key_desc = key->get_desc ();
809 map_obj->set (key_desc.get (), value->to_json ());
812 return map_obj;
815 /* Add a child to PARENT_WIDGET expressing a binding between
816 KEY and SVAL. */
818 static void
819 add_binding_to_tree_widget (text_art::tree_widget &parent_widget,
820 const text_art::dump_widget_info &dwi,
821 const binding_key *key,
822 const svalue *sval)
824 pretty_printer the_pp;
825 pretty_printer * const pp = &the_pp;
826 pp_format_decoder (pp) = default_tree_printer;
827 pp_show_color (pp) = true;
828 const bool simple = true;
830 key->dump_to_pp (pp, simple);
831 pp_string (pp, ": ");
832 if (tree t = sval->get_type ())
833 dump_quoted_tree (pp, t);
834 pp_string (pp, " {");
835 sval->dump_to_pp (pp, simple);
836 pp_string (pp, "}");
838 parent_widget.add_child (text_art::tree_widget::make (dwi, pp));
841 void
842 binding_map::add_to_tree_widget (text_art::tree_widget &parent_widget,
843 const text_art::dump_widget_info &dwi) const
845 auto_vec <const binding_key *> binding_keys;
846 for (map_t::iterator iter = m_map.begin ();
847 iter != m_map.end (); ++iter)
849 const binding_key *key = (*iter).first;
850 binding_keys.safe_push (key);
852 binding_keys.qsort (binding_key::cmp_ptrs);
854 const binding_key *key;
855 unsigned i;
856 FOR_EACH_VEC_ELT (binding_keys, i, key)
858 const svalue *sval = *const_cast <map_t &> (m_map).get (key);
859 add_binding_to_tree_widget (parent_widget, dwi,
860 key, sval);
865 /* Comparator for imposing an order on binding_maps. */
868 binding_map::cmp (const binding_map &map1, const binding_map &map2)
870 if (int count_cmp = map1.elements () - map2.elements ())
871 return count_cmp;
873 auto_vec <const binding_key *> keys1 (map1.elements ());
874 for (map_t::iterator iter = map1.begin ();
875 iter != map1.end (); ++iter)
876 keys1.quick_push ((*iter).first);
877 keys1.qsort (binding_key::cmp_ptrs);
879 auto_vec <const binding_key *> keys2 (map2.elements ());
880 for (map_t::iterator iter = map2.begin ();
881 iter != map2.end (); ++iter)
882 keys2.quick_push ((*iter).first);
883 keys2.qsort (binding_key::cmp_ptrs);
885 for (size_t i = 0; i < keys1.length (); i++)
887 const binding_key *k1 = keys1[i];
888 const binding_key *k2 = keys2[i];
889 if (int key_cmp = binding_key::cmp (k1, k2))
890 return key_cmp;
891 gcc_assert (k1 == k2);
892 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
893 return sval_cmp;
896 return 0;
899 /* Get the child region of PARENT_REG based upon INDEX within a
900 CONSTRUCTOR. */
902 static const region *
903 get_subregion_within_ctor (const region *parent_reg, tree index,
904 region_model_manager *mgr)
906 switch (TREE_CODE (index))
908 default:
909 gcc_unreachable ();
910 case INTEGER_CST:
912 const svalue *index_sval
913 = mgr->get_or_create_constant_svalue (index);
914 return mgr->get_element_region (parent_reg,
915 TREE_TYPE (parent_reg->get_type ()),
916 index_sval);
918 break;
919 case FIELD_DECL:
920 return mgr->get_field_region (parent_reg, index);
924 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
926 static const svalue *
927 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
929 /* Reuse the get_rvalue logic from region_model. */
930 region_model m (mgr);
931 return m.get_rvalue (path_var (val, 0), NULL);
934 /* Bind values from CONSTRUCTOR to this map, relative to
935 PARENT_REG's relationship to its base region.
936 Return true if successful, false if there was a problem (e.g. due
937 to hitting a complexity limit). */
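/* A hypothetical illustration: for "struct s { int a; int b; } x = { 1, 2 };"
   each CONSTRUCTOR element is bound at its field's subregion within
   PARENT_REG, whether the entry carries an explicit FIELD_DECL index or the
   index is NULL and the fields are walked via TYPE_FIELDS/DECL_CHAIN; for
   array CONSTRUCTORs a missing index is synthesized as an INTEGER_CST from
   the element's position, and RANGE_EXPR indices are handled by
   apply_ctor_val_to_range below.  */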
939 bool
940 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
941 region_model_manager *mgr)
943 gcc_assert (parent_reg);
944 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
946 unsigned ix;
947 tree index;
948 tree val;
949 tree parent_type = parent_reg->get_type ();
950 tree field;
951 if (TREE_CODE (parent_type) == RECORD_TYPE)
952 field = TYPE_FIELDS (parent_type);
953 else
954 field = NULL_TREE;
955 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
957 if (!index)
959 /* If index is NULL, then iterate through the fields for
960 a RECORD_TYPE, or use an INTEGER_CST otherwise.
961 Compare with similar logic in output_constructor. */
962 if (field)
964 index = field;
965 field = DECL_CHAIN (field);
967 else
968 index = build_int_cst (integer_type_node, ix);
970 else if (TREE_CODE (index) == RANGE_EXPR)
972 tree min_index = TREE_OPERAND (index, 0);
973 tree max_index = TREE_OPERAND (index, 1);
974 if (min_index == max_index)
976 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
977 min_index, val))
978 return false;
980 else
982 if (!apply_ctor_val_to_range (parent_reg, mgr,
983 min_index, max_index, val))
984 return false;
986 continue;
988 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
989 return false;
991 return true;
994 /* Bind the value VAL into the range of elements within PARENT_REG
995 from MIN_INDEX to MAX_INDEX (including endpoints).
996 For use in handling RANGE_EXPR within a CONSTRUCTOR.
997 Return true if successful, false if there was a problem (e.g. due
998 to hitting a complexity limit). */
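/* A hypothetical illustration: for "int arr[8] = { [2 ... 5] = 7 };" the
   RANGE_EXPR gives MIN_INDEX 2 and MAX_INDEX 5; assuming 32-bit int, the
   code below builds a single concrete key from element 2's start offset
   (bit 64) to element 5's next offset (bit 192), i.e. {start: 64, size: 128},
   and binds the constant svalue for 7 to it.  */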
1000 bool
1001 binding_map::apply_ctor_val_to_range (const region *parent_reg,
1002 region_model_manager *mgr,
1003 tree min_index, tree max_index,
1004 tree val)
1006 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
1007 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
1009 /* Generate a binding key for the range. */
1010 const region *min_element
1011 = get_subregion_within_ctor (parent_reg, min_index, mgr);
1012 const region *max_element
1013 = get_subregion_within_ctor (parent_reg, max_index, mgr);
1014 region_offset min_offset = min_element->get_offset (mgr);
1015 if (min_offset.symbolic_p ())
1016 return false;
1017 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
1018 store_manager *smgr = mgr->get_store_manager ();
1019 if (max_element->empty_p ())
1020 return false;
1021 const binding_key *max_element_key = binding_key::make (smgr, max_element);
1022 if (max_element_key->symbolic_p ())
1023 return false;
1024 const concrete_binding *max_element_ckey
1025 = max_element_key->dyn_cast_concrete_binding ();
1026 bit_size_t range_size_in_bits
1027 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
1028 const concrete_binding *range_key
1029 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
1030 if (range_key->symbolic_p ())
1031 return false;
1033 /* Get the value. */
1034 if (TREE_CODE (val) == CONSTRUCTOR)
1035 return false;
1036 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
1038 /* Bind the value to the range. */
1039 put (range_key, sval);
1040 return true;
1043 /* Bind the value VAL into INDEX within PARENT_REG.
1044 For use in handling a pair of entries within a CONSTRUCTOR.
1045 Return true if successful, false if there was a problem (e.g. due
1046 to hitting a complexity limit). */
1048 bool
1049 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
1050 region_model_manager *mgr,
1051 tree index, tree val)
1053 const region *child_reg
1054 = get_subregion_within_ctor (parent_reg, index, mgr);
1055 if (TREE_CODE (val) == CONSTRUCTOR)
1056 return apply_ctor_to_region (child_reg, val, mgr);
1057 else
1059 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
1060 if (child_reg->empty_p ())
1061 return false;
1062 const binding_key *k
1063 = binding_key::make (mgr->get_store_manager (), child_reg);
1064 /* Handle the case where we have an unknown size for child_reg
1065 (e.g. due to it being a trailing field with incomplete array
1066 type).  */
1067 if (!k->concrete_p ())
1069 /* Assume that sval has a well-defined size for this case. */
1070 tree sval_type = sval->get_type ();
1071 gcc_assert (sval_type);
1072 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
1073 gcc_assert (sval_byte_size != -1);
1074 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
1075 /* Get offset of child relative to base region. */
1076 region_offset child_base_offset = child_reg->get_offset (mgr);
1077 if (child_base_offset.symbolic_p ())
1078 return false;
1079 /* Convert to an offset relative to the parent region. */
1080 region_offset parent_base_offset = parent_reg->get_offset (mgr);
1081 gcc_assert (!parent_base_offset.symbolic_p ());
1082 bit_offset_t child_parent_offset
1083 = (child_base_offset.get_bit_offset ()
1084 - parent_base_offset.get_bit_offset ());
1085 /* Create a concrete key for the child within the parent. */
1086 k = mgr->get_store_manager ()->get_concrete_binding
1087 (child_parent_offset, sval_bit_size);
1089 gcc_assert (k->concrete_p ());
1090 put (k, sval);
1091 return true;
1095 /* Populate OUT with all bindings within this map that overlap KEY. */
1097 void
1098 binding_map::get_overlapping_bindings (const binding_key *key,
1099 auto_vec<const binding_key *> *out)
1101 for (auto iter : *this)
1103 const binding_key *iter_key = iter.first;
1104 if (const concrete_binding *ckey
1105 = key->dyn_cast_concrete_binding ())
1107 if (const concrete_binding *iter_ckey
1108 = iter_key->dyn_cast_concrete_binding ())
1110 if (ckey->overlaps_p (*iter_ckey))
1111 out->safe_push (iter_key);
1113 else
1115 /* Assume overlap. */
1116 out->safe_push (iter_key);
1119 else
1121 /* Assume overlap. */
1122 out->safe_push (iter_key);
1127 /* Remove, truncate, and/or split any bindings within this map that
1128 overlap DROP_KEY.
1130 For example, if we have:
1132 +------------------------------------+
1133 | old binding |
1134 +------------------------------------+
1136 which is to be overwritten with:
1138 .......+----------------------+.......
1139 .......| new binding |.......
1140 .......+----------------------+.......
1142 this function "cuts a hole" out of the old binding:
1144 +------+......................+------+
1145 |prefix| hole for new binding |suffix|
1146 +------+......................+------+
1148 into which the new binding can be added without
1149 overlapping the prefix or suffix.
1151 The prefix and suffix (if added) will be bound to the pertinent
1152 parts of the value of the old binding.
1154 For example, given:
1155 struct s5
1157 char arr[8];
1159 void test_5 (struct s5 *p)
1161 struct s5 f = *p;
1162 f.arr[3] = 42;
1164 then after the "f = *p;" we have:
1165 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1166 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1167 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1168 giving:
1169 cluster for: f
1170 key: {bytes 0-2}
1171 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1172 key: {bytes 4-7}
1173 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1174 punching a hole into which the new value can be written at byte 3:
1175 cluster for: f
1176 key: {bytes 0-2}
1177 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1178 key: {byte 3}
1179 value: 'char' {(char)42}
1180 key: {bytes 4-7}
1181 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1183 If UNCERTAINTY is non-NULL, use it to record any svalues that
1184 were removed, as being maybe-bound.
1186 If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
1187 were removed as being maybe-live.
1189 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1190 in the map, due to one or both of the underlying clusters being
1191 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1192 concrete binding it could actually be referring to the same memory as
1193 distinct concrete bindings in the map. Remove all bindings, but
1194 register any svalues with *UNCERTAINTY. */
1196 void
1197 binding_map::remove_overlapping_bindings (store_manager *mgr,
1198 const binding_key *drop_key,
1199 uncertainty_t *uncertainty,
1200 svalue_set *maybe_live_values,
1201 bool always_overlap)
1203 /* Get the bindings of interest within this map. */
1204 auto_vec<const binding_key *> bindings;
1205 if (always_overlap)
1206 for (auto iter : *this)
1207 bindings.safe_push (iter.first); /* Add all bindings. */
1208 else
1209 /* Just add overlapping bindings. */
1210 get_overlapping_bindings (drop_key, &bindings);
1212 unsigned i;
1213 const binding_key *iter_binding;
1214 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1216 /* Record any svalues that were removed to *UNCERTAINTY as being
1217 maybe-bound, provided at least some part of the binding is symbolic.
1219 Specifically, if at least one of the bindings is symbolic, or we
1220 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1221 regions, then we don't know that the svalue has been overwritten,
1222 and should record that to *UNCERTAINTY.
1224 However, if we have concrete keys accessing within the same symbolic
1225 region, then we *know* that the symbolic region has been overwritten,
1226 so we don't record it to *UNCERTAINTY, as this could be a genuine
1227 leak. */
1228 const svalue *old_sval = get (iter_binding);
1229 if (uncertainty
1230 && (drop_key->symbolic_p ()
1231 || iter_binding->symbolic_p ()
1232 || always_overlap))
1233 uncertainty->on_maybe_bound_sval (old_sval);
1235 /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
1236 maybe-live. */
1237 if (maybe_live_values)
1238 maybe_live_values->add (old_sval);
1240 /* Begin by removing the old binding. */
1241 m_map.remove (iter_binding);
1243 /* Don't attempt to handle prefixes/suffixes for the
1244 "always_overlap" case; everything's being removed. */
1245 if (always_overlap)
1246 continue;
1248 /* Now potentially add the prefix and suffix. */
1249 if (const concrete_binding *drop_ckey
1250 = drop_key->dyn_cast_concrete_binding ())
1251 if (const concrete_binding *iter_ckey
1252 = iter_binding->dyn_cast_concrete_binding ())
1254 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1256 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1257 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1259 if (iter_bits.get_start_bit_offset ()
1260 < drop_bits.get_start_bit_offset ())
1262 /* We have a truncated prefix. */
1263 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1264 (drop_bits.get_start_bit_offset ()
1265 - iter_bits.get_start_bit_offset ()));
1266 const concrete_binding *prefix_key
1267 = mgr->get_concrete_binding (prefix_bits);
1268 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1269 const svalue *prefix_sval
1270 = old_sval->extract_bit_range (NULL_TREE,
1271 rel_prefix,
1272 mgr->get_svalue_manager ());
1273 m_map.put (prefix_key, prefix_sval);
1276 if (iter_bits.get_next_bit_offset ()
1277 > drop_bits.get_next_bit_offset ())
1279 /* We have a truncated suffix. */
1280 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1281 (iter_bits.get_next_bit_offset ()
1282 - drop_bits.get_next_bit_offset ()));
1283 const concrete_binding *suffix_key
1284 = mgr->get_concrete_binding (suffix_bits);
1285 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1286 - iter_bits.get_start_bit_offset (),
1287 suffix_bits.m_size_in_bits);
1288 const svalue *suffix_sval
1289 = old_sval->extract_bit_range (NULL_TREE,
1290 rel_suffix,
1291 mgr->get_svalue_manager ());
1292 m_map.put (suffix_key, suffix_sval);
1298 /* class binding_cluster. */
1300 binding_cluster::binding_cluster (const region *base_region)
1301 : m_base_region (base_region), m_map (),
1302 m_escaped (false), m_touched (false)
1306 /* binding_cluster's copy ctor. */
1308 binding_cluster::binding_cluster (const binding_cluster &other)
1309 : m_base_region (other.m_base_region), m_map (other.m_map),
1310 m_escaped (other.m_escaped), m_touched (other.m_touched)
1314 /* binding_cluster's assignment operator. */
1316 binding_cluster&
1317 binding_cluster::operator= (const binding_cluster &other)
1319 gcc_assert (m_base_region == other.m_base_region);
1320 m_map = other.m_map;
1321 m_escaped = other.m_escaped;
1322 m_touched = other.m_touched;
1323 return *this;
1326 /* binding_cluster's equality operator. */
1328 bool
1329 binding_cluster::operator== (const binding_cluster &other) const
1331 if (m_map != other.m_map)
1332 return false;
1334 if (m_base_region != other.m_base_region)
1335 return false;
1337 if (m_escaped != other.m_escaped)
1338 return false;
1340 if (m_touched != other.m_touched)
1341 return false;
1343 gcc_checking_assert (hash () == other.hash ());
1345 return true;
1348 /* Generate a hash value for this binding_cluster. */
1350 hashval_t
1351 binding_cluster::hash () const
1353 return m_map.hash ();
1356 /* Return true if this binding_cluster is symbolic
1357 i.e. its base region is symbolic. */
1359 bool
1360 binding_cluster::symbolic_p () const
1362 return m_base_region->get_kind () == RK_SYMBOLIC;
1365 /* Dump a representation of this binding_cluster to PP.
1366 SIMPLE controls how values and regions are to be printed.
1367 If MULTILINE, then split the dump over multiple lines and
1368 use whitespace for readability, otherwise put all on one line. */
1370 void
1371 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1372 bool multiline) const
1374 if (m_escaped)
1376 if (multiline)
1378 pp_string (pp, " ESCAPED");
1379 pp_newline (pp);
1381 else
1382 pp_string (pp, "(ESCAPED)");
1384 if (m_touched)
1386 if (multiline)
1388 pp_string (pp, " TOUCHED");
1389 pp_newline (pp);
1391 else
1392 pp_string (pp, "(TOUCHED)");
1395 m_map.dump_to_pp (pp, simple, multiline);
1398 /* Dump a multiline representation of this binding_cluster to stderr. */
1400 DEBUG_FUNCTION void
1401 binding_cluster::dump (bool simple) const
1403 pretty_printer pp;
1404 pp_format_decoder (&pp) = default_tree_printer;
1405 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1406 pp.set_output_stream (stderr);
1407 pp_string (&pp, " cluster for: ");
1408 m_base_region->dump_to_pp (&pp, simple);
1409 pp_string (&pp, ": ");
1410 pp_newline (&pp);
1411 dump_to_pp (&pp, simple, true);
1412 pp_newline (&pp);
1413 pp_flush (&pp);
1416 /* Assert that this object is valid. */
1418 void
1419 binding_cluster::validate () const
1421 int num_symbolic = 0;
1422 int num_concrete = 0;
1423 for (auto iter : m_map)
1425 if (iter.first->symbolic_p ())
1426 num_symbolic++;
1427 else
1428 num_concrete++;
1430 /* We shouldn't have more than one symbolic key per cluster
1431 (or one would have clobbered the other). */
1432 gcc_assert (num_symbolic < 2);
1433 /* We can't have both concrete and symbolic keys. */
1434 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1437 /* Return a new json::object of the form
1438 {"escaped": true/false,
1439 "touched": true/false,
1440 "map" : object for the binding_map. */
1442 json::object *
1443 binding_cluster::to_json () const
1445 json::object *cluster_obj = new json::object ();
1447 cluster_obj->set_bool ("escaped", m_escaped);
1448 cluster_obj->set_bool ("touched", m_touched);
1449 cluster_obj->set ("map", m_map.to_json ());
1451 return cluster_obj;
1454 std::unique_ptr<text_art::tree_widget>
1455 binding_cluster::make_dump_widget (const text_art::dump_widget_info &dwi,
1456 store_manager *mgr) const
1458 pretty_printer the_pp;
1459 pretty_printer * const pp = &the_pp;
1460 pp_format_decoder (pp) = default_tree_printer;
1461 pp_show_color (pp) = true;
1462 const bool simple = true;
1464 m_base_region->dump_to_pp (pp, simple);
1465 pp_string (pp, ": ");
1467 if (const svalue *sval = maybe_get_simple_value (mgr))
1469 /* Special-case to simplify dumps for the common case where
1470 we just have one value directly bound to the whole of a
1471 region. */
1472 sval->dump_to_pp (pp, simple);
1473 if (escaped_p ())
1474 pp_string (pp, " (ESCAPED)");
1475 if (touched_p ())
1476 pp_string (pp, " (TOUCHED)");
1478 return text_art::tree_widget::make (dwi, pp);
1480 else
1482 if (escaped_p ())
1483 pp_string (pp, " (ESCAPED)");
1484 if (touched_p ())
1485 pp_string (pp, " (TOUCHED)");
1487 std::unique_ptr<text_art::tree_widget> cluster_widget
1488 (text_art::tree_widget::make (dwi, pp));
1490 m_map.add_to_tree_widget (*cluster_widget, dwi);
1492 return cluster_widget;
1496 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1497 compound_svalue. */
1499 void
1500 binding_cluster::bind (store_manager *mgr,
1501 const region *reg, const svalue *sval)
1503 if (const compound_svalue *compound_sval
1504 = sval->dyn_cast_compound_svalue ())
1506 bind_compound_sval (mgr, reg, compound_sval);
1507 return;
1510 if (reg->empty_p ())
1511 return;
1512 const binding_key *binding = binding_key::make (mgr, reg);
1513 bind_key (binding, sval);
1516 /* Bind SVAL to KEY.
1517 Unpacking of compound_svalues should already have been done by the
1518 time this is called. */
1520 void
1521 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1523 gcc_assert (sval->get_kind () != SK_COMPOUND);
1525 m_map.put (key, sval);
1526 if (key->symbolic_p ())
1527 m_touched = true;
1530 /* Subroutine of binding_cluster::bind.
1531 Unpack compound_svals when binding them, so that we bind them
1532 element-wise. */
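/* An illustrative example (hypothetical offsets): if REG starts at a
   concrete offset of 64 bits within the cluster's base region and
   COMPOUND_SVAL has parts bound at bits 0-31 and 32-63 relative to itself,
   the loop below rebinds those parts at bits 64-95 and 96-127 of the
   cluster.  If REG's offset is symbolic, we instead mark the cluster as
   touched and clobber REG.  */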
1534 void
1535 binding_cluster::bind_compound_sval (store_manager *mgr,
1536 const region *reg,
1537 const compound_svalue *compound_sval)
1539 region_offset reg_offset
1540 = reg->get_offset (mgr->get_svalue_manager ());
1541 if (reg_offset.symbolic_p ())
1543 m_touched = true;
1544 clobber_region (mgr, reg);
1545 return;
1548 for (map_t::iterator iter = compound_sval->begin ();
1549 iter != compound_sval->end (); ++iter)
1551 const binding_key *iter_key = (*iter).first;
1552 const svalue *iter_sval = (*iter).second;
1554 if (const concrete_binding *concrete_key
1555 = iter_key->dyn_cast_concrete_binding ())
1557 bit_offset_t effective_start
1558 = (concrete_key->get_start_bit_offset ()
1559 + reg_offset.get_bit_offset ());
1560 const concrete_binding *effective_concrete_key
1561 = mgr->get_concrete_binding (effective_start,
1562 concrete_key->get_size_in_bits ());
1563 bind_key (effective_concrete_key, iter_sval);
1565 else
1566 gcc_unreachable ();
1570 /* Remove all bindings overlapping REG within this cluster. */
1572 void
1573 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1575 remove_overlapping_bindings (mgr, reg, NULL, NULL);
1578 /* Remove any bindings for REG within this cluster. */
1580 void
1581 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1583 gcc_assert (reg->get_kind () == RK_DECL);
1584 if (reg->empty_p ())
1585 return;
1586 const binding_key *binding
1587 = binding_key::make (mgr, const_cast<region *> (reg));
1588 m_map.remove (binding);
1591 /* Clobber REG and fill it with repeated copies of SVAL. */
1593 void
1594 binding_cluster::fill_region (store_manager *mgr,
1595 const region *reg,
1596 const svalue *sval)
1598 clobber_region (mgr, reg);
1600 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1601 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1602 const svalue *fill_sval
1603 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1604 byte_size_sval, sval);
1605 bind (mgr, reg, fill_sval);
1608 /* Clobber REG within this cluster and fill it with zeroes. */
1610 void
1611 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1613 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1614 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1615 fill_region (mgr, reg, zero_sval);
1618 /* Mark REG_TO_BIND within this cluster as being unknown.
1620 Remove any bindings overlapping REG_FOR_OVERLAP.
1621 If UNCERTAINTY is non-NULL, use it to record any svalues that
1622 had bindings to them removed, as being maybe-bound.
1623 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1624 had bindings to them removed, as being maybe-live.
1626 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1627 store::mark_region_as_unknown, but are different in
1628 store::set_value's alias handling, for handling the case where
1629 we have a write to a symbolic REG_FOR_OVERLAP. */
1631 void
1632 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1633 const region *reg_to_bind,
1634 const region *reg_for_overlap,
1635 uncertainty_t *uncertainty,
1636 svalue_set *maybe_live_values)
1638 if (reg_to_bind->empty_p ())
1639 return;
1641 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty,
1642 maybe_live_values);
1644 /* Add a default binding to "unknown". */
1645 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1646 const svalue *sval
1647 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1648 bind (mgr, reg_to_bind, sval);
1651 /* Purge state involving SVAL. */
1653 void
1654 binding_cluster::purge_state_involving (const svalue *sval,
1655 region_model_manager *sval_mgr)
1657 auto_vec<const binding_key *> to_remove;
1658 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1659 for (auto iter : m_map)
1661 const binding_key *iter_key = iter.first;
1662 if (const symbolic_binding *symbolic_key
1663 = iter_key->dyn_cast_symbolic_binding ())
1665 const region *reg = symbolic_key->get_region ();
1666 if (reg->involves_p (sval))
1667 to_remove.safe_push (iter_key);
1669 const svalue *iter_sval = iter.second;
1670 if (iter_sval->involves_p (sval))
1671 to_make_unknown.safe_push (std::make_pair(iter_key,
1672 iter_sval->get_type ()));
1674 for (auto iter : to_remove)
1676 m_map.remove (iter);
1677 m_touched = true;
1679 for (auto iter : to_make_unknown)
1681 const svalue *new_sval
1682 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1683 m_map.put (iter.first, new_sval);
1687 /* Get any SVAL bound to REG within this cluster,
1688 without checking parent regions of REG. */
1690 const svalue *
1691 binding_cluster::get_binding (store_manager *mgr,
1692 const region *reg) const
1694 if (reg->empty_p ())
1695 return NULL;
1696 const binding_key *reg_binding = binding_key::make (mgr, reg);
1697 const svalue *sval = m_map.get (reg_binding);
1698 if (sval)
1700 /* If we have a struct with a single field, then the binding of
1701 the field will equal that of the struct, and looking up e.g.
1702 PARENT_REG.field within:
1703 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1704 will erroneously return INIT_VAL(OTHER_REG), rather than
1705 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1706 Fix this issue by iterating upwards whilst the bindings are equal,
1707 expressing the lookups as subvalues.
1708 We have to gather a list of subregion accesses, then walk it
1709 in reverse to get the subvalues. */
1710 auto_vec<const region *> regions;
1711 while (const region *parent_reg = reg->get_parent_region ())
1713 const binding_key *parent_reg_binding
1714 = binding_key::make (mgr, parent_reg);
1715 if (parent_reg_binding == reg_binding
1716 && sval->get_type ()
1717 && reg->get_type ()
1718 && sval->get_type () != reg->get_type ())
1720 regions.safe_push (reg);
1721 reg = parent_reg;
1723 else
1724 break;
1726 if (sval->get_type ()
1727 && reg->get_type ()
1728 && sval->get_type () == reg->get_type ())
1730 unsigned i;
1731 const region *iter_reg;
1732 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1734 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1735 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1736 sval, iter_reg);
1740 return sval;
1743 /* Get any SVAL bound to REG within this cluster,
1744 either directly for REG, or recursively checking for bindings within
1745 parent regions and extracting subvalues if need be. */
1747 const svalue *
1748 binding_cluster::get_binding_recursive (store_manager *mgr,
1749 const region *reg) const
1751 if (const svalue *sval = get_binding (mgr, reg))
1752 return sval;
1753 if (reg != m_base_region)
1754 if (const region *parent_reg = reg->get_parent_region ())
1755 if (const svalue *parent_sval
1756 = get_binding_recursive (mgr, parent_reg))
1758 /* Extract child svalue from parent svalue. */
1759 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1760 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1761 parent_sval, reg);
1763 return NULL;
1766 /* Get any value bound for REG within this cluster. */
1768 const svalue *
1769 binding_cluster::get_any_binding (store_manager *mgr,
1770 const region *reg) const
1772 /* Look for a direct binding. */
1773 if (const svalue *direct_sval
1774 = get_binding_recursive (mgr, reg))
1775 return direct_sval;
1777 /* If we had a write to a cluster of unknown size, we might
1778 have a self-binding of the whole base region with an svalue,
1779 where the base region is symbolic.
1780 Handle such cases by returning sub_svalue instances. */
1781 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1783 /* Extract child svalue from parent svalue. */
1784 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1785 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1786 cluster_sval, reg);
1789 /* If this cluster has been touched by a symbolic write, then the content
1790 of any subregion not currently specifically bound is "UNKNOWN". */
1791 if (m_touched)
1793 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1794 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1797 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1798 then we don't know if we're reading those values or not, so the result
1799 is also "UNKNOWN". */
1800 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1801 && m_map.elements () > 0)
1803 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1804 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1807 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1808 return compound_sval;
1810 /* Otherwise, the initial value, or uninitialized. */
1811 return NULL;
1814 /* Attempt to get a compound_svalue for the bindings within the cluster
1815 affecting REG (which could be the base region itself).
1817 Create a compound_svalue with the subset of bindings that affect REG,
1818 offsetting them so that the offsets are relative to the start of REG
1819 within the cluster.
1821 For example, REG could be one element within an array of structs.
1823 Return the resulting compound_svalue, or NULL if there's a problem. */
1825 const svalue *
1826 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1827 const region *reg) const
1829 region_offset cluster_offset
1830 = m_base_region->get_offset (mgr->get_svalue_manager ());
1831 if (cluster_offset.symbolic_p ())
1832 return NULL;
1833 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1834 if (reg_offset.symbolic_p ())
1835 return NULL;
1837 if (reg->empty_p ())
1838 return NULL;
1840 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1842 /* We will build the result map in two parts:
1843 (a) result_map, holding the concrete keys from this cluster,
1845 (b) default_map, holding the initial values for the region
1846 (e.g. uninitialized, initializer values, or zero), unless this
1847 cluster has been touched.
1849 We will populate (a), and as we do, clobber (b), trimming and
1850 splitting its bindings as necessary.
1851 Finally, we will merge (b) into (a), giving a concrete map
1852 that merges both the initial values and the bound values from
1853 the binding_cluster.
1854 Doing it this way reduces N for the O(N^2) intersection-finding;
1855 perhaps we should have a spatially-organized data structure for
1856 concrete keys, though. */
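  /* An illustrative example (hypothetical layout): if REG is element [1]
     of an array of two 8-byte structs, REG covers bits 64-127 of the
     cluster.  A concrete binding of the cluster for bits 96-127 is added
     to result_map re-keyed as bits 32-63 relative to REG, and that hole is
     punched out of default_map, whose single initial binding for bits 0-63
     is trimmed to bits 0-31; merging then yields a compound value covering
     all of REG.  */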
1858 binding_map result_map;
1859 binding_map default_map;
1861 /* Set up default values in default_map. */
1862 const svalue *default_sval;
1863 if (m_touched)
1864 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1865 else
1866 default_sval = sval_mgr->get_or_create_initial_value (reg);
1867 const binding_key *default_key = binding_key::make (mgr, reg);
1869 /* Express the bit-range of the default key for REG relative to REG,
1870 rather than to the base region. */
1871 const concrete_binding *concrete_default_key
1872 = default_key->dyn_cast_concrete_binding ();
1873 if (!concrete_default_key)
1874 return nullptr;
1875 const concrete_binding *default_key_relative_to_reg
1876 = mgr->get_concrete_binding (0, concrete_default_key->get_size_in_bits ());
1877 default_map.put (default_key_relative_to_reg, default_sval);
1879 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1881 const binding_key *key = (*iter).first;
1882 const svalue *sval = (*iter).second;
1884 if (const concrete_binding *concrete_key
1885 = key->dyn_cast_concrete_binding ())
1887 const bit_range &bound_range = concrete_key->get_bit_range ();
1889 bit_size_t reg_bit_size;
1890 if (!reg->get_bit_size (&reg_bit_size))
1891 return NULL;
1893 bit_range reg_range (reg_offset.get_bit_offset (),
1894 reg_bit_size);
1896 /* Skip bindings that are outside the bit range of REG. */
1897 if (!bound_range.intersects_p (reg_range))
1898 continue;
1900 /* We shouldn't have an exact match; that should have been
1901 handled already. */
1902 gcc_assert (!(reg_range == bound_range));
1904 bit_range subrange (0, 0);
1905 if (reg_range.contains_p (bound_range, &subrange))
1907 /* We have a bound range fully within REG.
1908 Add it to map, offsetting accordingly. */
1910 /* Get offset of KEY relative to REG, rather than to
1911 the cluster. */
1912 const concrete_binding *offset_concrete_key
1913 = mgr->get_concrete_binding (subrange);
1914 result_map.put (offset_concrete_key, sval);
1916 /* Clobber default_map, removing/trimming/splitting where
1917 it overlaps with offset_concrete_key. */
1918 default_map.remove_overlapping_bindings (mgr,
1919 offset_concrete_key,
1920 NULL, NULL, false);
1922 else if (bound_range.contains_p (reg_range, &subrange))
1924 /* REG is fully within the bound range, but
1925 is not equal to it; we're extracting a subvalue. */
1926 return sval->extract_bit_range (reg->get_type (),
1927 subrange,
1928 mgr->get_svalue_manager ());
1930 else
1932 /* REG and the bound range partially overlap. */
1933 bit_range reg_subrange (0, 0);
1934 bit_range bound_subrange (0, 0);
1935 reg_range.intersects_p (bound_range,
1936 &reg_subrange, &bound_subrange);
1938 /* Get the bits from the bound value for the bits at the
1939 intersection (relative to the bound value). */
1940 const svalue *overlap_sval
1941 = sval->extract_bit_range (NULL_TREE,
1942 bound_subrange,
1943 mgr->get_svalue_manager ());
1945 /* Get key for overlap, relative to the REG. */
1946 const concrete_binding *overlap_concrete_key
1947 = mgr->get_concrete_binding (reg_subrange);
1948 result_map.put (overlap_concrete_key, overlap_sval);
1950 /* Clobber default_map, removing/trimming/splitting where
1951 it overlaps with overlap_concrete_key. */
1952 default_map.remove_overlapping_bindings (mgr,
1953 overlap_concrete_key,
1954 NULL, NULL, false);
1957 else
1958 /* Can't handle symbolic bindings. */
1959 return NULL;
1962 if (result_map.elements () == 0)
1963 return NULL;
1965 /* Merge any bindings from default_map into result_map. */
1966 for (auto iter : default_map)
1968 const binding_key *key = iter.first;
1969 const svalue *sval = iter.second;
1970 result_map.put (key, sval);
1973 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
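/* An illustrative sketch of the code above (not from the analyzer
   sources): suppose a 64-bit struct has bits 0-15 bound to (short)1 and
   we ask for a 32-bit field covering bits 0-31.  The binding is neither
   an exact match for the field nor does it contain it, but the field
   contains the binding, so the (short)1 is re-expressed as bits 0-15 of
   the field in result_map, default_map's entry for the field is trimmed
   where it overlaps, and the surviving default for bits 16-31 is merged
   back in, yielding a compound svalue for the field.  If instead we
   asked for a 16-bit subfield lying wholly inside the bound range, the
   extract_bit_range path above would return a subvalue directly.  */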
1976 /* Remove, truncate, and/or split any bindings within this map that
1977 could overlap REG.
1979 If REG's base region or this cluster is symbolic and they're different
1980 base regions, then remove everything in this cluster's map, on the
1981 grounds that REG could be referring to the same memory as anything
1982 in the map.
1984 If UNCERTAINTY is non-NULL, use it to record any svalues that
1985 were removed, as being maybe-bound.
1987 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1988 were removed, as being maybe-live. */
1990 void
1991 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1992 const region *reg,
1993 uncertainty_t *uncertainty,
1994 svalue_set *maybe_live_values)
1996 if (reg->empty_p ())
1997 return;
1998 const binding_key *reg_binding = binding_key::make (mgr, reg);
2000 const region *cluster_base_reg = get_base_region ();
2001 const region *other_base_reg = reg->get_base_region ();
2002 /* If at least one of the base regions involved is symbolic, and they're
2003 not the same base region, then consider everything in the map as
2004 potentially overlapping with reg_binding (even if it's a concrete
2005 binding and things in the map are concrete - they could be referring
2006 to the same memory when the symbolic base regions are taken into
2007 account). */
2008 bool always_overlap = (cluster_base_reg != other_base_reg
2009 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
2010 || other_base_reg->get_kind () == RK_SYMBOLIC));
2011 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
2012 maybe_live_values,
2013 always_overlap);
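/* Illustrative example (not from this file) of the ALWAYS_OVERLAP rule
   above: given
     int g;
     void f (int *p) { g = 1; *p = 2; }
   when g's cluster is asked to remove bindings that could overlap the
   write to *p, the two base regions differ and *p's base region is
   symbolic, so g's concrete binding is treated as potentially
   overlapping and is removed, even though no concrete overlap can be
   shown.  */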
2016 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
2017 MGR and MERGER.
2018 Return true if they can be merged, false otherwise. */
2020 bool
2021 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
2022 const binding_cluster *cluster_b,
2023 binding_cluster *out_cluster,
2024 store *out_store,
2025 store_manager *mgr,
2026 model_merger *merger)
2028 gcc_assert (out_cluster);
2030 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
2031 true if either of the inputs is true. */
2032 if ((cluster_a && cluster_a->m_escaped)
2033 || (cluster_b && cluster_b->m_escaped))
2034 out_cluster->m_escaped = true;
2035 if ((cluster_a && cluster_a->m_touched)
2036 || (cluster_b && cluster_b->m_touched))
2037 out_cluster->m_touched = true;
2039 /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
2040 could be NULL. Handle these cases. */
2041 if (cluster_a == NULL)
2043 gcc_assert (cluster_b != NULL);
2044 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
2045 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
2046 return true;
2048 if (cluster_b == NULL)
2050 gcc_assert (cluster_a != NULL);
2051 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
2052 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
2053 return true;
2056 /* The "both inputs are non-NULL" case. */
2057 gcc_assert (cluster_a != NULL && cluster_b != NULL);
2058 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
2059 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
2061 hash_set<const binding_key *> keys;
2062 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
2063 iter_a != cluster_a->m_map.end (); ++iter_a)
2065 const binding_key *key_a = (*iter_a).first;
2066 keys.add (key_a);
2068 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
2069 iter_b != cluster_b->m_map.end (); ++iter_b)
2071 const binding_key *key_b = (*iter_b).first;
2072 keys.add (key_b);
2074 int num_symbolic_keys = 0;
2075 int num_concrete_keys = 0;
2076 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
2077 iter != keys.end (); ++iter)
2079 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2080 const binding_key *key = *iter;
2081 const svalue *sval_a = cluster_a->get_any_value (key);
2082 const svalue *sval_b = cluster_b->get_any_value (key);
2084 if (key->symbolic_p ())
2085 num_symbolic_keys++;
2086 else
2087 num_concrete_keys++;
2089 if (sval_a == sval_b)
2091 gcc_assert (sval_a);
2092 out_cluster->m_map.put (key, sval_a);
2093 continue;
2095 else if (sval_a && sval_b)
2097 if (const svalue *merged_sval
2098 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
2100 out_cluster->m_map.put (key, merged_sval);
2101 continue;
2103 /* Merger of the svalues failed. Reject merger of the cluster. */
2104 return false;
2107 /* If we get here, then one cluster binds this key and the other
2108 doesn't; merge them as "UNKNOWN". */
2109 gcc_assert (sval_a || sval_b);
2111 const svalue *bound_sval = sval_a ? sval_a : sval_b;
2112 tree type = bound_sval->get_type ();
2113 const svalue *unknown_sval
2114 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
2116 /* ...but reject the merger if this sval shouldn't be mergeable
2117 (e.g. reject merging svalues that have non-purgable sm-state,
2118 to avoid falsely reporting memory leaks by merging them
2119 with something else). */
2120 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
2121 return false;
2123 out_cluster->m_map.put (key, unknown_sval);
2126 /* We can have at most one symbolic key per cluster,
2127 and if we have one, we can't also have any concrete keys.
2128 If these constraints are violated, mark the cluster as touched, with no keys.
2129 if (num_symbolic_keys >= 2
2130 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
2132 out_cluster->m_touched = true;
2133 out_cluster->m_map.empty ();
2136 /* We don't handle other kinds of overlaps yet. */
2138 return true;
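/* A sketch of the key-by-key merging above (illustrative values, not
   from the analyzer sources):
     cluster A: {bits 0-31: (int)42, bits 32-63: (int)7}
     cluster B: {bits 0-31: (int)42}
   Bits 0-31 agree and are kept as (int)42.  Bits 32-63 are bound only
   in A, so, provided (int)7 can merge with UNKNOWN, the merged cluster
   binds them to UNKNOWN(int):
     merged:    {bits 0-31: (int)42, bits 32-63: UNKNOWN(int)}  */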
2141 /* Update this cluster to reflect an attempt to merge OTHER where there
2142 is no other cluster to merge with, and so we're notionally merging the
2143 bound values in OTHER with the initial value of the relevant regions.
2145 Any bound keys in OTHER should be bound to unknown in this. */
2147 void
2148 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
2149 store *out_store,
2150 store_manager *mgr)
2152 for (map_t::iterator iter = other->m_map.begin ();
2153 iter != other->m_map.end (); ++iter)
2155 const binding_key *iter_key = (*iter).first;
2156 const svalue *iter_sval = (*iter).second;
2157 const svalue *unknown_sval
2158 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
2159 (iter_sval->get_type ());
2160 m_map.put (iter_key, unknown_sval);
2162 /* For any pointers in OTHER, the merger means that the
2163 concrete pointer becomes an unknown value, which could
2164 show up as a false report of a leak when considering what
2165 pointers are live before vs after.
2166 Avoid this by marking the base regions they point to as having
2167 escaped. */
2168 if (const region_svalue *region_sval
2169 = iter_sval->dyn_cast_region_svalue ())
2171 const region *base_reg
2172 = region_sval->get_pointee ()->get_base_region ();
2173 if (base_reg->tracked_p ()
2174 && !base_reg->symbolic_for_unknown_ptr_p ())
2176 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
2177 c->mark_as_escaped ();
2183 /* Mark this cluster as having escaped. */
2185 void
2186 binding_cluster::mark_as_escaped ()
2188 m_escaped = true;
2191 /* If this cluster has escaped (by this call, or by an earlier one, or
2192 by being an external param), then unbind all values and mark it
2193 as "touched", so that it has a conjured value, rather than an
2194 initial_svalue.
2195 Use P to purge state involving conjured_svalues. */
2197 void
2198 binding_cluster::on_unknown_fncall (const gcall *call,
2199 store_manager *mgr,
2200 const conjured_purge &p)
2202 if (m_escaped)
2204 m_map.empty ();
2206 if (!m_base_region->empty_p ())
2208 /* Bind it to a new "conjured" value using CALL. */
2209 const svalue *sval
2210 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2211 (m_base_region->get_type (), call, m_base_region, p);
2212 bind (mgr, m_base_region, sval);
2215 m_touched = true;
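/* Illustrative example (not from this file): given
     extern void unknown_fn (int *);
     void test (void)
     {
       int x = 42;
       unknown_fn (&x);
     }
   x's cluster has escaped by the time the unknown call is handled, so
   the binding of 42 is dropped, x is rebound to a conjured value for
   the call, and the cluster is marked as touched.  */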
2219 /* Mark this cluster as having been clobbered by STMT.
2220 Use P to purge state involving conjured_svalues. */
2222 void
2223 binding_cluster::on_asm (const gasm *stmt,
2224 store_manager *mgr,
2225 const conjured_purge &p)
2227 m_map.empty ();
2229 /* Bind it to a new "conjured" value using STMT. */
2230 const svalue *sval
2231 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2232 (m_base_region->get_type (), stmt, m_base_region, p);
2233 bind (mgr, m_base_region, sval);
2235 m_touched = true;
2238 /* Return true if this cluster has escaped. */
2240 bool
2241 binding_cluster::escaped_p () const
2243 /* Consider the "errno" region to always have escaped. */
2244 if (m_base_region->get_kind () == RK_ERRNO)
2245 return true;
2246 return m_escaped;
2249 /* Return true if this binding_cluster has no information
2250 i.e. if there are no bindings, and it hasn't been marked as having
2251 escaped or been touched symbolically. */
2253 bool
2254 binding_cluster::redundant_p () const
2256 return (m_map.elements () == 0
2257 && !m_escaped
2258 && !m_touched);
2261 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2263 static void
2264 append_pathvar_with_type (path_var pv,
2265 tree type,
2266 auto_vec<path_var> *out_pvs)
2268 gcc_assert (pv.m_tree);
2270 if (TREE_TYPE (pv.m_tree) != type)
2271 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2273 out_pvs->safe_push (pv);
2276 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2277 appending the results to OUT_PVS. */
2279 void
2280 binding_cluster::get_representative_path_vars (const region_model *model,
2281 svalue_set *visited,
2282 const region *base_reg,
2283 const svalue *sval,
2284 logger *logger,
2285 auto_vec<path_var> *out_pvs)
2286 const
2288 sval = simplify_for_binding (sval);
2290 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2292 const binding_key *key = (*iter).first;
2293 const svalue *bound_sval = (*iter).second;
2294 if (bound_sval == sval)
2296 if (const concrete_binding *ckey
2297 = key->dyn_cast_concrete_binding ())
2299 auto_vec <const region *> subregions;
2300 base_reg->get_subregions_for_binding
2301 (model->get_manager (),
2302 ckey->get_start_bit_offset (),
2303 ckey->get_size_in_bits (),
2304 sval->get_type (),
2305 &subregions);
2306 unsigned i;
2307 const region *subregion;
2308 FOR_EACH_VEC_ELT (subregions, i, subregion)
2310 if (path_var pv
2311 = model->get_representative_path_var (subregion,
2312 visited,
2313 logger))
2314 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2317 else
2319 const symbolic_binding *skey = (const symbolic_binding *)key;
2320 if (path_var pv
2321 = model->get_representative_path_var (skey->get_region (),
2322 visited,
2323 logger))
2324 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2330 /* Get any svalue bound to KEY, or NULL. */
2332 const svalue *
2333 binding_cluster::get_any_value (const binding_key *key) const
2335 return m_map.get (key);
2338 /* If this cluster has a single direct binding for the whole of the region,
2339 return it.
2340 For use in simplifying dumps. */
2342 const svalue *
2343 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2345 /* Fail gracefully if MGR is NULL to make it easier to dump store
2346 instances in the debugger. */
2347 if (mgr == NULL)
2348 return NULL;
2350 if (m_map.elements () != 1)
2351 return NULL;
2353 if (m_base_region->empty_p ())
2354 return NULL;
2356 const binding_key *key = binding_key::make (mgr, m_base_region);
2357 return get_any_value (key);
2360 /* class store_manager. */
2362 logger *
2363 store_manager::get_logger () const
2365 return m_mgr->get_logger ();
2368 /* binding consolidation. */
2370 const concrete_binding *
2371 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2372 bit_offset_t size_in_bits)
2374 concrete_binding b (start_bit_offset, size_in_bits);
2375 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2376 return existing;
2378 concrete_binding *to_save = new concrete_binding (b);
2379 m_concrete_binding_key_mgr.put (b, to_save);
2380 return to_save;
2383 const symbolic_binding *
2384 store_manager::get_symbolic_binding (const region *reg)
2386 symbolic_binding b (reg);
2387 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2388 return existing;
2390 symbolic_binding *to_save = new symbolic_binding (b);
2391 m_symbolic_binding_key_mgr.put (b, to_save);
2392 return to_save;
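/* An illustrative sketch of the consolidation above (not part of the
   selftests below): keys are interned per store_manager, so asking for
   the same bit-range twice yields the same object, and keys can be
   compared by pointer:
     store_manager mgr (NULL);
     const concrete_binding *a = mgr.get_concrete_binding (0, 8);
     const concrete_binding *b = mgr.get_concrete_binding (0, 8);
     gcc_assert (a == b);
   */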
2395 /* class store. */
2397 /* store's default ctor. */
2399 store::store ()
2400 : m_called_unknown_fn (false)
2404 /* store's copy ctor. */
2406 store::store (const store &other)
2407 : m_cluster_map (other.m_cluster_map.elements ()),
2408 m_called_unknown_fn (other.m_called_unknown_fn)
2410 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2411 iter != other.m_cluster_map.end ();
2412 ++iter)
2414 const region *reg = (*iter).first;
2415 gcc_assert (reg);
2416 binding_cluster *c = (*iter).second;
2417 gcc_assert (c);
2418 m_cluster_map.put (reg, new binding_cluster (*c));
2422 /* store's dtor. */
2424 store::~store ()
2426 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2427 iter != m_cluster_map.end ();
2428 ++iter)
2429 delete (*iter).second;
2432 /* store's assignment operator. */
2434 store &
2435 store::operator= (const store &other)
2437 /* Delete existing cluster map. */
2438 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2439 iter != m_cluster_map.end ();
2440 ++iter)
2441 delete (*iter).second;
2442 m_cluster_map.empty ();
2444 m_called_unknown_fn = other.m_called_unknown_fn;
2446 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2447 iter != other.m_cluster_map.end ();
2448 ++iter)
2450 const region *reg = (*iter).first;
2451 gcc_assert (reg);
2452 binding_cluster *c = (*iter).second;
2453 gcc_assert (c);
2454 m_cluster_map.put (reg, new binding_cluster (*c));
2456 return *this;
2459 /* store's equality operator. */
2461 bool
2462 store::operator== (const store &other) const
2464 if (m_called_unknown_fn != other.m_called_unknown_fn)
2465 return false;
2467 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2468 return false;
2470 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2471 iter != m_cluster_map.end ();
2472 ++iter)
2474 const region *reg = (*iter).first;
2475 binding_cluster *c = (*iter).second;
2476 binding_cluster **other_slot
2477 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2478 if (other_slot == NULL)
2479 return false;
2480 if (*c != **other_slot)
2481 return false;
2484 gcc_checking_assert (hash () == other.hash ());
2486 return true;
2489 /* Get a hash value for this store. */
2491 hashval_t
2492 store::hash () const
2494 hashval_t result = 0;
2495 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2496 iter != m_cluster_map.end ();
2497 ++iter)
2498 result ^= (*iter).second->hash ();
2499 return result;
2502 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2503 removing duplicate parents. */
2505 static void
2506 get_sorted_parent_regions (auto_vec<const region *> *out,
2507 auto_vec<const region *> &in)
2509 /* Get the set of parent regions. */
2510 hash_set<const region *> parent_regions;
2511 const region *iter_reg;
2512 unsigned i;
2513 FOR_EACH_VEC_ELT (in, i, iter_reg)
2515 const region *parent_reg = iter_reg->get_parent_region ();
2516 gcc_assert (parent_reg);
2517 parent_regions.add (parent_reg);
2520 /* Write to OUT. */
2521 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2522 iter != parent_regions.end(); ++iter)
2523 out->safe_push (*iter);
2525 /* Sort OUT. */
2526 out->qsort (region::cmp_ptr_ptr);
2529 /* Dump a representation of this store to PP, using SIMPLE to control how
2530 svalues and regions are printed.
2531 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2532 (to make it easier to use from the debugger). */
2534 void
2535 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2536 store_manager *mgr) const
2538 /* Sort into some deterministic order. */
2539 auto_vec<const region *> base_regions;
2540 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2541 iter != m_cluster_map.end (); ++iter)
2543 const region *base_reg = (*iter).first;
2544 base_regions.safe_push (base_reg);
2546 base_regions.qsort (region::cmp_ptr_ptr);
2548 /* Gather clusters, organize by parent region, so that we can group
2549 together locals, globals, etc. */
2550 auto_vec<const region *> parent_regions;
2551 get_sorted_parent_regions (&parent_regions, base_regions);
2553 const region *parent_reg;
2554 unsigned i;
2555 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2557 gcc_assert (parent_reg);
2558 pp_string (pp, "clusters within ");
2559 parent_reg->dump_to_pp (pp, simple);
2560 if (multiline)
2561 pp_newline (pp);
2562 else
2563 pp_string (pp, " {");
2565 const region *base_reg;
2566 unsigned j;
2567 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2569 /* This is O(N * M), but N ought to be small. */
2570 if (base_reg->get_parent_region () != parent_reg)
2571 continue;
2572 binding_cluster *cluster
2573 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2574 if (!multiline)
2576 if (j > 0)
2577 pp_string (pp, ", ");
2579 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2581 /* Special-case to simplify dumps for the common case where
2582 we just have one value directly bound to the whole of a
2583 region. */
2584 if (multiline)
2586 pp_string (pp, " cluster for: ");
2587 base_reg->dump_to_pp (pp, simple);
2588 pp_string (pp, ": ");
2589 sval->dump_to_pp (pp, simple);
2590 if (cluster->escaped_p ())
2591 pp_string (pp, " (ESCAPED)");
2592 if (cluster->touched_p ())
2593 pp_string (pp, " (TOUCHED)");
2594 pp_newline (pp);
2596 else
2598 pp_string (pp, "region: {");
2599 base_reg->dump_to_pp (pp, simple);
2600 pp_string (pp, ", value: ");
2601 sval->dump_to_pp (pp, simple);
2602 if (cluster->escaped_p ())
2603 pp_string (pp, " (ESCAPED)");
2604 if (cluster->touched_p ())
2605 pp_string (pp, " (TOUCHED)");
2606 pp_string (pp, "}");
2609 else if (multiline)
2611 pp_string (pp, " cluster for: ");
2612 base_reg->dump_to_pp (pp, simple);
2613 pp_newline (pp);
2614 cluster->dump_to_pp (pp, simple, multiline);
2616 else
2618 pp_string (pp, "base region: {");
2619 base_reg->dump_to_pp (pp, simple);
2620 pp_string (pp, "} has cluster: {");
2621 cluster->dump_to_pp (pp, simple, multiline);
2622 pp_string (pp, "}");
2625 if (!multiline)
2626 pp_string (pp, "}");
2628 pp_printf (pp, "m_called_unknown_fn: %s",
2629 m_called_unknown_fn ? "TRUE" : "FALSE");
2630 if (multiline)
2631 pp_newline (pp);
2634 /* Dump a multiline representation of this store to stderr. */
2636 DEBUG_FUNCTION void
2637 store::dump (bool simple) const
2639 pretty_printer pp;
2640 pp_format_decoder (&pp) = default_tree_printer;
2641 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2642 pp.set_output_stream (stderr);
2643 dump_to_pp (&pp, simple, true, NULL);
2644 pp_newline (&pp);
2645 pp_flush (&pp);
2648 /* Assert that this object is valid. */
2650 void
2651 store::validate () const
2653 for (auto iter : m_cluster_map)
2654 iter.second->validate ();
2657 /* Return a new json::object of the form
2658 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2659 ... for each cluster within parent region},
2660 ...for each parent region,
2661 "called_unknown_fn": true/false}. */
2663 json::object *
2664 store::to_json () const
2666 json::object *store_obj = new json::object ();
2668 /* Sort into some deterministic order. */
2669 auto_vec<const region *> base_regions;
2670 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2671 iter != m_cluster_map.end (); ++iter)
2673 const region *base_reg = (*iter).first;
2674 base_regions.safe_push (base_reg);
2676 base_regions.qsort (region::cmp_ptr_ptr);
2678 /* Gather clusters, organize by parent region, so that we can group
2679 together locals, globals, etc. */
2680 auto_vec<const region *> parent_regions;
2681 get_sorted_parent_regions (&parent_regions, base_regions);
2683 const region *parent_reg;
2684 unsigned i;
2685 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2687 gcc_assert (parent_reg);
2689 json::object *clusters_in_parent_reg_obj = new json::object ();
2691 const region *base_reg;
2692 unsigned j;
2693 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2695 /* This is O(N * M), but N ought to be small. */
2696 if (base_reg->get_parent_region () != parent_reg)
2697 continue;
2698 binding_cluster *cluster
2699 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2700 label_text base_reg_desc = base_reg->get_desc ();
2701 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2702 cluster->to_json ());
2704 label_text parent_reg_desc = parent_reg->get_desc ();
2705 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2708 store_obj->set_bool ("called_unknown_fn", m_called_unknown_fn);
2710 return store_obj;
2713 std::unique_ptr<text_art::tree_widget>
2714 store::make_dump_widget (const text_art::dump_widget_info &dwi,
2715 store_manager *mgr) const
2717 std::unique_ptr<text_art::tree_widget> store_widget
2718 (text_art::tree_widget::make (dwi, "Store"));
2720 store_widget->add_child
2721 (text_art::tree_widget::from_fmt (dwi, nullptr,
2722 "m_called_unknown_fn: %s",
2723 m_called_unknown_fn ? "true" : "false"));
2725 /* Sort into some deterministic order. */
2726 auto_vec<const region *> base_regions;
2727 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2728 iter != m_cluster_map.end (); ++iter)
2730 const region *base_reg = (*iter).first;
2731 base_regions.safe_push (base_reg);
2733 base_regions.qsort (region::cmp_ptr_ptr);
2735 /* Gather clusters, organize by parent region, so that we can group
2736 together locals, globals, etc. */
2737 auto_vec<const region *> parent_regions;
2738 get_sorted_parent_regions (&parent_regions, base_regions);
2740 const region *parent_reg;
2741 unsigned i;
2742 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2744 gcc_assert (parent_reg);
2746 pretty_printer the_pp;
2747 pretty_printer * const pp = &the_pp;
2748 pp_format_decoder (pp) = default_tree_printer;
2749 pp_show_color (pp) = true;
2750 const bool simple = true;
2752 parent_reg->dump_to_pp (pp, simple);
2754 std::unique_ptr<text_art::tree_widget> parent_reg_widget
2755 (text_art::tree_widget::make (dwi, pp));
2757 const region *base_reg;
2758 unsigned j;
2759 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2761 /* This is O(N * M), but N ought to be small. */
2762 if (base_reg->get_parent_region () != parent_reg)
2763 continue;
2764 binding_cluster *cluster
2765 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2766 parent_reg_widget->add_child
2767 (cluster->make_dump_widget (dwi, mgr));
2769 store_widget->add_child (std::move (parent_reg_widget));
2772 return store_widget;
2775 /* Get any svalue bound to REG, or NULL. */
2777 const svalue *
2778 store::get_any_binding (store_manager *mgr, const region *reg) const
2780 const region *base_reg = reg->get_base_region ();
2781 binding_cluster **cluster_slot
2782 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2783 if (!cluster_slot)
2784 return NULL;
2785 return (*cluster_slot)->get_any_binding (mgr, reg);
2788 /* Set the value of LHS_REG to RHS_SVAL. */
2790 void
2791 store::set_value (store_manager *mgr, const region *lhs_reg,
2792 const svalue *rhs_sval,
2793 uncertainty_t *uncertainty)
2795 logger *logger = mgr->get_logger ();
2796 LOG_SCOPE (logger);
2798 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2800 if (lhs_reg->get_type ())
2801 rhs_sval = simplify_for_binding (rhs_sval);
2802 /* ...but if we have no type for the region, retain any cast. */
2804 const region *lhs_base_reg = lhs_reg->get_base_region ();
2805 binding_cluster *lhs_cluster;
2806 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2808 /* Reject attempting to bind values into a symbolic region
2809 for an unknown ptr; merely invalidate values below. */
2810 lhs_cluster = NULL;
2812 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2813 then treat the region being pointed to as having escaped. */
2814 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2816 const region *ptr_dst = ptr_sval->get_pointee ();
2817 const region *ptr_base_reg = ptr_dst->get_base_region ();
2818 mark_as_escaped (ptr_base_reg);
2820 if (uncertainty)
2821 uncertainty->on_maybe_bound_sval (rhs_sval);
2823 else if (lhs_base_reg->tracked_p ())
2825 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2826 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2828 else
2830 /* Reject attempting to bind values into an untracked region;
2831 merely invalidate values below. */
2832 lhs_cluster = NULL;
2835 /* Bindings to a cluster can affect other clusters if a symbolic
2836 base region is involved.
2837 Writes to concrete clusters can't affect other concrete clusters,
2838 but can affect symbolic clusters.
2839 Writes to symbolic clusters can affect both concrete and symbolic
2840 clusters.
2841 Invalidate our knowledge of other clusters that might have been
2842 affected by the write.
2843 Gather the set of all svalues that might still be live even if
2844 the store doesn't refer to them. */
2845 svalue_set maybe_live_values;
2846 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2847 iter != m_cluster_map.end (); ++iter)
2849 const region *iter_base_reg = (*iter).first;
2850 binding_cluster *iter_cluster = (*iter).second;
2851 if (iter_base_reg != lhs_base_reg
2852 && (lhs_cluster == NULL
2853 || lhs_cluster->symbolic_p ()
2854 || iter_cluster->symbolic_p ()))
2856 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2857 switch (t_alias.get_value ())
2859 default:
2860 gcc_unreachable ();
2862 case tristate::TS_UNKNOWN:
2863 if (logger)
2865 pretty_printer *pp = logger->get_printer ();
2866 logger->start_log_line ();
2867 logger->log_partial ("possible aliasing of ");
2868 iter_base_reg->dump_to_pp (pp, true);
2869 logger->log_partial (" when writing SVAL: ");
2870 rhs_sval->dump_to_pp (pp, true);
2871 logger->log_partial (" to LHS_REG: ");
2872 lhs_reg->dump_to_pp (pp, true);
2873 logger->end_log_line ();
2875 /* Mark the whole of iter_base_reg within iter_cluster as unknown,
2876 using LHS_REG when considering overlaps, to handle
2877 symbolic vs concrete issues. */
2878 iter_cluster->mark_region_as_unknown
2879 (mgr,
2880 iter_base_reg, /* reg_to_bind */
2881 lhs_reg, /* reg_for_overlap */
2882 uncertainty,
2883 &maybe_live_values);
2884 break;
2886 case tristate::TS_TRUE:
2887 gcc_unreachable ();
2888 break;
2890 case tristate::TS_FALSE:
2891 /* If they can't be aliases, then don't invalidate this
2892 cluster. */
2893 break;
2897 /* Given the set of svalues that might still be live, process them
2898 (e.g. marking regions as escaped).
2899 We do this after the iteration to avoid potentially changing
2900 m_cluster_map whilst iterating over it. */
2901 on_maybe_live_values (maybe_live_values);
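/* Illustrative example (not from this file) of the invalidation above:
   with clusters for globals "a" and "b" and for "*p", where p is an
   unknown pointer:
   - a write to "a" leaves "b" untouched (both clusters are concrete),
     but conservatively invalidates the symbolic cluster for "*p",
     unless eval_alias can rule out aliasing;
   - a write to "*p" conservatively invalidates both "a" and "b", since
     the symbolic write could alias either, again unless eval_alias can
     rule the aliasing out.  */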
2904 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2906 tristate
2907 store::eval_alias (const region *base_reg_a,
2908 const region *base_reg_b) const
2910 /* SSA names can't alias. */
2911 tree decl_a = base_reg_a->maybe_get_decl ();
2912 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2913 return tristate::TS_FALSE;
2914 tree decl_b = base_reg_b->maybe_get_decl ();
2915 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2916 return tristate::TS_FALSE;
2918 /* Try both ways, for symmetry. */
2919 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2920 if (ts_ab.is_false ())
2921 return tristate::TS_FALSE;
2922 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2923 if (ts_ba.is_false ())
2924 return tristate::TS_FALSE;
2925 return tristate::TS_UNKNOWN;
2928 /* Half of store::eval_alias; called twice for symmetry. */
2930 tristate
2931 store::eval_alias_1 (const region *base_reg_a,
2932 const region *base_reg_b) const
2934 /* If they're in different memory spaces, they can't alias. */
2936 enum memory_space memspace_a = base_reg_a->get_memory_space ();
2937 if (memspace_a != MEMSPACE_UNKNOWN)
2939 enum memory_space memspace_b = base_reg_b->get_memory_space ();
2940 if (memspace_b != MEMSPACE_UNKNOWN
2941 && memspace_a != memspace_b)
2942 return tristate::TS_FALSE;
2946 if (const symbolic_region *sym_reg_a
2947 = base_reg_a->dyn_cast_symbolic_region ())
2949 const svalue *sval_a = sym_reg_a->get_pointer ();
2950 if (tree decl_b = base_reg_b->maybe_get_decl ())
2952 if (!may_be_aliased (decl_b))
2953 return tristate::TS_FALSE;
2954 if (sval_a->get_kind () == SK_INITIAL)
2955 if (!is_global_var (decl_b))
2957 /* The initial value of a pointer can't point to a local. */
2958 return tristate::TS_FALSE;
2961 if (sval_a->get_kind () == SK_INITIAL
2962 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2964 /* The initial value of a pointer can't point to a
2965 region that was allocated on the heap after the beginning of the
2966 path. */
2967 return tristate::TS_FALSE;
2969 if (const widening_svalue *widening_sval_a
2970 = sval_a->dyn_cast_widening_svalue ())
2972 const svalue *base = widening_sval_a->get_base_svalue ();
2973 if (const region_svalue *region_sval
2974 = base->dyn_cast_region_svalue ())
2976 const region *pointee = region_sval->get_pointee ();
2977 /* If we have sval_a is WIDENING(&REGION, OP), and
2978 B can't alias REGION, then B can't alias A either.
2979 For example, A might arise from
2980 for (ptr = &REGION; ...; ptr++)
2981 where sval_a is ptr in the 2nd iteration of the loop.
2982 We want to ensure that "*ptr" can only clobber things
2983 within REGION's base region. */
2984 tristate ts = eval_alias (pointee->get_base_region (),
2985 base_reg_b);
2986 if (ts.is_false ())
2987 return tristate::TS_FALSE;
2991 return tristate::TS_UNKNOWN;
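/* Illustrative examples (not from this file) of the rules above, for
     int g;
     void f (int *p)
     {
       int local;
       ...
     }
   - "*p", with p holding its initial value, can't alias "local":
     locals whose address isn't taken aren't may_be_aliased, and the
     initial value of a pointer can't point to a local anyway;
   - "*p", with p holding its initial value, can't alias a region
     heap-allocated later along the analyzed path;
   - "*p" versus the global "g" typically stays TS_UNKNOWN, so a write
     through p conservatively invalidates knowledge of "g".  */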
2994 /* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
2996 void
2997 store::on_maybe_live_values (const svalue_set &maybe_live_values)
2999 for (auto sval : maybe_live_values)
3001 if (const region_svalue *ptr_sval = sval->dyn_cast_region_svalue ())
3003 const region *base_reg = ptr_sval->get_pointee ()->get_base_region ();
3004 mark_as_escaped (base_reg);
3009 /* Remove all bindings overlapping REG within this store. */
3011 void
3012 store::clobber_region (store_manager *mgr, const region *reg)
3014 const region *base_reg = reg->get_base_region ();
3015 binding_cluster **slot = m_cluster_map.get (base_reg);
3016 if (!slot)
3017 return;
3018 binding_cluster *cluster = *slot;
3019 cluster->clobber_region (mgr, reg);
3020 if (cluster->redundant_p ())
3022 delete cluster;
3023 m_cluster_map.remove (base_reg);
3027 /* Remove any bindings for REG within this store. */
3029 void
3030 store::purge_region (store_manager *mgr, const region *reg)
3032 const region *base_reg = reg->get_base_region ();
3033 binding_cluster **slot = m_cluster_map.get (base_reg);
3034 if (!slot)
3035 return;
3036 binding_cluster *cluster = *slot;
3037 cluster->purge_region (mgr, reg);
3038 if (cluster->redundant_p ())
3040 delete cluster;
3041 m_cluster_map.remove (base_reg);
3045 /* Fill REG with SVAL. */
3047 void
3048 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
3050 /* Filling an empty region is a no-op. */
3051 if (reg->empty_p ())
3052 return;
3054 const region *base_reg = reg->get_base_region ();
3055 if (base_reg->symbolic_for_unknown_ptr_p ()
3056 || !base_reg->tracked_p ())
3057 return;
3058 binding_cluster *cluster = get_or_create_cluster (base_reg);
3059 cluster->fill_region (mgr, reg, sval);
3062 /* Zero-fill REG. */
3064 void
3065 store::zero_fill_region (store_manager *mgr, const region *reg)
3067 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
3068 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
3069 fill_region (mgr, reg, zero_sval);
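/* Illustrative example (not from this file): zeroing a buffer, as in
     char buf[16];
     memset (buf, 0, sizeof (buf));
   would naturally be modeled by zero-filling buf's region, i.e. by
   filling it with the char-typed constant 0 as above.  */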
3072 /* Mark REG as having unknown content. */
3074 void
3075 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
3076 uncertainty_t *uncertainty,
3077 svalue_set *maybe_live_values)
3079 const region *base_reg = reg->get_base_region ();
3080 if (base_reg->symbolic_for_unknown_ptr_p ()
3081 || !base_reg->tracked_p ())
3082 return;
3083 binding_cluster *cluster = get_or_create_cluster (base_reg);
3084 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty,
3085 maybe_live_values);
3088 /* Purge state involving SVAL. */
3090 void
3091 store::purge_state_involving (const svalue *sval,
3092 region_model_manager *sval_mgr)
3094 auto_vec <const region *> base_regs_to_purge;
3095 for (auto iter : m_cluster_map)
3097 const region *base_reg = iter.first;
3098 if (base_reg->involves_p (sval))
3099 base_regs_to_purge.safe_push (base_reg);
3100 else
3102 binding_cluster *cluster = iter.second;
3103 cluster->purge_state_involving (sval, sval_mgr);
3107 for (auto iter : base_regs_to_purge)
3108 purge_cluster (iter);
3111 /* Get the cluster for BASE_REG, or NULL (const version). */
3113 const binding_cluster *
3114 store::get_cluster (const region *base_reg) const
3116 gcc_assert (base_reg);
3117 gcc_assert (base_reg->get_base_region () == base_reg);
3118 if (binding_cluster **slot
3119 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
3120 return *slot;
3121 else
3122 return NULL;
3125 /* Get the cluster for BASE_REG, or NULL (non-const version). */
3127 binding_cluster *
3128 store::get_cluster (const region *base_reg)
3130 gcc_assert (base_reg);
3131 gcc_assert (base_reg->get_base_region () == base_reg);
3132 if (binding_cluster **slot = m_cluster_map.get (base_reg))
3133 return *slot;
3134 else
3135 return NULL;
3138 /* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
3140 binding_cluster *
3141 store::get_or_create_cluster (const region *base_reg)
3143 gcc_assert (base_reg);
3144 gcc_assert (base_reg->get_base_region () == base_reg);
3146 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
3147 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
3149 /* We shouldn't create clusters for base regions that aren't trackable. */
3150 gcc_assert (base_reg->tracked_p ());
3152 if (binding_cluster **slot = m_cluster_map.get (base_reg))
3153 return *slot;
3155 binding_cluster *cluster = new binding_cluster (base_reg);
3156 m_cluster_map.put (base_reg, cluster);
3158 return cluster;
3161 /* Remove any cluster for BASE_REG, for use by
3162 region_model::unbind_region_and_descendents
3163 when popping stack frames and handling deleted heap regions. */
3165 void
3166 store::purge_cluster (const region *base_reg)
3168 gcc_assert (base_reg->get_base_region () == base_reg);
3169 binding_cluster **slot = m_cluster_map.get (base_reg);
3170 if (!slot)
3171 return;
3172 binding_cluster *cluster = *slot;
3173 delete cluster;
3174 m_cluster_map.remove (base_reg);
3177 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
3178 Return true if successful, or false if the stores can't be merged. */
3180 bool
3181 store::can_merge_p (const store *store_a, const store *store_b,
3182 store *out_store, store_manager *mgr,
3183 model_merger *merger)
3185 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
3186 out_store->m_called_unknown_fn = true;
3188 /* Get the union of all base regions for STORE_A and STORE_B. */
3189 hash_set<const region *> base_regions;
3190 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
3191 iter_a != store_a->m_cluster_map.end (); ++iter_a)
3193 const region *base_reg_a = (*iter_a).first;
3194 base_regions.add (base_reg_a);
3196 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
3197 iter_b != store_b->m_cluster_map.end (); ++iter_b)
3199 const region *base_reg_b = (*iter_b).first;
3200 base_regions.add (base_reg_b);
3203 /* Sort the base regions before considering them. This ought not to
3204 affect the results, but can affect which types UNKNOWN_REGIONs are
3205 created for in a run; sorting them thus avoids minor differences
3206 in logfiles. */
3207 auto_vec<const region *> vec_base_regions (base_regions.elements ());
3208 for (hash_set<const region *>::iterator iter = base_regions.begin ();
3209 iter != base_regions.end (); ++iter)
3210 vec_base_regions.quick_push (*iter);
3211 vec_base_regions.qsort (region::cmp_ptr_ptr);
3212 unsigned i;
3213 const region *base_reg;
3214 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
3216 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
3217 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
3218 /* At least one of cluster_a and cluster_b must be non-NULL. */
3219 binding_cluster *out_cluster
3220 = out_store->get_or_create_cluster (base_reg);
3221 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
3222 out_cluster, out_store, mgr, merger))
3223 return false;
3225 return true;
3228 /* Mark the cluster for BASE_REG as having escaped.
3229 For use when handling an unrecognized function call, and
3230 for params to "top-level" calls.
3231 Further unknown function calls could touch it, even if the cluster
3232 isn't reachable from args of those calls. */
3234 void
3235 store::mark_as_escaped (const region *base_reg)
3237 gcc_assert (base_reg);
3238 gcc_assert (base_reg->get_base_region () == base_reg);
3240 if (base_reg->symbolic_for_unknown_ptr_p ()
3241 || !base_reg->tracked_p ())
3242 return;
3244 binding_cluster *cluster = get_or_create_cluster (base_reg);
3245 cluster->mark_as_escaped ();
3248 /* Handle an unknown fncall by updating any clusters that have escaped
3249 (either in this fncall, or in a prior one). */
3251 void
3252 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
3253 const conjured_purge &p)
3255 m_called_unknown_fn = true;
3257 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3258 iter != m_cluster_map.end (); ++iter)
3259 (*iter).second->on_unknown_fncall (call, mgr, p);
3262 /* Return true if a non-const pointer to BASE_REG (or something within it)
3263 has escaped to code outside of the TU being analyzed. */
3265 bool
3266 store::escaped_p (const region *base_reg) const
3268 gcc_assert (base_reg);
3269 gcc_assert (base_reg->get_base_region () == base_reg);
3271 /* "errno" can always be modified by external code. */
3272 if (base_reg->get_kind () == RK_ERRNO)
3273 return true;
3275 if (binding_cluster **cluster_slot
3276 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3277 return (*cluster_slot)->escaped_p ();
3278 return false;
3281 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3282 this store, using VISITED to ensure the traversal terminates. */
3284 void
3285 store::get_representative_path_vars (const region_model *model,
3286 svalue_set *visited,
3287 const svalue *sval,
3288 logger *logger,
3289 auto_vec<path_var> *out_pvs) const
3291 gcc_assert (sval);
3293 /* Find all bindings that reference SVAL. */
3294 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3295 iter != m_cluster_map.end (); ++iter)
3297 const region *base_reg = (*iter).first;
3298 binding_cluster *cluster = (*iter).second;
3299 cluster->get_representative_path_vars (model, visited, base_reg, sval,
3300 logger,
3301 out_pvs);
3304 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
3306 const region *reg = init_sval->get_region ();
3307 if (path_var pv = model->get_representative_path_var (reg,
3308 visited,
3309 logger))
3310 out_pvs->safe_push (pv);
3314 /* Remove all bindings overlapping REG within this store, removing
3315 any clusters that become redundant.
3317 If UNCERTAINTY is non-NULL, use it to record any svalues that
3318 were removed, as being maybe-bound. */
3320 void
3321 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3322 uncertainty_t *uncertainty)
3324 const region *base_reg = reg->get_base_region ();
3325 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3327 binding_cluster *cluster = *cluster_slot;
3328 if (reg == base_reg && !escaped_p (base_reg))
3330 /* Remove whole cluster. */
3331 m_cluster_map.remove (base_reg);
3332 delete cluster;
3333 return;
3335 /* Pass NULL for the maybe_live_values here, as we don't want to
3336 record the old svalues as being maybe-bound. */
3337 cluster->remove_overlapping_bindings (mgr, reg, uncertainty, NULL);
3341 /* Subclass of visitor that accumulates a hash_set of the regions that
3342 were visited. */
3344 struct region_finder : public visitor
3346 void visit_region (const region *reg) final override
3348 m_regs.add (reg);
3351 hash_set<const region *> m_regs;
3354 /* Canonicalize this store, to maximize the chance of equality between
3355 instances. */
3357 void
3358 store::canonicalize (store_manager *mgr)
3360 /* If we have e.g.:
3361 cluster for: HEAP_ALLOCATED_REGION(543)
3362 ESCAPED
3363 TOUCHED
3364 where the heap region is empty and unreferenced, then purge that
3365 cluster, to avoid unbounded state chains involving these. */
3367 /* Find regions that are referenced by bound values in the store. */
3368 region_finder s;
3369 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3370 iter != m_cluster_map.end (); ++iter)
3372 binding_cluster *cluster = (*iter).second;
3373 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3374 bind_iter != cluster->m_map.end (); ++bind_iter)
3375 (*bind_iter).second->accept (&s);
3378 /* Locate heap-allocated regions that have empty bindings that weren't
3379 found above. */
3380 hash_set<const region *> purgeable_regions;
3381 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3382 iter != m_cluster_map.end (); ++iter)
3384 const region *base_reg = (*iter).first;
3385 binding_cluster *cluster = (*iter).second;
3386 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3388 /* Don't purge a heap-allocated region that's been marked as
3389 escaping, since this could be recording that a ptr to it
3390 was written to an unknown symbolic region along this
3391 path, and so we don't know whether it's referenced or
3392 not, and hence should report it as leaking
3393 (PR analyzer/106473). */
3394 if (cluster->escaped_p ())
3395 continue;
3397 if (cluster->empty_p ())
3398 if (!s.m_regs.contains (base_reg))
3399 purgeable_regions.add (base_reg);
3401 /* Also cover the UNKNOWN case. */
3402 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3403 if (sval->get_kind () == SK_UNKNOWN)
3404 if (!s.m_regs.contains (base_reg))
3405 purgeable_regions.add (base_reg);
3409 /* Purge them. */
3410 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3411 iter != purgeable_regions.end (); ++iter)
3413 const region *base_reg = *iter;
3414 purge_cluster (base_reg);
3418 /* Subroutine for use by exploded_path::feasible_p.
3420 We need to deal with state differences between:
3421 (a) when the exploded_graph is being initially constructed and
3422 (b) when replaying the state changes along a specific path in
3423 exploded_path::feasible_p.
3425 In (a), state merging happens, so when exploring a loop
3426 for (i = 0; i < 1024; i++)
3427 on successive iterations we have i == 0, then i == WIDENING.
3429 In (b), no state merging happens, so naively replaying the path
3430 that goes twice through the loop then exits it
3431 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3432 that exits the loop, which would be found to be infeasible as i == 1,
3433 and the path would be rejected.
3435 We need to fix up state during replay. This subroutine is
3436 called whenever we enter a supernode that we've already
3437 visited along this exploded_path, passing in OTHER_STORE
3438 from the destination enode's state.
3440 Find bindings to widening values in OTHER_STORE.
3441 For all that are found, update the binding in this store to UNKNOWN. */
3443 void
3444 store::loop_replay_fixup (const store *other_store,
3445 region_model_manager *mgr)
3447 gcc_assert (other_store);
3448 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3449 iter != other_store->m_cluster_map.end (); ++iter)
3451 const region *base_reg = (*iter).first;
3452 binding_cluster *cluster = (*iter).second;
3453 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3454 bind_iter != cluster->m_map.end (); ++bind_iter)
3456 const binding_key *key = (*bind_iter).first;
3457 const svalue *sval = (*bind_iter).second;
3458 if (sval->get_kind () == SK_WIDENING)
3460 binding_cluster *this_cluster
3461 = get_or_create_cluster (base_reg);
3462 const svalue *unknown
3463 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3464 this_cluster->bind_key (key, unknown);
3470 /* Use R to replay the bindings from SUMMARY into this object. */
3472 void
3473 store::replay_call_summary (call_summary_replay &r,
3474 const store &summary)
3476 if (summary.m_called_unknown_fn)
3478 /* A call to an external function occurred in the summary.
3479 Hence we need to invalidate our knowledge of globals,
3480 escaped regions, etc. */
3481 on_unknown_fncall (r.get_call_stmt (),
3482 r.get_store_manager (),
3483 conjured_purge (r.get_caller_model (),
3484 r.get_ctxt ()));
3487 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3488 for (auto kv : summary.m_cluster_map)
3489 keys.quick_push (kv.first);
3490 keys.qsort (region::cmp_ptr_ptr);
3491 for (auto base_reg : keys)
3492 replay_call_summary_cluster (r, summary, base_reg);
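/* Illustrative example (not from this file) of summary replay: if the
   summary for
     void set_to_42 (int *p) { *p = 42; }
   ends with 42 bound within the symbolic region for "*p" (p holding its
   initial value), then replaying the summary at a call
   "set_to_42 (&g)" converts that pointer value to "&g" in the caller,
   dereferences it, and binds 42 to g in this store (see the
   RK_SYMBOLIC case below).  */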
3495 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3496 into this object. */
3498 void
3499 store::replay_call_summary_cluster (call_summary_replay &r,
3500 const store &summary,
3501 const region *summary_base_reg)
3503 const call_details &cd = r.get_call_details ();
3504 region_model_manager *reg_mgr = r.get_manager ();
3505 store_manager *mgr = reg_mgr->get_store_manager ();
3506 const binding_cluster *summary_cluster
3507 = summary.get_cluster (summary_base_reg);
3509 /* Handle "ESCAPED" and "TOUCHED" flags. */
3510 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3511 if (const region *caller_reg
3512 = r.convert_region_from_summary (summary_base_reg))
3514 const region *caller_base_reg = caller_reg->get_base_region ();
3515 if (caller_base_reg->tracked_p ()
3516 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3518 binding_cluster *caller_cluster
3519 = get_or_create_cluster (caller_base_reg);
3520 if (summary_cluster->escaped_p ())
3521 caller_cluster->mark_as_escaped ();
3522 if (summary_cluster->touched_p ())
3523 caller_cluster->m_touched = true;
3527 switch (summary_base_reg->get_kind ())
3529 /* Top-level regions. */
3530 case RK_FRAME:
3531 case RK_GLOBALS:
3532 case RK_CODE:
3533 case RK_STACK:
3534 case RK_HEAP:
3535 case RK_THREAD_LOCAL:
3536 case RK_ROOT:
3537 /* Child regions. */
3538 case RK_FIELD:
3539 case RK_ELEMENT:
3540 case RK_OFFSET:
3541 case RK_SIZED:
3542 case RK_CAST:
3543 case RK_BIT_RANGE:
3544 /* Other regions. */
3545 case RK_VAR_ARG:
3546 case RK_UNKNOWN:
3547 /* These should never be the base region of a binding cluster. */
3548 gcc_unreachable ();
3549 break;
3551 case RK_FUNCTION:
3552 case RK_LABEL:
3553 case RK_STRING:
3554 /* These can be marked as escaping. */
3555 break;
3557 case RK_SYMBOLIC:
3559 const symbolic_region *summary_symbolic_reg
3560 = as_a <const symbolic_region *> (summary_base_reg);
3561 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3562 const svalue *caller_ptr_sval
3563 = r.convert_svalue_from_summary (summary_ptr_sval);
3564 if (!caller_ptr_sval)
3565 return;
3566 const region *caller_dest_reg
3567 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3568 NULL_TREE,
3569 cd.get_ctxt ());
3570 const svalue *summary_sval
3571 = summary.get_any_binding (mgr, summary_base_reg);
3572 if (!summary_sval)
3573 return;
3574 const svalue *caller_sval
3575 = r.convert_svalue_from_summary (summary_sval);
3576 if (!caller_sval)
3577 caller_sval =
3578 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3579 set_value (mgr, caller_dest_reg,
3580 caller_sval, NULL /* uncertainty_t * */);
3582 break;
3584 case RK_HEAP_ALLOCATED:
3585 case RK_DECL:
3586 case RK_ERRNO:
3587 case RK_PRIVATE:
3589 const region *caller_dest_reg
3590 = r.convert_region_from_summary (summary_base_reg);
3591 if (!caller_dest_reg)
3592 return;
3593 const svalue *summary_sval
3594 = summary.get_any_binding (mgr, summary_base_reg);
3595 if (!summary_sval)
3596 summary_sval = reg_mgr->get_or_create_compound_svalue
3597 (summary_base_reg->get_type (),
3598 summary_cluster->get_map ());
3599 const svalue *caller_sval
3600 = r.convert_svalue_from_summary (summary_sval);
3601 if (!caller_sval)
3602 caller_sval =
3603 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3604 set_value (mgr, caller_dest_reg,
3605 caller_sval, NULL /* uncertainty_t * */);
3607 break;
3609 case RK_ALLOCA:
3610 /* Ignore bindings of alloca regions in the summary. */
3611 break;
3615 #if CHECKING_P
3617 namespace selftest {
3619 /* Verify that bit_range::intersects_p works as expected. */
3621 static void
3622 test_bit_range_intersects_p ()
3624 bit_range b0 (0, 1);
3625 bit_range b1 (1, 1);
3626 bit_range b2 (2, 1);
3627 bit_range b3 (3, 1);
3628 bit_range b4 (4, 1);
3629 bit_range b5 (5, 1);
3630 bit_range b6 (6, 1);
3631 bit_range b7 (7, 1);
3632 bit_range b1_to_6 (1, 6);
3633 bit_range b0_to_7 (0, 8);
3634 bit_range b3_to_5 (3, 3);
3635 bit_range b6_to_7 (6, 2);
3637 /* self-intersection is true. */
3638 ASSERT_TRUE (b0.intersects_p (b0));
3639 ASSERT_TRUE (b7.intersects_p (b7));
3640 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3641 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3643 ASSERT_FALSE (b0.intersects_p (b1));
3644 ASSERT_FALSE (b1.intersects_p (b0));
3645 ASSERT_FALSE (b0.intersects_p (b7));
3646 ASSERT_FALSE (b7.intersects_p (b0));
3648 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3649 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3650 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3651 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3653 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3654 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3655 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3656 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3657 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3658 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3660 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3661 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3663 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3664 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3666 bit_range r1 (0,0);
3667 bit_range r2 (0,0);
3668 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3669 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3670 ASSERT_EQ (r1.m_size_in_bits, 6);
3671 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3672 ASSERT_EQ (r2.m_size_in_bits, 6);
3674 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3675 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3676 ASSERT_EQ (r1.m_size_in_bits, 6);
3677 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3678 ASSERT_EQ (r2.m_size_in_bits, 6);
3681 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3683 static void
3684 assert_bit_range_from_mask_eq (const location &loc,
3685 unsigned HOST_WIDE_INT mask,
3686 const bit_range &expected)
3688 bit_range actual (0, 0);
3689 bool ok = bit_range::from_mask (mask, &actual);
3690 ASSERT_TRUE_AT (loc, ok);
3691 ASSERT_EQ_AT (loc, actual, expected);
3694 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3695 out EXPECTED_BIT_RANGE. */
3697 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3698 SELFTEST_BEGIN_STMT \
3699 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3700 EXPECTED_BIT_RANGE); \
3701 SELFTEST_END_STMT
3703 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3705 static void
3706 assert_no_bit_range_from_mask_eq (const location &loc,
3707 unsigned HOST_WIDE_INT mask)
3709 bit_range actual (0, 0);
3710 bool ok = bit_range::from_mask (mask, &actual);
3711 ASSERT_FALSE_AT (loc, ok);
3714 /* Assert that bit_range::from_mask (MASK) returns false. */
3716 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3717 SELFTEST_BEGIN_STMT \
3718 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3719 SELFTEST_END_STMT
3721 /* Verify that bit_range::from_mask works as expected. */
3723 static void
3724 test_bit_range_from_mask ()
3726 /* Should fail on zero. */
3727 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3729 /* Verify 1-bit masks. */
3730 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3731 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3732 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3733 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3734 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3735 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3736 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3737 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3739 /* Verify N-bit masks starting at bit 0. */
3740 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3741 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3742 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3743 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3744 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3745 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3746 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3747 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3749 /* Various other tests. */
3750 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3751 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3752 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3754 /* Multiple ranges of set bits should fail. */
3755 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3756 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3759 /* Implementation detail of ASSERT_OVERLAP. */
3761 static void
3762 assert_overlap (const location &loc,
3763 const concrete_binding *b1,
3764 const concrete_binding *b2)
3766 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3767 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3770 /* Implementation detail of ASSERT_DISJOINT. */
3772 static void
3773 assert_disjoint (const location &loc,
3774 const concrete_binding *b1,
3775 const concrete_binding *b2)
3777 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3778 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3781 /* Assert that B1 and B2 overlap, checking both ways. */
3783 #define ASSERT_OVERLAP(B1, B2) \
3784 SELFTEST_BEGIN_STMT \
3785 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3786 SELFTEST_END_STMT
3788 /* Assert that B1 and B2 do not overlap, checking both ways. */
3790 #define ASSERT_DISJOINT(B1, B2) \
3791 SELFTEST_BEGIN_STMT \
3792 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3793 SELFTEST_END_STMT
3795 /* Verify that concrete_binding::overlaps_p works as expected. */
3797 static void
3798 test_binding_key_overlap ()
3800 store_manager mgr (NULL);
3802 /* Various 8-bit bindings. */
3803 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3804 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3805 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3806 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3808 /* 16-bit bindings. */
3809 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3810 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3811 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3813 /* 32-bit binding. */
3814 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3816 /* Everything should self-overlap. */
3817 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3818 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3819 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3820 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3821 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3822 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3823 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3824 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3826 /* Verify the 8-bit bindings that don't overlap each other. */
3827 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3828 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3830 /* Check for overlap of differently-sized bindings. */
3831 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3832 /* ...and with differing start points. */
3833 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3834 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3835 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3836 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3838 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3839 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3840 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3841 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3844 /* Run all of the selftests within this file. */
3846 void
3847 analyzer_store_cc_tests ()
3849 test_bit_range_intersects_p ();
3850 test_bit_range_from_mask ();
3851 test_binding_key_overlap ();
3854 } // namespace selftest
3856 #endif /* CHECKING_P */
3858 } // namespace ana
3860 #endif /* #if ENABLE_ANALYZER */