gcc/analyzer/store.cc
1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2023 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #define INCLUDE_MEMORY
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tree.h"
26 #include "function.h"
27 #include "basic-block.h"
28 #include "gimple.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
31 #include "graphviz.h"
32 #include "options.h"
33 #include "cgraph.h"
34 #include "tree-dfa.h"
35 #include "stringpool.h"
36 #include "convert.h"
37 #include "target.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
41 #include "diagnostic-metadata.h"
42 #include "bitmap.h"
43 #include "selftest.h"
44 #include "analyzer/analyzer.h"
45 #include "analyzer/analyzer-logging.h"
46 #include "ordered-hash-map.h"
47 #include "options.h"
48 #include "cfg.h"
49 #include "analyzer/supergraph.h"
50 #include "sbitmap.h"
51 #include "analyzer/call-string.h"
52 #include "analyzer/program-point.h"
53 #include "analyzer/store.h"
54 #include "analyzer/region-model.h"
55 #include "analyzer/call-summary.h"
56 #include "analyzer/analyzer-selftests.h"
57 #include "stor-layout.h"
59 #if ENABLE_ANALYZER
61 namespace ana {
63 /* Dump SVALS to PP, sorting them to ensure determinism. */
65 static void
66 dump_svalue_set (const hash_set <const svalue *> &svals,
67 pretty_printer *pp, bool simple)
69 auto_vec <const svalue *> v;
70 for (hash_set<const svalue *>::iterator iter = svals.begin ();
71 iter != svals.end (); ++iter)
73 v.safe_push (*iter);
75 v.qsort (svalue::cmp_ptr_ptr);
77 pp_character (pp, '{');
78 const svalue *sval;
79 unsigned i;
80 FOR_EACH_VEC_ELT (v, i, sval)
82 if (i > 0)
83 pp_string (pp, ", ");
84 sval->dump_to_pp (pp, simple);
86 pp_character (pp, '}');
89 /* class uncertainty_t. */
91 /* Dump this object to PP. */
93 void
94 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
96 pp_string (pp, "{m_maybe_bound_svals: ");
97 dump_svalue_set (m_maybe_bound_svals, pp, simple);
99 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
100 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
101 pp_string (pp, "}");
104 /* Dump this object to stderr. */
106 DEBUG_FUNCTION void
107 uncertainty_t::dump (bool simple) const
109 pretty_printer pp;
110 pp_format_decoder (&pp) = default_tree_printer;
111 pp_show_color (&pp) = pp_show_color (global_dc->printer);
112 pp.buffer->stream = stderr;
113 dump_to_pp (&pp, simple);
114 pp_newline (&pp);
115 pp_flush (&pp);
118 /* class binding_key. */
120 const binding_key *
121 binding_key::make (store_manager *mgr, const region *r)
123 region_offset offset = r->get_offset (mgr->get_svalue_manager ());
124 if (offset.symbolic_p ())
125 return mgr->get_symbolic_binding (r);
126 else
128 bit_size_t bit_size;
129 if (r->get_bit_size (&bit_size))
131 /* Must be non-empty. */
132 gcc_assert (bit_size > 0);
133 return mgr->get_concrete_binding (offset.get_bit_offset (),
134 bit_size);
136 else
137 return mgr->get_symbolic_binding (r);
141 /* Dump this binding_key to stderr. */
143 DEBUG_FUNCTION void
144 binding_key::dump (bool simple) const
146 pretty_printer pp;
147 pp_format_decoder (&pp) = default_tree_printer;
148 pp_show_color (&pp) = pp_show_color (global_dc->printer);
149 pp.buffer->stream = stderr;
150 dump_to_pp (&pp, simple);
151 pp_newline (&pp);
152 pp_flush (&pp);
155 /* Get a description of this binding_key. */
157 label_text
158 binding_key::get_desc (bool simple) const
160 pretty_printer pp;
161 pp_format_decoder (&pp) = default_tree_printer;
162 dump_to_pp (&pp, simple);
163 return label_text::take (xstrdup (pp_formatted_text (&pp)));
166 /* qsort callback. */
169 binding_key::cmp_ptrs (const void *p1, const void *p2)
171 const binding_key * const *pk1 = (const binding_key * const *)p1;
172 const binding_key * const *pk2 = (const binding_key * const *)p2;
173 return cmp (*pk1, *pk2);
176 /* Comparator for binding_keys. */
179 binding_key::cmp (const binding_key *k1, const binding_key *k2)
181 int concrete1 = k1->concrete_p ();
182 int concrete2 = k2->concrete_p ();
183 if (int concrete_cmp = concrete1 - concrete2)
184 return concrete_cmp;
185 if (concrete1)
187 const concrete_binding *b1 = (const concrete_binding *)k1;
188 const concrete_binding *b2 = (const concrete_binding *)k2;
189 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
190 b2->get_start_bit_offset (),
191 SIGNED))
192 return start_cmp;
193 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
194 SIGNED);
196 else
198 const symbolic_binding *s1 = (const symbolic_binding *)k1;
199 const symbolic_binding *s2 = (const symbolic_binding *)k2;
200 if (s1 > s2)
201 return 1;
202 if (s1 < s2)
203 return -1;
204 return 0;
208 /* struct bit_range. */
210 void
211 bit_range::dump_to_pp (pretty_printer *pp) const
213 byte_range bytes (0, 0);
214 if (as_byte_range (&bytes))
215 bytes.dump_to_pp (pp);
216 else
218 pp_string (pp, "start: ");
219 pp_wide_int (pp, m_start_bit_offset, SIGNED);
220 pp_string (pp, ", size: ");
221 pp_wide_int (pp, m_size_in_bits, SIGNED);
222 pp_string (pp, ", next: ");
223 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
227 /* Dump this object to stderr. */
229 DEBUG_FUNCTION void
230 bit_range::dump () const
232 pretty_printer pp;
233 pp.buffer->stream = stderr;
234 dump_to_pp (&pp);
235 pp_newline (&pp);
236 pp_flush (&pp);
239 /* If OTHER is a subset of this, return true and write
240 to *OUT the relative range of OTHER within this.
241 Otherwise return false. */
243 bool
244 bit_range::contains_p (const bit_range &other, bit_range *out) const
246 if (contains_p (other.get_start_bit_offset ())
247 && contains_p (other.get_last_bit_offset ()))
249 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
250 out->m_size_in_bits = other.m_size_in_bits;
251 return true;
253 else
254 return false;
257 /* If OTHER intersects this, return true and write
258 the relative range of OTHER within THIS to *OUT_THIS,
259 and the relative range of THIS within OTHER to *OUT_OTHER.
260 Otherwise return false. */
262 bool
263 bit_range::intersects_p (const bit_range &other,
264 bit_range *out_this,
265 bit_range *out_other) const
267 if (get_start_bit_offset () < other.get_next_bit_offset ()
268 && other.get_start_bit_offset () < get_next_bit_offset ())
270 bit_offset_t overlap_start
271 = MAX (get_start_bit_offset (),
272 other.get_start_bit_offset ());
273 bit_offset_t overlap_next
274 = MIN (get_next_bit_offset (),
275 other.get_next_bit_offset ());
276 gcc_assert (overlap_next > overlap_start);
277 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
278 *out_this = abs_overlap_bits - get_start_bit_offset ();
279 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
280 return true;
282 else
283 return false;
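/* An expository sketch (hypothetical, for illustration only): how the two
   queries above report ranges relative to their operands, using just the
   bit_range API from this file.  The function name is invented and the
   block is kept under "#if 0".  */
#if 0
static void
example_bit_range_queries ()
{
  /* Outer covers bits 32..95; inner covers bits 40..47.  */
  bit_range outer (32, 64);
  bit_range inner (40, 8);

  /* contains_p reports the inner range relative to the start of the
     outer one: bits 8..15 of "outer".  */
  bit_range rel (0, 0);
  gcc_assert (outer.contains_p (inner, &rel));
  gcc_assert (rel == bit_range (8, 8));

  /* intersects_p reports the overlap relative to each operand:
     bits 0..31 and bits 16..47 overlap in bits 16..31.  */
  bit_range a (0, 32);
  bit_range b (16, 32);
  bit_range in_a (0, 0);
  bit_range in_b (0, 0);
  gcc_assert (a.intersects_p (b, &in_a, &in_b));
  gcc_assert (in_a == bit_range (16, 16));
  gcc_assert (in_b == bit_range (0, 16));
}
#endif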
287 bit_range::cmp (const bit_range &br1, const bit_range &br2)
289 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
290 br2.m_start_bit_offset))
291 return start_cmp;
293 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
296 /* Offset this range by OFFSET. */
298 bit_range
299 bit_range::operator- (bit_offset_t offset) const
301 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
304 /* If MASK is a contiguous range of set bits, write them
305 to *OUT and return true.
306 Otherwise return false. */
308 bool
309 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
311 unsigned iter_bit_idx = 0;
312 unsigned HOST_WIDE_INT iter_bit_mask = 1;
314 /* Find the first contiguous run of set bits in MASK. */
316 /* Find first set bit in MASK. */
317 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
319 if (mask & iter_bit_mask)
320 break;
321 iter_bit_idx++;
322 iter_bit_mask <<= 1;
324 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
325 /* MASK is zero. */
326 return false;
328 unsigned first_set_iter_bit_idx = iter_bit_idx;
329 unsigned num_set_bits = 1;
330 iter_bit_idx++;
331 iter_bit_mask <<= 1;
333 /* Find next unset bit in MASK. */
334 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
336 if (!(mask & iter_bit_mask))
337 break;
338 num_set_bits++;
339 iter_bit_idx++;
340 iter_bit_mask <<= 1;
342 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
344 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
345 return true;
348 /* We now have the first contiguous run of set bits in MASK.
349 Fail if any other bits are set. */
350 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
352 if (mask & iter_bit_mask)
353 return false;
354 iter_bit_idx++;
355 iter_bit_mask <<= 1;
358 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
359 return true;
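/* An expository sketch (hypothetical, for illustration only) of from_mask
   on concrete masks; the function name is invented and the block is kept
   under "#if 0".  */
#if 0
static void
example_bit_range_from_mask ()
{
  bit_range br (0, 0);

  /* 0x78 == 0b01111000: a single run of four set bits starting at bit 3.  */
  gcc_assert (bit_range::from_mask (0x78, &br));
  gcc_assert (br == bit_range (3, 4));

  /* Zero, or a mask with more than one run of set bits (0b1010), fails.  */
  gcc_assert (!bit_range::from_mask (0, &br));
  gcc_assert (!bit_range::from_mask (0xa, &br));
}
#endif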
362 /* Attempt to convert this bit_range to a byte_range.
363 Return true if it is possible, writing the result to *OUT.
364 Otherwise return false. */
366 bool
367 bit_range::as_byte_range (byte_range *out) const
369 if (m_start_bit_offset % BITS_PER_UNIT == 0
370 && m_size_in_bits % BITS_PER_UNIT == 0)
372 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
373 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
374 return true;
376 return false;
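/* An expository sketch (hypothetical, for illustration only) of
   as_byte_range; the function name is invented and the block is kept
   under "#if 0".  */
#if 0
static void
example_bit_range_as_byte_range ()
{
  byte_range bytes (0, 0);

  /* Bits 16..47 are byte-aligned, and become bytes 2..5.  */
  gcc_assert (bit_range (16, 32).as_byte_range (&bytes));
  gcc_assert (bytes.m_start_byte_offset == 2);
  gcc_assert (bytes.m_size_in_bytes == 4);

  /* A range that isn't a whole number of bytes can't be converted.  */
  gcc_assert (!bit_range (3, 8).as_byte_range (&bytes));
}
#endif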
379 /* Dump this object to PP. */
381 void
382 byte_range::dump_to_pp (pretty_printer *pp) const
384 if (m_size_in_bytes == 0)
386 pp_string (pp, "empty");
388 else if (m_size_in_bytes == 1)
390 pp_string (pp, "byte ");
391 pp_wide_int (pp, m_start_byte_offset, SIGNED);
393 else
395 pp_string (pp, "bytes ");
396 pp_wide_int (pp, m_start_byte_offset, SIGNED);
397 pp_string (pp, "-");
398 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
402 /* Dump this object to stderr. */
404 DEBUG_FUNCTION void
405 byte_range::dump () const
407 pretty_printer pp;
408 pp.buffer->stream = stderr;
409 dump_to_pp (&pp);
410 pp_newline (&pp);
411 pp_flush (&pp);
414 /* If OTHER is a subset of this, return true and write
415 to *OUT the relative range of OTHER within this.
416 Otherwise return false. */
418 bool
419 byte_range::contains_p (const byte_range &other, byte_range *out) const
421 if (contains_p (other.get_start_byte_offset ())
422 && contains_p (other.get_last_byte_offset ()))
424 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
425 out->m_size_in_bytes = other.m_size_in_bytes;
426 return true;
428 else
429 return false;
432 /* Return true if THIS and OTHER intersect, and write the number
433    of overlapping bytes to *OUT_NUM_OVERLAP_BYTES.
435 Otherwise return false. */
437 bool
438 byte_range::intersects_p (const byte_range &other,
439 byte_size_t *out_num_overlap_bytes) const
441 if (get_start_byte_offset () < other.get_next_byte_offset ()
442 && other.get_start_byte_offset () < get_next_byte_offset ())
444 byte_offset_t overlap_start = MAX (get_start_byte_offset (),
445 other.get_start_byte_offset ());
446 byte_offset_t overlap_next = MIN (get_next_byte_offset (),
447 other.get_next_byte_offset ());
448 gcc_assert (overlap_next > overlap_start);
449 *out_num_overlap_bytes = overlap_next - overlap_start;
450 return true;
452 else
453 return false;
456 /* Return true if THIS exceeds OTHER and write the overhanging
457 byte range to OUT_OVERHANGING_BYTE_RANGE. */
459 bool
460 byte_range::exceeds_p (const byte_range &other,
461 byte_range *out_overhanging_byte_range) const
463 gcc_assert (!empty_p ());
465 if (other.get_next_byte_offset () < get_next_byte_offset ())
467 /* THIS definitely exceeds OTHER. */
468 byte_offset_t start = MAX (get_start_byte_offset (),
469 other.get_next_byte_offset ());
470 byte_offset_t size = get_next_byte_offset () - start;
471 gcc_assert (size > 0);
472 out_overhanging_byte_range->m_start_byte_offset = start;
473 out_overhanging_byte_range->m_size_in_bytes = size;
474 return true;
476 else
477 return false;
480 /* Return true if THIS starts before OFFSET, and write the part of
481    THIS that falls short of OFFSET to OUT_FALL_SHORT_BYTES. */
483 bool
484 byte_range::falls_short_of_p (byte_offset_t offset,
485 byte_range *out_fall_short_bytes) const
487 gcc_assert (!empty_p ());
489 if (get_start_byte_offset () < offset)
491 /* THIS falls short of OFFSET. */
492 byte_offset_t start = get_start_byte_offset ();
493 byte_offset_t size = MIN (offset, get_next_byte_offset ()) - start;
494 gcc_assert (size > 0);
495 out_fall_short_bytes->m_start_byte_offset = start;
496 out_fall_short_bytes->m_size_in_bytes = size;
497 return true;
499 else
500 return false;
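/* An expository sketch (hypothetical, for illustration only) of the three
   overlap queries above, phrased as an access that runs past the end of a
   buffer and one that starts before it; the function name is invented and
   the block is kept under "#if 0".  */
#if 0
static void
example_byte_range_overlap_queries ()
{
  /* A 10-byte buffer at bytes 0..9, and a 4-byte write to bytes 8..11.  */
  byte_range buf (0, 10);
  byte_range write_bytes (8, 4);

  /* They overlap in 2 bytes (bytes 8 and 9)...  */
  byte_size_t num_overlap = 0;
  gcc_assert (buf.intersects_p (write_bytes, &num_overlap));
  gcc_assert (num_overlap == 2);

  /* ...and the write overhangs the buffer by bytes 10..11.  */
  byte_range overhang (0, 0);
  gcc_assert (write_bytes.exceeds_p (buf, &overhang));
  gcc_assert (overhang.m_start_byte_offset == 10);
  gcc_assert (overhang.m_size_in_bytes == 2);

  /* A 4-byte access at bytes 2..5 against a region starting at byte 4
     falls short by bytes 2..3.  */
  byte_range read_bytes (2, 4);
  byte_range fall_short (0, 0);
  gcc_assert (read_bytes.falls_short_of_p (4, &fall_short));
  gcc_assert (fall_short.m_start_byte_offset == 2);
  gcc_assert (fall_short.m_size_in_bytes == 2);
}
#endif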
503 /* qsort comparator for byte ranges. */
506 byte_range::cmp (const byte_range &br1, const byte_range &br2)
508 /* Order first by offset. */
509 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
510 br2.m_start_byte_offset))
511 return start_cmp;
513 /* ...then by size. */
514 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
517 /* class concrete_binding : public binding_key. */
519 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
521 void
522 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
524 m_bit_range.dump_to_pp (pp);
527 /* Return true if this binding overlaps with OTHER. */
529 bool
530 concrete_binding::overlaps_p (const concrete_binding &other) const
532 if (get_start_bit_offset () < other.get_next_bit_offset ()
533 && get_next_bit_offset () > other.get_start_bit_offset ())
534 return true;
535 return false;
538 /* Comparator for use by vec<const concrete_binding *>::qsort. */
541 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
543 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
544 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
546 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
549 /* class symbolic_binding : public binding_key. */
551 void
552 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
554 //binding_key::dump_to_pp (pp, simple);
555 pp_string (pp, "region: ");
556 m_region->dump_to_pp (pp, simple);
559 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
562 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
564 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
565 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
567 return region::cmp_ids (b1->get_region (), b2->get_region ());
570 /* The store is oblivious to the types of the svalues bound within
571 it: any type can get bound at any location.
572 Simplify any casts before binding.
574 For example, if we have:
575 struct big { int ia[1024]; };
576 struct big src, dst;
577 memcpy (&dst, &src, sizeof (struct big));
578 this reaches us in gimple form as:
579 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
580 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
581 Using cast_region when handling the MEM_REF would give us:
582 INIT_VAL(CAST_REG(unsigned char[4096], src))
583 as rhs_sval, but we can fold that into a cast svalue:
584 CAST(unsigned char[4096], INIT_VAL(src))
585 We can discard that cast from the svalue when binding it in
586 the store for "dst", and simply store:
587 cluster for: dst
588 key: {kind: direct, start: 0, size: 32768, next: 32768}
589 value: ‘struct big’ {INIT_VAL(src)}. */
591 static const svalue *
592 simplify_for_binding (const svalue *sval)
594 if (const svalue *cast_sval = sval->maybe_undo_cast ())
595 sval = cast_sval;
596 return sval;
599 /* class binding_map. */
601 /* binding_map's copy ctor. */
603 binding_map::binding_map (const binding_map &other)
604 : m_map (other.m_map)
608 /* binding_map's assignment operator. */
610 binding_map&
611 binding_map::operator=(const binding_map &other)
613 /* For now, assume we only ever copy to an empty cluster. */
614 gcc_assert (m_map.elements () == 0);
615 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
616 ++iter)
618 const binding_key *key = (*iter).first;
619 const svalue *sval = (*iter).second;
620 m_map.put (key, sval);
622 return *this;
625 /* binding_map's equality operator. */
627 bool
628 binding_map::operator== (const binding_map &other) const
630 if (m_map.elements () != other.m_map.elements ())
631 return false;
633 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
635 const binding_key *key = (*iter).first;
636 const svalue *sval = (*iter).second;
637 const svalue **other_slot
638 = const_cast <map_t &> (other.m_map).get (key);
639 if (other_slot == NULL)
640 return false;
641 if (sval != *other_slot)
642 return false;
644 gcc_checking_assert (hash () == other.hash ());
645 return true;
648 /* Generate a hash value for this binding_map. */
650 hashval_t
651 binding_map::hash () const
653 hashval_t result = 0;
654 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
656 /* Use a new hasher for each key to avoid depending on the ordering
657 of keys when accumulating the result. */
658 inchash::hash hstate;
659 hstate.add_ptr ((*iter).first);
660 hstate.add_ptr ((*iter).second);
661 result ^= hstate.end ();
663 return result;
666 /* Dump a representation of this binding_map to PP.
667 SIMPLE controls how values and regions are to be printed.
668 If MULTILINE, then split the dump over multiple lines and
669 use whitespace for readability, otherwise put all on one line. */
671 void
672 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
673 bool multiline) const
675 auto_vec <const binding_key *> binding_keys;
676 for (map_t::iterator iter = m_map.begin ();
677 iter != m_map.end (); ++iter)
679 const binding_key *key = (*iter).first;
680 binding_keys.safe_push (key);
682 binding_keys.qsort (binding_key::cmp_ptrs);
684 const binding_key *key;
685 unsigned i;
686 FOR_EACH_VEC_ELT (binding_keys, i, key)
688 const svalue *value = *const_cast <map_t &> (m_map).get (key);
689 if (multiline)
691 pp_string (pp, " key: {");
692 key->dump_to_pp (pp, simple);
693 pp_string (pp, "}");
694 pp_newline (pp);
695 pp_string (pp, " value: ");
696 if (tree t = value->get_type ())
697 dump_quoted_tree (pp, t);
698 pp_string (pp, " {");
699 value->dump_to_pp (pp, simple);
700 pp_string (pp, "}");
701 pp_newline (pp);
703 else
705 if (i > 0)
706 pp_string (pp, ", ");
707 pp_string (pp, "binding key: {");
708 key->dump_to_pp (pp, simple);
709 pp_string (pp, "}, value: {");
710 value->dump_to_pp (pp, simple);
711 pp_string (pp, "}");
716 /* Dump a multiline representation of this binding_map to stderr. */
718 DEBUG_FUNCTION void
719 binding_map::dump (bool simple) const
721 pretty_printer pp;
722 pp_format_decoder (&pp) = default_tree_printer;
723 pp_show_color (&pp) = pp_show_color (global_dc->printer);
724 pp.buffer->stream = stderr;
725 dump_to_pp (&pp, simple, true);
726 pp_newline (&pp);
727 pp_flush (&pp);
730 /* Return a new json::object of the form
731 {KEY_DESC : SVALUE_DESC,
732 ...for the various key/value pairs in this binding_map}. */
734 json::object *
735 binding_map::to_json () const
737 json::object *map_obj = new json::object ();
739 auto_vec <const binding_key *> binding_keys;
740 for (map_t::iterator iter = m_map.begin ();
741 iter != m_map.end (); ++iter)
743 const binding_key *key = (*iter).first;
744 binding_keys.safe_push (key);
746 binding_keys.qsort (binding_key::cmp_ptrs);
748 const binding_key *key;
749 unsigned i;
750 FOR_EACH_VEC_ELT (binding_keys, i, key)
752 const svalue *value = *const_cast <map_t &> (m_map).get (key);
753 label_text key_desc = key->get_desc ();
754 map_obj->set (key_desc.get (), value->to_json ());
757 return map_obj;
760 /* Comparator for imposing an order on binding_maps. */
763 binding_map::cmp (const binding_map &map1, const binding_map &map2)
765 if (int count_cmp = map1.elements () - map2.elements ())
766 return count_cmp;
768 auto_vec <const binding_key *> keys1 (map1.elements ());
769 for (map_t::iterator iter = map1.begin ();
770 iter != map1.end (); ++iter)
771 keys1.quick_push ((*iter).first);
772 keys1.qsort (binding_key::cmp_ptrs);
774 auto_vec <const binding_key *> keys2 (map2.elements ());
775 for (map_t::iterator iter = map2.begin ();
776 iter != map2.end (); ++iter)
777 keys2.quick_push ((*iter).first);
778 keys2.qsort (binding_key::cmp_ptrs);
780 for (size_t i = 0; i < keys1.length (); i++)
782 const binding_key *k1 = keys1[i];
783 const binding_key *k2 = keys2[i];
784 if (int key_cmp = binding_key::cmp (k1, k2))
785 return key_cmp;
786 gcc_assert (k1 == k2);
787 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
788 return sval_cmp;
791 return 0;
794 /* Get the child region of PARENT_REG based upon INDEX within a
795 CONSTRUCTOR. */
797 static const region *
798 get_subregion_within_ctor (const region *parent_reg, tree index,
799 region_model_manager *mgr)
801 switch (TREE_CODE (index))
803 default:
804 gcc_unreachable ();
805 case INTEGER_CST:
807 const svalue *index_sval
808 = mgr->get_or_create_constant_svalue (index);
809 return mgr->get_element_region (parent_reg,
810 TREE_TYPE (parent_reg->get_type ()),
811 index_sval);
813 break;
814 case FIELD_DECL:
815 return mgr->get_field_region (parent_reg, index);
819 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
821 static const svalue *
822 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
824 /* Reuse the get_rvalue logic from region_model. */
825 region_model m (mgr);
826 return m.get_rvalue (path_var (val, 0), NULL);
829 /* Bind values from CONSTRUCTOR to this map, relative to
830 PARENT_REG's relationship to its base region.
831 Return true if successful, false if there was a problem (e.g. due
832 to hitting a complexity limit). */
834 bool
835 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
836 region_model_manager *mgr)
838 gcc_assert (parent_reg);
839 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
841 unsigned ix;
842 tree index;
843 tree val;
844 tree parent_type = parent_reg->get_type ();
845 tree field;
846 if (TREE_CODE (parent_type) == RECORD_TYPE)
847 field = TYPE_FIELDS (parent_type);
848 else
849 field = NULL_TREE;
850 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
852 if (!index)
854 /* If index is NULL, then iterate through the fields for
855 a RECORD_TYPE, or use an INTEGER_CST otherwise.
856 Compare with similar logic in output_constructor. */
857 if (field)
859 index = field;
860 field = DECL_CHAIN (field);
862 else
863 index = build_int_cst (integer_type_node, ix);
865 else if (TREE_CODE (index) == RANGE_EXPR)
867 tree min_index = TREE_OPERAND (index, 0);
868 tree max_index = TREE_OPERAND (index, 1);
869 if (min_index == max_index)
871 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
872 min_index, val))
873 return false;
875 else
877 if (!apply_ctor_val_to_range (parent_reg, mgr,
878 min_index, max_index, val))
879 return false;
881 continue;
883 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
884 return false;
886 return true;
889 /* Bind the value VAL into the range of elements within PARENT_REG
890 from MIN_INDEX to MAX_INDEX (including endpoints).
891 For use in handling RANGE_EXPR within a CONSTRUCTOR.
892 Return true if successful, false if there was a problem (e.g. due
893 to hitting a complexity limit). */
895 bool
896 binding_map::apply_ctor_val_to_range (const region *parent_reg,
897 region_model_manager *mgr,
898 tree min_index, tree max_index,
899 tree val)
901 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
902 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
904 /* Generate a binding key for the range. */
905 const region *min_element
906 = get_subregion_within_ctor (parent_reg, min_index, mgr);
907 const region *max_element
908 = get_subregion_within_ctor (parent_reg, max_index, mgr);
909 region_offset min_offset = min_element->get_offset (mgr);
910 if (min_offset.symbolic_p ())
911 return false;
912 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
913 store_manager *smgr = mgr->get_store_manager ();
914 if (max_element->empty_p ())
915 return false;
916 const binding_key *max_element_key = binding_key::make (smgr, max_element);
917 if (max_element_key->symbolic_p ())
918 return false;
919 const concrete_binding *max_element_ckey
920 = max_element_key->dyn_cast_concrete_binding ();
921 bit_size_t range_size_in_bits
922 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
923 const concrete_binding *range_key
924 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
925 if (range_key->symbolic_p ())
926 return false;
928 /* Get the value. */
929 if (TREE_CODE (val) == CONSTRUCTOR)
930 return false;
931 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
933 /* Bind the value to the range. */
934 put (range_key, sval);
935 return true;
938 /* Bind the value VAL into INDEX within PARENT_REG.
939 For use in handling a pair of entries within a CONSTRUCTOR.
940 Return true if successful, false if there was a problem (e.g. due
941 to hitting a complexity limit). */
943 bool
944 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
945 region_model_manager *mgr,
946 tree index, tree val)
948 const region *child_reg
949 = get_subregion_within_ctor (parent_reg, index, mgr);
950 if (TREE_CODE (val) == CONSTRUCTOR)
951 return apply_ctor_to_region (child_reg, val, mgr);
952 else
954 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
955 if (child_reg->empty_p ())
956 return false;
957 const binding_key *k
958 = binding_key::make (mgr->get_store_manager (), child_reg);
959 /* Handle the case where we have an unknown size for child_reg
960 (e.g. due to it being a trailing field with incomplete array
961 type). */
962 if (!k->concrete_p ())
964 /* Assume that sval has a well-defined size for this case. */
965 tree sval_type = sval->get_type ();
966 gcc_assert (sval_type);
967 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
968 gcc_assert (sval_byte_size != -1);
969 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
970 /* Get offset of child relative to base region. */
971 region_offset child_base_offset = child_reg->get_offset (mgr);
972 if (child_base_offset.symbolic_p ())
973 return false;
974 /* Convert to an offset relative to the parent region. */
975 region_offset parent_base_offset = parent_reg->get_offset (mgr);
976 gcc_assert (!parent_base_offset.symbolic_p ());
977 bit_offset_t child_parent_offset
978 = (child_base_offset.get_bit_offset ()
979 - parent_base_offset.get_bit_offset ());
980 /* Create a concrete key for the child within the parent. */
981 k = mgr->get_store_manager ()->get_concrete_binding
982 (child_parent_offset, sval_bit_size);
984 gcc_assert (k->concrete_p ());
985 put (k, sval);
986 return true;
990 /* Populate OUT with all bindings within this map that overlap KEY. */
992 void
993 binding_map::get_overlapping_bindings (const binding_key *key,
994 auto_vec<const binding_key *> *out)
996 for (auto iter : *this)
998 const binding_key *iter_key = iter.first;
999 if (const concrete_binding *ckey
1000 = key->dyn_cast_concrete_binding ())
1002 if (const concrete_binding *iter_ckey
1003 = iter_key->dyn_cast_concrete_binding ())
1005 if (ckey->overlaps_p (*iter_ckey))
1006 out->safe_push (iter_key);
1008 else
1010 /* Assume overlap. */
1011 out->safe_push (iter_key);
1014 else
1016 /* Assume overlap. */
1017 out->safe_push (iter_key);
1022 /* Remove, truncate, and/or split any bindings within this map that
1023 overlap DROP_KEY.
1025 For example, if we have:
1027 +------------------------------------+
1028 | old binding |
1029 +------------------------------------+
1031 which is to be overwritten with:
1033 .......+----------------------+.......
1034 .......| new binding |.......
1035 .......+----------------------+.......
1037 this function "cuts a hole" out of the old binding:
1039 +------+......................+------+
1040 |prefix| hole for new binding |suffix|
1041 +------+......................+------+
1043 into which the new binding can be added without
1044 overlapping the prefix or suffix.
1046 The prefix and suffix (if added) will be bound to the pertinent
1047 parts of the value of the old binding.
1049 For example, given:
1050 struct s5
1052 char arr[8];
1054 void test_5 (struct s5 *p)
1056 struct s5 f = *p;
1057 f.arr[3] = 42;
1059 then after the "f = *p;" we have:
1060 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1061 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1062 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1063 giving:
1064 cluster for: f
1065 key: {bytes 0-2}
1066 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1067 key: {bytes 4-7}
1068 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1069 punching a hole into which the new value can be written at byte 3:
1070 cluster for: f
1071 key: {bytes 0-2}
1072 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1073 key: {byte 3}
1074 value: 'char' {(char)42}
1075 key: {bytes 4-7}
1076 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1078 If UNCERTAINTY is non-NULL, use it to record any svalues that
1079 were removed, as being maybe-bound.
1081 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1082 in the map, due to one or both of the underlying clusters being
1083 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1084 concrete binding it could actually be referring to the same memory as
1085 distinct concrete bindings in the map. Remove all bindings, but
1086 register any svalues with *UNCERTAINTY. */
1088 void
1089 binding_map::remove_overlapping_bindings (store_manager *mgr,
1090 const binding_key *drop_key,
1091 uncertainty_t *uncertainty,
1092 bool always_overlap)
1094 /* Get the bindings of interest within this map. */
1095 auto_vec<const binding_key *> bindings;
1096 if (always_overlap)
1097 for (auto iter : *this)
1098 bindings.safe_push (iter.first); /* Add all bindings. */
1099 else
1100 /* Just add overlapping bindings. */
1101 get_overlapping_bindings (drop_key, &bindings);
1103 unsigned i;
1104 const binding_key *iter_binding;
1105 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1107 /* Record any svalues that were removed to *UNCERTAINTY as being
1108 maybe-bound, provided at least some part of the binding is symbolic.
1110 Specifically, if at least one of the bindings is symbolic, or we
1111 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1112 regions, then we don't know that the svalue has been overwritten,
1113 and should record that to *UNCERTAINTY.
1115 However, if we have concrete keys accessing within the same symbolic
1116 region, then we *know* that the symbolic region has been overwritten,
1117 so we don't record it to *UNCERTAINTY, as this could be a genuine
1118 leak. */
1119 const svalue *old_sval = get (iter_binding);
1120 if (uncertainty
1121 && (drop_key->symbolic_p ()
1122 || iter_binding->symbolic_p ()
1123 || always_overlap))
1124 uncertainty->on_maybe_bound_sval (old_sval);
1126 /* Begin by removing the old binding. */
1127 m_map.remove (iter_binding);
1129 /* Don't attempt to handle prefixes/suffixes for the
1130 "always_overlap" case; everything's being removed. */
1131 if (always_overlap)
1132 continue;
1134 /* Now potentially add the prefix and suffix. */
1135 if (const concrete_binding *drop_ckey
1136 = drop_key->dyn_cast_concrete_binding ())
1137 if (const concrete_binding *iter_ckey
1138 = iter_binding->dyn_cast_concrete_binding ())
1140 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1142 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1143 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1145 if (iter_bits.get_start_bit_offset ()
1146 < drop_bits.get_start_bit_offset ())
1148 /* We have a truncated prefix. */
1149 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1150 (drop_bits.get_start_bit_offset ()
1151 - iter_bits.get_start_bit_offset ()));
1152 const concrete_binding *prefix_key
1153 = mgr->get_concrete_binding (prefix_bits);
1154 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1155 const svalue *prefix_sval
1156 = old_sval->extract_bit_range (NULL_TREE,
1157 rel_prefix,
1158 mgr->get_svalue_manager ());
1159 m_map.put (prefix_key, prefix_sval);
1162 if (iter_bits.get_next_bit_offset ()
1163 > drop_bits.get_next_bit_offset ())
1165 /* We have a truncated suffix. */
1166 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1167 (iter_bits.get_next_bit_offset ()
1168 - drop_bits.get_next_bit_offset ()));
1169 const concrete_binding *suffix_key
1170 = mgr->get_concrete_binding (suffix_bits);
1171 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1172 - iter_bits.get_start_bit_offset (),
1173 suffix_bits.m_size_in_bits);
1174 const svalue *suffix_sval
1175 = old_sval->extract_bit_range (NULL_TREE,
1176 rel_suffix,
1177 mgr->get_svalue_manager ());
1178 m_map.put (suffix_key, suffix_sval);
1184 /* class binding_cluster. */
1186 binding_cluster::binding_cluster (const region *base_region)
1187 : m_base_region (base_region), m_map (),
1188 m_escaped (false), m_touched (false)
1192 /* binding_cluster's copy ctor. */
1194 binding_cluster::binding_cluster (const binding_cluster &other)
1195 : m_base_region (other.m_base_region), m_map (other.m_map),
1196 m_escaped (other.m_escaped), m_touched (other.m_touched)
1200 /* binding_cluster's assignment operator. */
1202 binding_cluster&
1203 binding_cluster::operator= (const binding_cluster &other)
1205 gcc_assert (m_base_region == other.m_base_region);
1206 m_map = other.m_map;
1207 m_escaped = other.m_escaped;
1208 m_touched = other.m_touched;
1209 return *this;
1212 /* binding_cluster's equality operator. */
1214 bool
1215 binding_cluster::operator== (const binding_cluster &other) const
1217 if (m_map != other.m_map)
1218 return false;
1220 if (m_base_region != other.m_base_region)
1221 return false;
1223 if (m_escaped != other.m_escaped)
1224 return false;
1226 if (m_touched != other.m_touched)
1227 return false;
1229 gcc_checking_assert (hash () == other.hash ());
1231 return true;
1234 /* Generate a hash value for this binding_cluster. */
1236 hashval_t
1237 binding_cluster::hash () const
1239 return m_map.hash ();
1242 /* Return true if this binding_cluster is symbolic
1243 i.e. its base region is symbolic. */
1245 bool
1246 binding_cluster::symbolic_p () const
1248 return m_base_region->get_kind () == RK_SYMBOLIC;
1251 /* Dump a representation of this binding_cluster to PP.
1252 SIMPLE controls how values and regions are to be printed.
1253 If MULTILINE, then split the dump over multiple lines and
1254 use whitespace for readability, otherwise put all on one line. */
1256 void
1257 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1258 bool multiline) const
1260 if (m_escaped)
1262 if (multiline)
1264 pp_string (pp, " ESCAPED");
1265 pp_newline (pp);
1267 else
1268 pp_string (pp, "(ESCAPED)");
1270 if (m_touched)
1272 if (multiline)
1274 pp_string (pp, " TOUCHED");
1275 pp_newline (pp);
1277 else
1278 pp_string (pp, "(TOUCHED)");
1281 m_map.dump_to_pp (pp, simple, multiline);
1284 /* Dump a multiline representation of this binding_cluster to stderr. */
1286 DEBUG_FUNCTION void
1287 binding_cluster::dump (bool simple) const
1289 pretty_printer pp;
1290 pp_format_decoder (&pp) = default_tree_printer;
1291 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1292 pp.buffer->stream = stderr;
1293 pp_string (&pp, " cluster for: ");
1294 m_base_region->dump_to_pp (&pp, simple);
1295 pp_string (&pp, ": ");
1296 pp_newline (&pp);
1297 dump_to_pp (&pp, simple, true);
1298 pp_newline (&pp);
1299 pp_flush (&pp);
1302 /* Assert that this object is valid. */
1304 void
1305 binding_cluster::validate () const
1307 int num_symbolic = 0;
1308 int num_concrete = 0;
1309 for (auto iter : m_map)
1311 if (iter.first->symbolic_p ())
1312 num_symbolic++;
1313 else
1314 num_concrete++;
1316 /* We shouldn't have more than one symbolic key per cluster
1317 (or one would have clobbered the other). */
1318 gcc_assert (num_symbolic < 2);
1319 /* We can't have both concrete and symbolic keys. */
1320 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1323 /* Return a new json::object of the form
1324 {"escaped": true/false,
1325 "touched": true/false,
1326 "map" : object for the binding_map. */
1328 json::object *
1329 binding_cluster::to_json () const
1331 json::object *cluster_obj = new json::object ();
1333 cluster_obj->set ("escaped", new json::literal (m_escaped));
1334 cluster_obj->set ("touched", new json::literal (m_touched));
1335 cluster_obj->set ("map", m_map.to_json ());
1337 return cluster_obj;
1340 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1341 compound_sval. */
1343 void
1344 binding_cluster::bind (store_manager *mgr,
1345 const region *reg, const svalue *sval)
1347 if (const compound_svalue *compound_sval
1348 = sval->dyn_cast_compound_svalue ())
1350 bind_compound_sval (mgr, reg, compound_sval);
1351 return;
1354 if (reg->empty_p ())
1355 return;
1356 const binding_key *binding = binding_key::make (mgr, reg);
1357 bind_key (binding, sval);
1360 /* Bind SVAL to KEY.
1361 Unpacking of compound_svalues should already have been done by the
1362 time this is called. */
1364 void
1365 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1367 gcc_assert (sval->get_kind () != SK_COMPOUND);
1369 m_map.put (key, sval);
1370 if (key->symbolic_p ())
1371 m_touched = true;
1374 /* Subroutine of binding_cluster::bind.
1375 Unpack compound_svals when binding them, so that we bind them
1376 element-wise. */
1378 void
1379 binding_cluster::bind_compound_sval (store_manager *mgr,
1380 const region *reg,
1381 const compound_svalue *compound_sval)
1383 region_offset reg_offset
1384 = reg->get_offset (mgr->get_svalue_manager ());
1385 if (reg_offset.symbolic_p ())
1387 m_touched = true;
1388 clobber_region (mgr, reg);
1389 return;
1392 for (map_t::iterator iter = compound_sval->begin ();
1393 iter != compound_sval->end (); ++iter)
1395 const binding_key *iter_key = (*iter).first;
1396 const svalue *iter_sval = (*iter).second;
1398 if (const concrete_binding *concrete_key
1399 = iter_key->dyn_cast_concrete_binding ())
1401 bit_offset_t effective_start
1402 = (concrete_key->get_start_bit_offset ()
1403 + reg_offset.get_bit_offset ());
1404 const concrete_binding *effective_concrete_key
1405 = mgr->get_concrete_binding (effective_start,
1406 concrete_key->get_size_in_bits ());
1407 bind_key (effective_concrete_key, iter_sval);
1409 else
1410 gcc_unreachable ();
1414 /* Remove all bindings overlapping REG within this cluster. */
1416 void
1417 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1419 remove_overlapping_bindings (mgr, reg, NULL);
1422 /* Remove any bindings for REG within this cluster. */
1424 void
1425 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1427 gcc_assert (reg->get_kind () == RK_DECL);
1428 if (reg->empty_p ())
1429 return;
1430 const binding_key *binding
1431 = binding_key::make (mgr, const_cast<region *> (reg));
1432 m_map.remove (binding);
1435 /* Clobber REG and fill it with repeated copies of SVAL. */
1437 void
1438 binding_cluster::fill_region (store_manager *mgr,
1439 const region *reg,
1440 const svalue *sval)
1442 clobber_region (mgr, reg);
1444 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1445 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1446 const svalue *fill_sval
1447 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1448 byte_size_sval, sval);
1449 bind (mgr, reg, fill_sval);
1452 /* Clobber REG within this cluster and fill it with zeroes. */
1454 void
1455 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1457 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1458 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1459 fill_region (mgr, reg, zero_sval);
1462 /* Mark REG_TO_BIND within this cluster as being unknown.
1464 Remove any bindings overlapping REG_FOR_OVERLAP.
1465 If UNCERTAINTY is non-NULL, use it to record any svalues that
1466 had bindings to them removed, as being maybe-bound.
1468 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1469 store::mark_region_as_unknown, but are different in
1470 store::set_value's alias handling, for handling the case where
1471 we have a write to a symbolic REG_FOR_OVERLAP. */
1473 void
1474 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1475 const region *reg_to_bind,
1476 const region *reg_for_overlap,
1477 uncertainty_t *uncertainty)
1479 if (reg_to_bind->empty_p ())
1480 return;
1482 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty);
1484 /* Add a default binding to "unknown". */
1485 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1486 const svalue *sval
1487 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1488 bind (mgr, reg_to_bind, sval);
1491 /* Purge state involving SVAL. */
1493 void
1494 binding_cluster::purge_state_involving (const svalue *sval,
1495 region_model_manager *sval_mgr)
1497 auto_vec<const binding_key *> to_remove;
1498 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1499 for (auto iter : m_map)
1501 const binding_key *iter_key = iter.first;
1502 if (const symbolic_binding *symbolic_key
1503 = iter_key->dyn_cast_symbolic_binding ())
1505 const region *reg = symbolic_key->get_region ();
1506 if (reg->involves_p (sval))
1507 to_remove.safe_push (iter_key);
1509 const svalue *iter_sval = iter.second;
1510 if (iter_sval->involves_p (sval))
1511 to_make_unknown.safe_push (std::make_pair(iter_key,
1512 iter_sval->get_type ()));
1514 for (auto iter : to_remove)
1516 m_map.remove (iter);
1517 m_touched = true;
1519 for (auto iter : to_make_unknown)
1521 const svalue *new_sval
1522 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1523 m_map.put (iter.first, new_sval);
1527 /* Get any SVAL bound to REG within this cluster,
1528 without checking parent regions of REG. */
1530 const svalue *
1531 binding_cluster::get_binding (store_manager *mgr,
1532 const region *reg) const
1534 if (reg->empty_p ())
1535 return NULL;
1536 const binding_key *reg_binding = binding_key::make (mgr, reg);
1537 const svalue *sval = m_map.get (reg_binding);
1538 if (sval)
1540 /* If we have a struct with a single field, then the binding of
1541 the field will equal that of the struct, and looking up e.g.
1542 PARENT_REG.field within:
1543 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1544 will erroneously return INIT_VAL(OTHER_REG), rather than
1545 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1546 Fix this issue by iterating upwards whilst the bindings are equal,
1547 expressing the lookups as subvalues.
1548 We have to gather a list of subregion accesses, then walk it
1549 in reverse to get the subvalues. */
1550 auto_vec<const region *> regions;
1551 while (const region *parent_reg = reg->get_parent_region ())
1553 const binding_key *parent_reg_binding
1554 = binding_key::make (mgr, parent_reg);
1555 if (parent_reg_binding == reg_binding
1556 && sval->get_type ()
1557 && reg->get_type ()
1558 && sval->get_type () != reg->get_type ())
1560 regions.safe_push (reg);
1561 reg = parent_reg;
1563 else
1564 break;
1566 if (sval->get_type ()
1567 && reg->get_type ()
1568 && sval->get_type () == reg->get_type ())
1570 unsigned i;
1571 const region *iter_reg;
1572 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1574 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1575 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1576 sval, iter_reg);
1580 return sval;
1583 /* Get any SVAL bound to REG within this cluster,
1584 either directly for REG, or recursively checking for bindings within
1585 parent regions and extracting subvalues if need be. */
1587 const svalue *
1588 binding_cluster::get_binding_recursive (store_manager *mgr,
1589 const region *reg) const
1591 if (const svalue *sval = get_binding (mgr, reg))
1592 return sval;
1593 if (reg != m_base_region)
1594 if (const region *parent_reg = reg->get_parent_region ())
1595 if (const svalue *parent_sval
1596 = get_binding_recursive (mgr, parent_reg))
1598 /* Extract child svalue from parent svalue. */
1599 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1600 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1601 parent_sval, reg);
1603 return NULL;
1606 /* Get any value bound for REG within this cluster. */
1608 const svalue *
1609 binding_cluster::get_any_binding (store_manager *mgr,
1610 const region *reg) const
1612 /* Look for a direct binding. */
1613 if (const svalue *direct_sval
1614 = get_binding_recursive (mgr, reg))
1615 return direct_sval;
1617 /* If we had a write to a cluster of unknown size, we might
1618 have a self-binding of the whole base region with an svalue,
1619 where the base region is symbolic.
1620 Handle such cases by returning sub_svalue instances. */
1621 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1623 /* Extract child svalue from parent svalue. */
1624 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1625 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1626 cluster_sval, reg);
1629 /* If this cluster has been touched by a symbolic write, then the content
1630 of any subregion not currently specifically bound is "UNKNOWN". */
1631 if (m_touched)
1633 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1634 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1637 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1638 then we don't know if we're reading those values or not, so the result
1639 is also "UNKNOWN". */
1640 if (reg->get_offset (mgr->get_svalue_manager ()).symbolic_p ()
1641 && m_map.elements () > 0)
1643 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1644 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1647 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1648 return compound_sval;
1650 /* Otherwise, the initial value, or uninitialized. */
1651 return NULL;
1654 /* Attempt to get a compound_svalue for the bindings within the cluster
1655 affecting REG (which could be the base region itself).
1657 Create a compound_svalue with the subset of bindings that affect REG,
1658 offsetting them so that the offsets are relative to the start of REG
1659 within the cluster.
1661 For example, REG could be one element within an array of structs.
1663 Return the resulting compound_svalue, or NULL if there's a problem. */
1665 const svalue *
1666 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1667 const region *reg) const
1669 region_offset cluster_offset
1670 = m_base_region->get_offset (mgr->get_svalue_manager ());
1671 if (cluster_offset.symbolic_p ())
1672 return NULL;
1673 region_offset reg_offset = reg->get_offset (mgr->get_svalue_manager ());
1674 if (reg_offset.symbolic_p ())
1675 return NULL;
1677 if (reg->empty_p ())
1678 return NULL;
1680 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1682 /* We will build the result map in two parts:
1683 (a) result_map, holding the concrete keys from this cluster,
1685 (b) default_map, holding the initial values for the region
1686 (e.g. uninitialized, initializer values, or zero), unless this
1687 cluster has been touched.
1689 We will populate (a), and as we do, clobber (b), trimming and
1690 splitting its bindings as necessary.
1691 Finally, we will merge (b) into (a), giving a concrete map
1692 that merges both the initial values and the bound values from
1693 the binding_cluster.
1694 Doing it this way reduces N for the O(N^2) intersection-finding;
1695 perhaps we should have a spatially-organized data structure for
1696 concrete keys, though. */
1698 binding_map result_map;
1699 binding_map default_map;
1701 /* Set up default values in default_map. */
1702 const svalue *default_sval;
1703 if (m_touched)
1704 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1705 else
1706 default_sval = sval_mgr->get_or_create_initial_value (reg);
1707 const binding_key *default_key = binding_key::make (mgr, reg);
1708 default_map.put (default_key, default_sval);
1710 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1712 const binding_key *key = (*iter).first;
1713 const svalue *sval = (*iter).second;
1715 if (const concrete_binding *concrete_key
1716 = key->dyn_cast_concrete_binding ())
1718 const bit_range &bound_range = concrete_key->get_bit_range ();
1720 bit_size_t reg_bit_size;
1721 if (!reg->get_bit_size (&reg_bit_size))
1722 return NULL;
1724 bit_range reg_range (reg_offset.get_bit_offset (),
1725 reg_bit_size);
1727 /* Skip bindings that are outside the bit range of REG. */
1728 if (!bound_range.intersects_p (reg_range))
1729 continue;
1731 /* We shouldn't have an exact match; that should have been
1732 handled already. */
1733 gcc_assert (!(reg_range == bound_range));
1735 bit_range subrange (0, 0);
1736 if (reg_range.contains_p (bound_range, &subrange))
1738 /* We have a bound range fully within REG.
1739 Add it to map, offsetting accordingly. */
1741 /* Get offset of KEY relative to REG, rather than to
1742 the cluster. */
1743 const concrete_binding *offset_concrete_key
1744 = mgr->get_concrete_binding (subrange);
1745 result_map.put (offset_concrete_key, sval);
1747 /* Clobber default_map, removing/trimming/splitting where
1748 it overlaps with offset_concrete_key. */
1749 default_map.remove_overlapping_bindings (mgr,
1750 offset_concrete_key,
1751 NULL, false);
1753 else if (bound_range.contains_p (reg_range, &subrange))
1755 /* REG is fully within the bound range, but
1756 is not equal to it; we're extracting a subvalue. */
1757 return sval->extract_bit_range (reg->get_type (),
1758 subrange,
1759 mgr->get_svalue_manager ());
1761 else
1763 /* REG and the bound range partially overlap. */
1764 bit_range reg_subrange (0, 0);
1765 bit_range bound_subrange (0, 0);
1766 reg_range.intersects_p (bound_range,
1767 &reg_subrange, &bound_subrange);
1769 /* Get the bits from the bound value for the bits at the
1770 intersection (relative to the bound value). */
1771 const svalue *overlap_sval
1772 = sval->extract_bit_range (NULL_TREE,
1773 bound_subrange,
1774 mgr->get_svalue_manager ());
1776 /* Get key for overlap, relative to the REG. */
1777 const concrete_binding *overlap_concrete_key
1778 = mgr->get_concrete_binding (reg_subrange);
1779 result_map.put (overlap_concrete_key, overlap_sval);
1781 /* Clobber default_map, removing/trimming/splitting where
1782 it overlaps with overlap_concrete_key. */
1783 default_map.remove_overlapping_bindings (mgr,
1784 overlap_concrete_key,
1785 NULL, false);
1788 else
1789 /* Can't handle symbolic bindings. */
1790 return NULL;
1793 if (result_map.elements () == 0)
1794 return NULL;
1796 /* Merge any bindings from default_map into result_map. */
1797 for (auto iter : default_map)
1799 const binding_key *key = iter.first;
1800 const svalue *sval = iter.second;
1801 result_map.put (key, sval);
1804 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1807 /* Remove, truncate, and/or split any bindings within this map that
1808 could overlap REG.
1810 If REG's base region or this cluster is symbolic and they're different
1811 base regions, then remove everything in this cluster's map, on the
1812 grounds that REG could be referring to the same memory as anything
1813 in the map.
1815 If UNCERTAINTY is non-NULL, use it to record any svalues that
1816 were removed, as being maybe-bound. */
1818 void
1819 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1820 const region *reg,
1821 uncertainty_t *uncertainty)
1823 if (reg->empty_p ())
1824 return;
1825 const binding_key *reg_binding = binding_key::make (mgr, reg);
1827 const region *cluster_base_reg = get_base_region ();
1828 const region *other_base_reg = reg->get_base_region ();
1829 /* If at least one of the base regions involved is symbolic, and they're
1830 not the same base region, then consider everything in the map as
1831 potentially overlapping with reg_binding (even if it's a concrete
1832 binding and things in the map are concrete - they could be referring
1833 to the same memory when the symbolic base regions are taken into
1834 account). */
1835 bool always_overlap = (cluster_base_reg != other_base_reg
1836 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1837 || other_base_reg->get_kind () == RK_SYMBOLIC));
1838 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1839 always_overlap);
1842 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1843 MGR and MERGER.
1844 Return true if they can be merged, false otherwise. */
1846 bool
1847 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1848 const binding_cluster *cluster_b,
1849 binding_cluster *out_cluster,
1850 store *out_store,
1851 store_manager *mgr,
1852 model_merger *merger)
1854 gcc_assert (out_cluster);
1856 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1857 true if either of the inputs is true. */
1858 if ((cluster_a && cluster_a->m_escaped)
1859 || (cluster_b && cluster_b->m_escaped))
1860 out_cluster->m_escaped = true;
1861 if ((cluster_a && cluster_a->m_touched)
1862 || (cluster_b && cluster_b->m_touched))
1863 out_cluster->m_touched = true;
1865 /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1866 could be NULL. Handle these cases. */
1867 if (cluster_a == NULL)
1869 gcc_assert (cluster_b != NULL);
1870 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1871 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1872 return true;
1874 if (cluster_b == NULL)
1876 gcc_assert (cluster_a != NULL);
1877 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1878 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1879 return true;
1882 /* The "both inputs are non-NULL" case. */
1883 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1884 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1885 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1887 hash_set<const binding_key *> keys;
1888 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1889 iter_a != cluster_a->m_map.end (); ++iter_a)
1891 const binding_key *key_a = (*iter_a).first;
1892 keys.add (key_a);
1894 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1895 iter_b != cluster_b->m_map.end (); ++iter_b)
1897 const binding_key *key_b = (*iter_b).first;
1898 keys.add (key_b);
1900 int num_symbolic_keys = 0;
1901 int num_concrete_keys = 0;
1902 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1903 iter != keys.end (); ++iter)
1905 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1906 const binding_key *key = *iter;
1907 const svalue *sval_a = cluster_a->get_any_value (key);
1908 const svalue *sval_b = cluster_b->get_any_value (key);
1910 if (key->symbolic_p ())
1911 num_symbolic_keys++;
1912 else
1913 num_concrete_keys++;
1915 if (sval_a == sval_b)
1917 gcc_assert (sval_a);
1918 out_cluster->m_map.put (key, sval_a);
1919 continue;
1921 else if (sval_a && sval_b)
1923 if (const svalue *merged_sval
1924 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1926 out_cluster->m_map.put (key, merged_sval);
1927 continue;
1929 /* Merger of the svalues failed. Reject merger of the cluster. */
1930 return false;
1933 /* If we get here, then one cluster binds this key and the other
1934 doesn't; merge them as "UNKNOWN". */
1935 gcc_assert (sval_a || sval_b);
1937 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1938 tree type = bound_sval->get_type ();
1939 const svalue *unknown_sval
1940 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1942 /* ...but reject the merger if this sval shouldn't be mergeable
1943 (e.g. reject merging svalues that have non-purgable sm-state,
1944 to avoid falsely reporting memory leaks by merging them
1945 with something else). */
1946 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1947 return false;
1949 out_cluster->m_map.put (key, unknown_sval);
1952 /* We can have at most one symbolic key per cluster,
1953 and if we have one, we can't have any concrete keys.
1954 If these constraints are violated, mark the cluster as touched, with no keys. */
1955 if (num_symbolic_keys >= 2
1956 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1958 out_cluster->m_touched = true;
1959 out_cluster->m_map.empty ();
1962 /* We don't handle other kinds of overlaps yet. */
1964 return true;
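/* An illustrative sketch of the merging rules above (simplified; the
   exact merged svalue is whatever svalue::can_merge_p produces):

       if (flag)
         i = 0;    // path A: cluster binds the bits of 'i' to (int)0
       else
         i = 1;    // path B: cluster binds the same key to (int)1

   At the join the two bindings differ, so the merged cluster binds the
   key for 'i' to the merger's result (typically an UNKNOWN or WIDENING
   value) rather than to 0 or 1; a key bound in only one of the clusters
   is merged as UNKNOWN of its type.  */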
1967 /* Update this cluster to reflect an attempt to merge OTHER where there
1968 is no other cluster to merge with, and so we're notionally merging the
1969 bound values in OTHER with the initial value of the relevant regions.
1971 Any bound keys in OTHER should be bound to unknown in this. */
1973 void
1974 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1975 store *out_store,
1976 store_manager *mgr)
1978 for (map_t::iterator iter = other->m_map.begin ();
1979 iter != other->m_map.end (); ++iter)
1981 const binding_key *iter_key = (*iter).first;
1982 const svalue *iter_sval = (*iter).second;
1983 const svalue *unknown_sval
1984 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
1985 (iter_sval->get_type ());
1986 m_map.put (iter_key, unknown_sval);
1988 /* For any pointers in OTHER, the merger means that the
1989 concrete pointer becomes an unknown value, which could
1990 show up as a false report of a leak when considering what
1991 pointers are live before vs after.
1992 Avoid this by marking the base regions they point to as having
1993 escaped. */
1994 if (const region_svalue *region_sval
1995 = iter_sval->dyn_cast_region_svalue ())
1997 const region *base_reg
1998 = region_sval->get_pointee ()->get_base_region ();
1999 if (base_reg->tracked_p ()
2000 && !base_reg->symbolic_for_unknown_ptr_p ())
2002 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
2003 c->mark_as_escaped ();
2009 /* Mark this cluster as having escaped. */
2011 void
2012 binding_cluster::mark_as_escaped ()
2014 m_escaped = true;
2017 /* If this cluster has escaped (by this call, or by an earlier one, or
2018 by being an external param), then unbind all values and mark it
2019 as "touched", so that it has a conjured value, rather than an
2020 initial_svalue.
2021 Use P to purge state involving conjured_svalues. */
2023 void
2024 binding_cluster::on_unknown_fncall (const gcall *call,
2025 store_manager *mgr,
2026 const conjured_purge &p)
2028 if (m_escaped)
2030 m_map.empty ();
2032 if (!m_base_region->empty_p ())
2034 /* Bind it to a new "conjured" value using CALL. */
2035 const svalue *sval
2036 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2037 (m_base_region->get_type (), call, m_base_region, p);
2038 bind (mgr, m_base_region, sval);
2041 m_touched = true;
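/* An illustrative sketch of the above (the function names are
   hypothetical):

       extern void external_fn (int *);
       int g = 1;
       ...
       external_fn (&g);       // g's cluster is marked as ESCAPED
       other_external_fn ();   // g's bindings are dropped and g is rebound
                               // to a conjured value for this call site

   so after an unknown call the analyzer no longer assumes that an escaped
   variable still holds its previously-known value.  */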
2045 /* Mark this cluster as having been clobbered by STMT.
2046 Use P to purge state involving conjured_svalues. */
2048 void
2049 binding_cluster::on_asm (const gasm *stmt,
2050 store_manager *mgr,
2051 const conjured_purge &p)
2053 m_map.empty ();
2055 /* Bind it to a new "conjured" value using CALL. */
2056 const svalue *sval
2057 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
2058 (m_base_region->get_type (), stmt, m_base_region, p);
2059 bind (mgr, m_base_region, sval);
2061 m_touched = true;
2064 /* Return true if this cluster has escaped. */
2066 bool
2067 binding_cluster::escaped_p () const
2069 /* Consider the "errno" region to always have escaped. */
2070 if (m_base_region->get_kind () == RK_ERRNO)
2071 return true;
2072 return m_escaped;
2075 /* Return true if this binding_cluster has no information,
2076 i.e. if there are no bindings and it hasn't been marked as having
2077 escaped or touched symbolically. */
2079 bool
2080 binding_cluster::redundant_p () const
2082 return (m_map.elements () == 0
2083 && !m_escaped
2084 && !m_touched);
2087 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2089 static void
2090 append_pathvar_with_type (path_var pv,
2091 tree type,
2092 auto_vec<path_var> *out_pvs)
2094 gcc_assert (pv.m_tree);
2096 if (TREE_TYPE (pv.m_tree) != type)
2097 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
2099 out_pvs->safe_push (pv);
2102 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2103 appending the results to OUT_PVS. */
2105 void
2106 binding_cluster::get_representative_path_vars (const region_model *model,
2107 svalue_set *visited,
2108 const region *base_reg,
2109 const svalue *sval,
2110 auto_vec<path_var> *out_pvs)
2111 const
2113 sval = simplify_for_binding (sval);
2115 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2117 const binding_key *key = (*iter).first;
2118 const svalue *bound_sval = (*iter).second;
2119 if (bound_sval == sval)
2121 if (const concrete_binding *ckey
2122 = key->dyn_cast_concrete_binding ())
2124 auto_vec <const region *> subregions;
2125 base_reg->get_subregions_for_binding
2126 (model->get_manager (),
2127 ckey->get_start_bit_offset (),
2128 ckey->get_size_in_bits (),
2129 sval->get_type (),
2130 &subregions);
2131 unsigned i;
2132 const region *subregion;
2133 FOR_EACH_VEC_ELT (subregions, i, subregion)
2135 if (path_var pv
2136 = model->get_representative_path_var (subregion,
2137 visited))
2138 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2141 else
2143 const symbolic_binding *skey = (const symbolic_binding *)key;
2144 if (path_var pv
2145 = model->get_representative_path_var (skey->get_region (),
2146 visited))
2147 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2153 /* Get any svalue bound to KEY, or NULL. */
2155 const svalue *
2156 binding_cluster::get_any_value (const binding_key *key) const
2158 return m_map.get (key);
2161 /* If this cluster has a single direct binding for the whole of the region,
2162 return it.
2163 For use in simplifying dumps. */
2165 const svalue *
2166 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2168 /* Fail gracefully if MGR is NULL to make it easier to dump store
2169 instances in the debugger. */
2170 if (mgr == NULL)
2171 return NULL;
2173 if (m_map.elements () != 1)
2174 return NULL;
2176 if (m_base_region->empty_p ())
2177 return NULL;
2179 const binding_key *key = binding_key::make (mgr, m_base_region);
2180 return get_any_value (key);
2183 /* class store_manager. */
2185 logger *
2186 store_manager::get_logger () const
2188 return m_mgr->get_logger ();
2191 /* binding consolidation. */
2193 const concrete_binding *
2194 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2195 bit_offset_t size_in_bits)
2197 concrete_binding b (start_bit_offset, size_in_bits);
2198 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2199 return existing;
2201 concrete_binding *to_save = new concrete_binding (b);
2202 m_concrete_binding_key_mgr.put (b, to_save);
2203 return to_save;
2206 const symbolic_binding *
2207 store_manager::get_symbolic_binding (const region *reg)
2209 symbolic_binding b (reg);
2210 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2211 return existing;
2213 symbolic_binding *to_save = new symbolic_binding (b);
2214 m_symbolic_binding_key_mgr.put (b, to_save);
2215 return to_save;
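/* A usage sketch (illustrative): the getters above consolidate keys, so
   equal keys are represented by a single object and can be compared by
   pointer:

       const concrete_binding *k1 = mgr->get_concrete_binding (0, 8);
       const concrete_binding *k2 = mgr->get_concrete_binding (0, 8);
       gcc_assert (k1 == k2);  */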
2218 /* class store. */
2220 /* store's default ctor. */
2222 store::store ()
2223 : m_called_unknown_fn (false)
2227 /* store's copy ctor. */
2229 store::store (const store &other)
2230 : m_cluster_map (other.m_cluster_map.elements ()),
2231 m_called_unknown_fn (other.m_called_unknown_fn)
2233 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2234 iter != other.m_cluster_map.end ();
2235 ++iter)
2237 const region *reg = (*iter).first;
2238 gcc_assert (reg);
2239 binding_cluster *c = (*iter).second;
2240 gcc_assert (c);
2241 m_cluster_map.put (reg, new binding_cluster (*c));
2245 /* store's dtor. */
2247 store::~store ()
2249 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2250 iter != m_cluster_map.end ();
2251 ++iter)
2252 delete (*iter).second;
2255 /* store's assignment operator. */
2257 store &
2258 store::operator= (const store &other)
2260 /* Delete existing cluster map. */
2261 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2262 iter != m_cluster_map.end ();
2263 ++iter)
2264 delete (*iter).second;
2265 m_cluster_map.empty ();
2267 m_called_unknown_fn = other.m_called_unknown_fn;
2269 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2270 iter != other.m_cluster_map.end ();
2271 ++iter)
2273 const region *reg = (*iter).first;
2274 gcc_assert (reg);
2275 binding_cluster *c = (*iter).second;
2276 gcc_assert (c);
2277 m_cluster_map.put (reg, new binding_cluster (*c));
2279 return *this;
2282 /* store's equality operator. */
2284 bool
2285 store::operator== (const store &other) const
2287 if (m_called_unknown_fn != other.m_called_unknown_fn)
2288 return false;
2290 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2291 return false;
2293 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2294 iter != m_cluster_map.end ();
2295 ++iter)
2297 const region *reg = (*iter).first;
2298 binding_cluster *c = (*iter).second;
2299 binding_cluster **other_slot
2300 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2301 if (other_slot == NULL)
2302 return false;
2303 if (*c != **other_slot)
2304 return false;
2307 gcc_checking_assert (hash () == other.hash ());
2309 return true;
2312 /* Get a hash value for this store. */
2314 hashval_t
2315 store::hash () const
2317 hashval_t result = 0;
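  /* XOR the per-cluster hashes together; XOR is commutative, so the
     result is independent of the hash_map's iteration order. */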
2318 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2319 iter != m_cluster_map.end ();
2320 ++iter)
2321 result ^= (*iter).second->hash ();
2322 return result;
2325 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2326 removing duplicate parents. */
2328 static void
2329 get_sorted_parent_regions (auto_vec<const region *> *out,
2330 auto_vec<const region *> &in)
2332 /* Get the set of parent regions. */
2333 hash_set<const region *> parent_regions;
2334 const region *iter_reg;
2335 unsigned i;
2336 FOR_EACH_VEC_ELT (in, i, iter_reg)
2338 const region *parent_reg = iter_reg->get_parent_region ();
2339 gcc_assert (parent_reg);
2340 parent_regions.add (parent_reg);
2343 /* Write to OUT. */
2344 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2345 iter != parent_regions.end(); ++iter)
2346 out->safe_push (*iter);
2348 /* Sort OUT. */
2349 out->qsort (region::cmp_ptr_ptr);
2352 /* Dump a representation of this store to PP, using SIMPLE to control how
2353 svalues and regions are printed.
2354 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2355 (to make it easier to use from the debugger). */
2357 void
2358 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2359 store_manager *mgr) const
2361 /* Sort into some deterministic order. */
2362 auto_vec<const region *> base_regions;
2363 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2364 iter != m_cluster_map.end (); ++iter)
2366 const region *base_reg = (*iter).first;
2367 base_regions.safe_push (base_reg);
2369 base_regions.qsort (region::cmp_ptr_ptr);
2371 /* Gather clusters, organize by parent region, so that we can group
2372 together locals, globals, etc. */
2373 auto_vec<const region *> parent_regions;
2374 get_sorted_parent_regions (&parent_regions, base_regions);
2376 const region *parent_reg;
2377 unsigned i;
2378 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2380 gcc_assert (parent_reg);
2381 pp_string (pp, "clusters within ");
2382 parent_reg->dump_to_pp (pp, simple);
2383 if (multiline)
2384 pp_newline (pp);
2385 else
2386 pp_string (pp, " {");
2388 const region *base_reg;
2389 unsigned j;
2390 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2392 /* This is O(N * M), but N ought to be small. */
2393 if (base_reg->get_parent_region () != parent_reg)
2394 continue;
2395 binding_cluster *cluster
2396 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2397 if (!multiline)
2399 if (j > 0)
2400 pp_string (pp, ", ");
2402 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2404 /* Special-case to simplify dumps for the common case where
2405 we just have one value directly bound to the whole of a
2406 region. */
2407 if (multiline)
2409 pp_string (pp, " cluster for: ");
2410 base_reg->dump_to_pp (pp, simple);
2411 pp_string (pp, ": ");
2412 sval->dump_to_pp (pp, simple);
2413 if (cluster->escaped_p ())
2414 pp_string (pp, " (ESCAPED)");
2415 if (cluster->touched_p ())
2416 pp_string (pp, " (TOUCHED)");
2417 pp_newline (pp);
2419 else
2421 pp_string (pp, "region: {");
2422 base_reg->dump_to_pp (pp, simple);
2423 pp_string (pp, ", value: ");
2424 sval->dump_to_pp (pp, simple);
2425 if (cluster->escaped_p ())
2426 pp_string (pp, " (ESCAPED)");
2427 if (cluster->touched_p ())
2428 pp_string (pp, " (TOUCHED)");
2429 pp_string (pp, "}");
2432 else if (multiline)
2434 pp_string (pp, " cluster for: ");
2435 base_reg->dump_to_pp (pp, simple);
2436 pp_newline (pp);
2437 cluster->dump_to_pp (pp, simple, multiline);
2439 else
2441 pp_string (pp, "base region: {");
2442 base_reg->dump_to_pp (pp, simple);
2443 pp_string (pp, "} has cluster: {");
2444 cluster->dump_to_pp (pp, simple, multiline);
2445 pp_string (pp, "}");
2448 if (!multiline)
2449 pp_string (pp, "}");
2451 pp_printf (pp, "m_called_unknown_fn: %s",
2452 m_called_unknown_fn ? "TRUE" : "FALSE");
2453 if (multiline)
2454 pp_newline (pp);
2457 /* Dump a multiline representation of this store to stderr. */
2459 DEBUG_FUNCTION void
2460 store::dump (bool simple) const
2462 pretty_printer pp;
2463 pp_format_decoder (&pp) = default_tree_printer;
2464 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2465 pp.buffer->stream = stderr;
2466 dump_to_pp (&pp, simple, true, NULL);
2467 pp_newline (&pp);
2468 pp_flush (&pp);
2471 /* Assert that this object is valid. */
2473 void
2474 store::validate () const
2476 for (auto iter : m_cluster_map)
2477 iter.second->validate ();
2480 /* Return a new json::object of the form
2481 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2482 ... for each cluster within parent region},
2483 ...for each parent region,
2484 "called_unknown_fn": true/false}. */
2486 json::object *
2487 store::to_json () const
2489 json::object *store_obj = new json::object ();
2491 /* Sort into some deterministic order. */
2492 auto_vec<const region *> base_regions;
2493 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2494 iter != m_cluster_map.end (); ++iter)
2496 const region *base_reg = (*iter).first;
2497 base_regions.safe_push (base_reg);
2499 base_regions.qsort (region::cmp_ptr_ptr);
2501 /* Gather clusters, organize by parent region, so that we can group
2502 together locals, globals, etc. */
2503 auto_vec<const region *> parent_regions;
2504 get_sorted_parent_regions (&parent_regions, base_regions);
2506 const region *parent_reg;
2507 unsigned i;
2508 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2510 gcc_assert (parent_reg);
2512 json::object *clusters_in_parent_reg_obj = new json::object ();
2514 const region *base_reg;
2515 unsigned j;
2516 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2518 /* This is O(N * M), but N ought to be small. */
2519 if (base_reg->get_parent_region () != parent_reg)
2520 continue;
2521 binding_cluster *cluster
2522 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2523 label_text base_reg_desc = base_reg->get_desc ();
2524 clusters_in_parent_reg_obj->set (base_reg_desc.get (),
2525 cluster->to_json ());
2527 label_text parent_reg_desc = parent_reg->get_desc ();
2528 store_obj->set (parent_reg_desc.get (), clusters_in_parent_reg_obj);
2531 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2533 return store_obj;
2536 /* Get any svalue bound to REG, or NULL. */
2538 const svalue *
2539 store::get_any_binding (store_manager *mgr, const region *reg) const
2541 const region *base_reg = reg->get_base_region ();
2542 binding_cluster **cluster_slot
2543 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2544 if (!cluster_slot)
2545 return NULL;
2546 return (*cluster_slot)->get_any_binding (mgr, reg);
2549 /* Set the value of LHS_REG to RHS_SVAL. */
2551 void
2552 store::set_value (store_manager *mgr, const region *lhs_reg,
2553 const svalue *rhs_sval,
2554 uncertainty_t *uncertainty)
2556 logger *logger = mgr->get_logger ();
2557 LOG_SCOPE (logger);
2559 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2561 if (lhs_reg->get_type ())
2562 rhs_sval = simplify_for_binding (rhs_sval);
2563 /* ...but if we have no type for the region, retain any cast. */
2565 const region *lhs_base_reg = lhs_reg->get_base_region ();
2566 binding_cluster *lhs_cluster;
2567 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2569 /* Reject attempting to bind values into a symbolic region
2570 for an unknown ptr; merely invalidate values below. */
2571 lhs_cluster = NULL;
2573 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2574 then treat the region being pointed to as having escaped. */
2575 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2577 const region *ptr_dst = ptr_sval->get_pointee ();
2578 const region *ptr_base_reg = ptr_dst->get_base_region ();
2579 mark_as_escaped (ptr_base_reg);
2581 if (uncertainty)
2582 uncertainty->on_maybe_bound_sval (rhs_sval);
2584 else if (lhs_base_reg->tracked_p ())
2586 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2587 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2589 else
2591 /* Reject attempting to bind values into an untracked region;
2592 merely invalidate values below. */
2593 lhs_cluster = NULL;
2596 /* Bindings to a cluster can affect other clusters if a symbolic
2597 base region is involved.
2598 Writes to concrete clusters can't affect other concrete clusters,
2599 but can affect symbolic clusters.
2600 Writes to symbolic clusters can affect both concrete and symbolic
2601 clusters.
2602 Invalidate our knowledge of other clusters that might have been
2603 affected by the write. */
2604 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2605 iter != m_cluster_map.end (); ++iter)
2607 const region *iter_base_reg = (*iter).first;
2608 binding_cluster *iter_cluster = (*iter).second;
2609 if (iter_base_reg != lhs_base_reg
2610 && (lhs_cluster == NULL
2611 || lhs_cluster->symbolic_p ()
2612 || iter_cluster->symbolic_p ()))
2614 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2615 switch (t_alias.get_value ())
2617 default:
2618 gcc_unreachable ();
2620 case tristate::TS_UNKNOWN:
2621 if (logger)
2623 pretty_printer *pp = logger->get_printer ();
2624 logger->start_log_line ();
2625 logger->log_partial ("possible aliasing of ");
2626 iter_base_reg->dump_to_pp (pp, true);
2627 logger->log_partial (" when writing SVAL: ");
2628 rhs_sval->dump_to_pp (pp, true);
2629 logger->log_partial (" to LHS_REG: ");
2630 lhs_reg->dump_to_pp (pp, true);
2631 logger->end_log_line ();
2633 /* Mark the whole of iter_base_reg within iter_cluster as unknown,
2634 using LHS_REG when considering overlaps, to handle
2635 symbolic vs concrete issues. */
2636 iter_cluster->mark_region_as_unknown
2637 (mgr,
2638 iter_base_reg, /* reg_to_bind */
2639 lhs_reg, /* reg_for_overlap */
2640 uncertainty);
2641 break;
2643 case tristate::TS_TRUE:
2644 gcc_unreachable ();
2645 break;
2647 case tristate::TS_FALSE:
2648 /* If they can't be aliases, then don't invalidate this
2649 cluster. */
2650 break;
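/* For example (an illustrative sketch of the invalidation above):

       int g;
       void test (int *p)
       {
         g = 42;
         *p = 17;   // '*p' might alias 'g', so g's binding becomes unknown
       }

   eval_alias returns UNKNOWN for the pair ('g', '*p'), so the previously
   known value 42 is discarded rather than being kept unsoundly.  */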
2656 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2658 tristate
2659 store::eval_alias (const region *base_reg_a,
2660 const region *base_reg_b) const
2662 /* SSA names can't alias. */
2663 tree decl_a = base_reg_a->maybe_get_decl ();
2664 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2665 return tristate::TS_FALSE;
2666 tree decl_b = base_reg_b->maybe_get_decl ();
2667 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2668 return tristate::TS_FALSE;
2670 /* Try both ways, for symmetry. */
2671 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2672 if (ts_ab.is_false ())
2673 return tristate::TS_FALSE;
2674 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2675 if (ts_ba.is_false ())
2676 return tristate::TS_FALSE;
2677 return tristate::TS_UNKNOWN;
2680 /* Half of store::eval_alias; called twice for symmetry. */
2682 tristate
2683 store::eval_alias_1 (const region *base_reg_a,
2684 const region *base_reg_b) const
2686 if (const symbolic_region *sym_reg_a
2687 = base_reg_a->dyn_cast_symbolic_region ())
2689 const svalue *sval_a = sym_reg_a->get_pointer ();
2690 if (tree decl_b = base_reg_b->maybe_get_decl ())
2692 if (!may_be_aliased (decl_b))
2693 return tristate::TS_FALSE;
2694 if (sval_a->get_kind () == SK_INITIAL)
2695 if (!is_global_var (decl_b))
2697 /* The initial value of a pointer can't point to a local. */
2698 return tristate::TS_FALSE;
2701 if (sval_a->get_kind () == SK_INITIAL
2702 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2704 /* The initial value of a pointer can't point to a
2705 region that was allocated on the heap after the beginning of the
2706 path. */
2707 return tristate::TS_FALSE;
2709 if (const widening_svalue *widening_sval_a
2710 = sval_a->dyn_cast_widening_svalue ())
2712 const svalue *base = widening_sval_a->get_base_svalue ();
2713 if (const region_svalue *region_sval
2714 = base->dyn_cast_region_svalue ())
2716 const region *pointee = region_sval->get_pointee ();
2717 /* If sval_a is WIDENING(&REGION, OP), and
2718 B can't alias REGION, then B can't alias A either.
2719 For example, A might arise from
2720 for (ptr = &REGION; ...; ptr++)
2721 where sval_a is ptr in the 2nd iteration of the loop.
2722 We want to ensure that "*ptr" can only clobber things
2723 within REGION's base region. */
2724 tristate ts = eval_alias (pointee->get_base_region (),
2725 base_reg_b);
2726 if (ts.is_false ())
2727 return tristate::TS_FALSE;
2731 return tristate::TS_UNKNOWN;
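/* For example (an illustrative sketch of the rules above):

       void test (int *p)
       {
         int local;
         int *q = (int *) malloc (sizeof (int));
         // The initial value of 'p' can't point at 'local' (a non-global
         // whose address is never taken), nor at the heap region that was
         // allocated after the start of the path, so writes through '*p'
         // can't clobber either of them.
       }  */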
2734 /* Remove all bindings overlapping REG within this store. */
2736 void
2737 store::clobber_region (store_manager *mgr, const region *reg)
2739 const region *base_reg = reg->get_base_region ();
2740 binding_cluster **slot = m_cluster_map.get (base_reg);
2741 if (!slot)
2742 return;
2743 binding_cluster *cluster = *slot;
2744 cluster->clobber_region (mgr, reg);
2745 if (cluster->redundant_p ())
2747 delete cluster;
2748 m_cluster_map.remove (base_reg);
2752 /* Remove any bindings for REG within this store. */
2754 void
2755 store::purge_region (store_manager *mgr, const region *reg)
2757 const region *base_reg = reg->get_base_region ();
2758 binding_cluster **slot = m_cluster_map.get (base_reg);
2759 if (!slot)
2760 return;
2761 binding_cluster *cluster = *slot;
2762 cluster->purge_region (mgr, reg);
2763 if (cluster->redundant_p ())
2765 delete cluster;
2766 m_cluster_map.remove (base_reg);
2770 /* Fill REG with SVAL. */
2772 void
2773 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2775 /* Filling an empty region is a no-op. */
2776 if (reg->empty_p ())
2777 return;
2779 const region *base_reg = reg->get_base_region ();
2780 if (base_reg->symbolic_for_unknown_ptr_p ()
2781 || !base_reg->tracked_p ())
2782 return;
2783 binding_cluster *cluster = get_or_create_cluster (base_reg);
2784 cluster->fill_region (mgr, reg, sval);
2787 /* Zero-fill REG. */
2789 void
2790 store::zero_fill_region (store_manager *mgr, const region *reg)
2792 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2793 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2794 fill_region (mgr, reg, zero_sval);
2797 /* Mark REG as having unknown content. */
2799 void
2800 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2801 uncertainty_t *uncertainty)
2803 const region *base_reg = reg->get_base_region ();
2804 if (base_reg->symbolic_for_unknown_ptr_p ()
2805 || !base_reg->tracked_p ())
2806 return;
2807 binding_cluster *cluster = get_or_create_cluster (base_reg);
2808 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty);
2811 /* Purge state involving SVAL. */
2813 void
2814 store::purge_state_involving (const svalue *sval,
2815 region_model_manager *sval_mgr)
2817 auto_vec <const region *> base_regs_to_purge;
2818 for (auto iter : m_cluster_map)
2820 const region *base_reg = iter.first;
2821 if (base_reg->involves_p (sval))
2822 base_regs_to_purge.safe_push (base_reg);
2823 else
2825 binding_cluster *cluster = iter.second;
2826 cluster->purge_state_involving (sval, sval_mgr);
2830 for (auto iter : base_regs_to_purge)
2831 purge_cluster (iter);
2834 /* Get the cluster for BASE_REG, or NULL (const version). */
2836 const binding_cluster *
2837 store::get_cluster (const region *base_reg) const
2839 gcc_assert (base_reg);
2840 gcc_assert (base_reg->get_base_region () == base_reg);
2841 if (binding_cluster **slot
2842 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2843 return *slot;
2844 else
2845 return NULL;
2848 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2850 binding_cluster *
2851 store::get_cluster (const region *base_reg)
2853 gcc_assert (base_reg);
2854 gcc_assert (base_reg->get_base_region () == base_reg);
2855 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2856 return *slot;
2857 else
2858 return NULL;
2861 /* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
2863 binding_cluster *
2864 store::get_or_create_cluster (const region *base_reg)
2866 gcc_assert (base_reg);
2867 gcc_assert (base_reg->get_base_region () == base_reg);
2869 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2870 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2872 /* We shouldn't create clusters for base regions that aren't trackable. */
2873 gcc_assert (base_reg->tracked_p ());
2875 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2876 return *slot;
2878 binding_cluster *cluster = new binding_cluster (base_reg);
2879 m_cluster_map.put (base_reg, cluster);
2881 return cluster;
2884 /* Remove any cluster for BASE_REG, for use by
2885 region_model::unbind_region_and_descendents
2886 when popping stack frames and handling deleted heap regions. */
2888 void
2889 store::purge_cluster (const region *base_reg)
2891 gcc_assert (base_reg->get_base_region () == base_reg);
2892 binding_cluster **slot = m_cluster_map.get (base_reg);
2893 if (!slot)
2894 return;
2895 binding_cluster *cluster = *slot;
2896 delete cluster;
2897 m_cluster_map.remove (base_reg);
2900 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2901 Return true if successful, or false if the stores can't be merged. */
2903 bool
2904 store::can_merge_p (const store *store_a, const store *store_b,
2905 store *out_store, store_manager *mgr,
2906 model_merger *merger)
2908 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2909 out_store->m_called_unknown_fn = true;
2911 /* Get the union of all base regions for STORE_A and STORE_B. */
2912 hash_set<const region *> base_regions;
2913 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2914 iter_a != store_a->m_cluster_map.end (); ++iter_a)
2916 const region *base_reg_a = (*iter_a).first;
2917 base_regions.add (base_reg_a);
2919 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2920 iter_b != store_b->m_cluster_map.end (); ++iter_b)
2922 const region *base_reg_b = (*iter_b).first;
2923 base_regions.add (base_reg_b);
2926 /* Sort the base regions before considering them. This ought not to
2927 affect the results, but can affect which types UNKNOWN_REGIONs are
2928 created for in a run; sorting them thus avoids minor differences
2929 in logfiles. */
2930 auto_vec<const region *> vec_base_regions (base_regions.elements ());
2931 for (hash_set<const region *>::iterator iter = base_regions.begin ();
2932 iter != base_regions.end (); ++iter)
2933 vec_base_regions.quick_push (*iter);
2934 vec_base_regions.qsort (region::cmp_ptr_ptr);
2935 unsigned i;
2936 const region *base_reg;
2937 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2939 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2940 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
2941 /* At least one of cluster_a and cluster_b must be non-NULL. */
2942 binding_cluster *out_cluster
2943 = out_store->get_or_create_cluster (base_reg);
2944 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
2945 out_cluster, out_store, mgr, merger))
2946 return false;
2948 return true;
2951 /* Mark the cluster for BASE_REG as having escaped.
2952 For use when handling an unrecognized function call, and
2953 for params to "top-level" calls.
2954 Further unknown function calls could touch it, even if the cluster
2955 isn't reachable from args of those calls. */
2957 void
2958 store::mark_as_escaped (const region *base_reg)
2960 gcc_assert (base_reg);
2961 gcc_assert (base_reg->get_base_region () == base_reg);
2963 if (base_reg->symbolic_for_unknown_ptr_p ()
2964 || !base_reg->tracked_p ())
2965 return;
2967 binding_cluster *cluster = get_or_create_cluster (base_reg);
2968 cluster->mark_as_escaped ();
2971 /* Handle an unknown fncall by updating any clusters that have escaped
2972 (either in this fncall, or in a prior one). */
2974 void
2975 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
2976 const conjured_purge &p)
2978 m_called_unknown_fn = true;
2980 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2981 iter != m_cluster_map.end (); ++iter)
2982 (*iter).second->on_unknown_fncall (call, mgr, p);
2985 /* Return true if a non-const pointer to BASE_REG (or something within it)
2986 has escaped to code outside of the TU being analyzed. */
2988 bool
2989 store::escaped_p (const region *base_reg) const
2991 gcc_assert (base_reg);
2992 gcc_assert (base_reg->get_base_region () == base_reg);
2994 /* "errno" can always be modified by external code. */
2995 if (base_reg->get_kind () == RK_ERRNO)
2996 return true;
2998 if (binding_cluster **cluster_slot
2999 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
3000 return (*cluster_slot)->escaped_p ();
3001 return false;
3004 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3005 this store, using VISITED to ensure the traversal terminates. */
3007 void
3008 store::get_representative_path_vars (const region_model *model,
3009 svalue_set *visited,
3010 const svalue *sval,
3011 auto_vec<path_var> *out_pvs) const
3013 gcc_assert (sval);
3015 /* Find all bindings that reference SVAL. */
3016 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3017 iter != m_cluster_map.end (); ++iter)
3019 const region *base_reg = (*iter).first;
3020 binding_cluster *cluster = (*iter).second;
3021 cluster->get_representative_path_vars (model, visited, base_reg, sval,
3022 out_pvs);
3025 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
3027 const region *reg = init_sval->get_region ();
3028 if (path_var pv = model->get_representative_path_var (reg,
3029 visited))
3030 out_pvs->safe_push (pv);
3034 /* Remove all bindings overlapping REG within this store, removing
3035 any clusters that become redundant.
3037 If UNCERTAINTY is non-NULL, use it to record any svalues that
3038 were removed, as being maybe-bound. */
3040 void
3041 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
3042 uncertainty_t *uncertainty)
3044 const region *base_reg = reg->get_base_region ();
3045 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
3047 binding_cluster *cluster = *cluster_slot;
3048 if (reg == base_reg && !escaped_p (base_reg))
3050 /* Remove whole cluster. */
3051 m_cluster_map.remove (base_reg);
3052 delete cluster;
3053 return;
3055 cluster->remove_overlapping_bindings (mgr, reg, uncertainty);
3059 /* Subclass of visitor that accumulates a hash_set of the regions that
3060 were visited. */
3062 struct region_finder : public visitor
3064 void visit_region (const region *reg) final override
3066 m_regs.add (reg);
3069 hash_set<const region *> m_regs;
3072 /* Canonicalize this store, to maximize the chance of equality between
3073 instances. */
3075 void
3076 store::canonicalize (store_manager *mgr)
3078 /* If we have e.g.:
3079 cluster for: HEAP_ALLOCATED_REGION(543)
3080 ESCAPED
3081 TOUCHED
3082 where the heap region is empty and unreferenced, then purge that
3083 cluster, to avoid unbounded state chains involving these. */
3085 /* Find regions that are referenced by bound values in the store. */
3086 region_finder s;
3087 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3088 iter != m_cluster_map.end (); ++iter)
3090 binding_cluster *cluster = (*iter).second;
3091 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3092 bind_iter != cluster->m_map.end (); ++bind_iter)
3093 (*bind_iter).second->accept (&s);
3096 /* Locate heap-allocated regions that have empty bindings that weren't
3097 found above. */
3098 hash_set<const region *> purgeable_regions;
3099 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
3100 iter != m_cluster_map.end (); ++iter)
3102 const region *base_reg = (*iter).first;
3103 binding_cluster *cluster = (*iter).second;
3104 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
3106 /* Don't purge a heap-allocated region that's been marked as
3107 escaping, since this could be recording that a ptr to it
3108 was written to an unknown symbolic region along this
3109 path, and so we don't know whether it's referenced or
3110 not, and hence should report it as leaking
3111 (PR analyzer/106473). */
3112 if (cluster->escaped_p ())
3113 continue;
3115 if (cluster->empty_p ())
3116 if (!s.m_regs.contains (base_reg))
3117 purgeable_regions.add (base_reg);
3119 /* Also cover the UNKNOWN case. */
3120 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
3121 if (sval->get_kind () == SK_UNKNOWN)
3122 if (!s.m_regs.contains (base_reg))
3123 purgeable_regions.add (base_reg);
3127 /* Purge them. */
3128 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
3129 iter != purgeable_regions.end (); ++iter)
3131 const region *base_reg = *iter;
3132 purge_cluster (base_reg);
3136 /* Subroutine for use by exploded_path::feasible_p.
3138 We need to deal with state differences between:
3139 (a) when the exploded_graph is being initially constructed and
3140 (b) when replaying the state changes along a specific path in
3141 exploded_path::feasible_p.
3143 In (a), state merging happens, so when exploring a loop
3144 for (i = 0; i < 1024; i++)
3145 on successive iterations we have i == 0, then i == WIDENING.
3147 In (b), no state merging happens, so naively replaying the path
3148 that goes twice through the loop then exits it
3149 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3150 that exits the loop, which would be found to be infeasible as i == 1,
3151 and the path would be rejected.
3153 We need to fix up state during replay. This subroutine is
3154 called whenever we enter a supernode that we've already
3155 visited along this exploded_path, passing in OTHER_STORE
3156 from the destination enode's state.
3158 Find bindings to widening values in OTHER_STORE.
3159 For all that are found, update the binding in this store to UNKNOWN. */
3161 void
3162 store::loop_replay_fixup (const store *other_store,
3163 region_model_manager *mgr)
3165 gcc_assert (other_store);
3166 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3167 iter != other_store->m_cluster_map.end (); ++iter)
3169 const region *base_reg = (*iter).first;
3170 binding_cluster *cluster = (*iter).second;
3171 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3172 bind_iter != cluster->m_map.end (); ++bind_iter)
3174 const binding_key *key = (*bind_iter).first;
3175 const svalue *sval = (*bind_iter).second;
3176 if (sval->get_kind () == SK_WIDENING)
3178 binding_cluster *this_cluster
3179 = get_or_create_cluster (base_reg);
3180 const svalue *unknown
3181 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3182 this_cluster->bind_key (key, unknown);
3188 /* Use R to replay the bindings from SUMMARY into this object. */
3190 void
3191 store::replay_call_summary (call_summary_replay &r,
3192 const store &summary)
3194 if (summary.m_called_unknown_fn)
3196 /* A call to an external function occurred in the summary.
3197 Hence we need to invalidate our knowledge of globals,
3198 escaped regions, etc. */
3199 on_unknown_fncall (r.get_call_stmt (),
3200 r.get_store_manager (),
3201 conjured_purge (r.get_caller_model (),
3202 r.get_ctxt ()));
3205 auto_vec<const region *> keys (summary.m_cluster_map.elements ());
3206 for (auto kv : summary.m_cluster_map)
3207 keys.quick_push (kv.first);
3208 keys.qsort (region::cmp_ptr_ptr);
3209 for (auto base_reg : keys)
3210 replay_call_summary_cluster (r, summary, base_reg);
3213 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3214 into this object. */
3216 void
3217 store::replay_call_summary_cluster (call_summary_replay &r,
3218 const store &summary,
3219 const region *summary_base_reg)
3221 const call_details &cd = r.get_call_details ();
3222 region_model_manager *reg_mgr = r.get_manager ();
3223 store_manager *mgr = reg_mgr->get_store_manager ();
3224 const binding_cluster *summary_cluster
3225 = summary.get_cluster (summary_base_reg);
3227 /* Handle "ESCAPED" and "TOUCHED" flags. */
3228 if (summary_cluster->escaped_p () || summary_cluster->touched_p ())
3229 if (const region *caller_reg
3230 = r.convert_region_from_summary (summary_base_reg))
3232 const region *caller_base_reg = caller_reg->get_base_region ();
3233 if (caller_base_reg->tracked_p ()
3234 && !caller_base_reg->symbolic_for_unknown_ptr_p ())
3236 binding_cluster *caller_cluster
3237 = get_or_create_cluster (caller_base_reg);
3238 if (summary_cluster->escaped_p ())
3239 caller_cluster->mark_as_escaped ();
3240 if (summary_cluster->touched_p ())
3241 caller_cluster->m_touched = true;
3245 switch (summary_base_reg->get_kind ())
3247 /* Top-level regions. */
3248 case RK_FRAME:
3249 case RK_GLOBALS:
3250 case RK_CODE:
3251 case RK_STACK:
3252 case RK_HEAP:
3253 case RK_THREAD_LOCAL:
3254 case RK_ROOT:
3255 /* Child regions. */
3256 case RK_FIELD:
3257 case RK_ELEMENT:
3258 case RK_OFFSET:
3259 case RK_SIZED:
3260 case RK_CAST:
3261 case RK_BIT_RANGE:
3262 /* Other regions. */
3263 case RK_VAR_ARG:
3264 case RK_UNKNOWN:
3265 /* These should never be the base region of a binding cluster. */
3266 gcc_unreachable ();
3267 break;
3269 case RK_FUNCTION:
3270 case RK_LABEL:
3271 case RK_STRING:
3272 /* These can be marked as escaping. */
3273 break;
3275 case RK_SYMBOLIC:
3277 const symbolic_region *summary_symbolic_reg
3278 = as_a <const symbolic_region *> (summary_base_reg);
3279 const svalue *summary_ptr_sval = summary_symbolic_reg->get_pointer ();
3280 const svalue *caller_ptr_sval
3281 = r.convert_svalue_from_summary (summary_ptr_sval);
3282 if (!caller_ptr_sval)
3283 return;
3284 const region *caller_dest_reg
3285 = cd.get_model ()->deref_rvalue (caller_ptr_sval,
3286 NULL_TREE,
3287 cd.get_ctxt ());
3288 const svalue *summary_sval
3289 = summary.get_any_binding (mgr, summary_base_reg);
3290 if (!summary_sval)
3291 return;
3292 const svalue *caller_sval
3293 = r.convert_svalue_from_summary (summary_sval);
3294 if (!caller_sval)
3295 caller_sval =
3296 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3297 set_value (mgr, caller_dest_reg,
3298 caller_sval, NULL /* uncertainty_t * */);
3300 break;
3302 case RK_HEAP_ALLOCATED:
3303 case RK_DECL:
3304 case RK_ERRNO:
3306 const region *caller_dest_reg
3307 = r.convert_region_from_summary (summary_base_reg);
3308 if (!caller_dest_reg)
3309 return;
3310 const svalue *summary_sval
3311 = summary.get_any_binding (mgr, summary_base_reg);
3312 if (!summary_sval)
3313 summary_sval = reg_mgr->get_or_create_compound_svalue
3314 (summary_base_reg->get_type (),
3315 summary_cluster->get_map ());
3316 const svalue *caller_sval
3317 = r.convert_svalue_from_summary (summary_sval);
3318 if (!caller_sval)
3319 caller_sval =
3320 reg_mgr->get_or_create_unknown_svalue (summary_sval->get_type ());
3321 set_value (mgr, caller_dest_reg,
3322 caller_sval, NULL /* uncertainty_t * */);
3324 break;
3326 case RK_ALLOCA:
3327 /* Ignore bindings of alloca regions in the summary. */
3328 break;
3332 #if CHECKING_P
3334 namespace selftest {
3336 /* Verify that bit_range::intersects_p works as expected. */
3338 static void
3339 test_bit_range_intersects_p ()
3341 bit_range b0 (0, 1);
3342 bit_range b1 (1, 1);
3343 bit_range b2 (2, 1);
3344 bit_range b3 (3, 1);
3345 bit_range b4 (4, 1);
3346 bit_range b5 (5, 1);
3347 bit_range b6 (6, 1);
3348 bit_range b7 (7, 1);
3349 bit_range b1_to_6 (1, 6);
3350 bit_range b0_to_7 (0, 8);
3351 bit_range b3_to_5 (3, 3);
3352 bit_range b6_to_7 (6, 2);
3354 /* self-intersection is true. */
3355 ASSERT_TRUE (b0.intersects_p (b0));
3356 ASSERT_TRUE (b7.intersects_p (b7));
3357 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3358 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3360 ASSERT_FALSE (b0.intersects_p (b1));
3361 ASSERT_FALSE (b1.intersects_p (b0));
3362 ASSERT_FALSE (b0.intersects_p (b7));
3363 ASSERT_FALSE (b7.intersects_p (b0));
3365 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3366 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3367 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3368 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3370 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3371 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3372 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3373 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3374 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3375 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3377 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3378 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3380 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3381 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3383 bit_range r1 (0,0);
3384 bit_range r2 (0,0);
3385 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3386 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3387 ASSERT_EQ (r1.m_size_in_bits, 6);
3388 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3389 ASSERT_EQ (r2.m_size_in_bits, 6);
3391 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3392 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3393 ASSERT_EQ (r1.m_size_in_bits, 6);
3394 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3395 ASSERT_EQ (r2.m_size_in_bits, 6);
3398 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3400 static void
3401 assert_bit_range_from_mask_eq (const location &loc,
3402 unsigned HOST_WIDE_INT mask,
3403 const bit_range &expected)
3405 bit_range actual (0, 0);
3406 bool ok = bit_range::from_mask (mask, &actual);
3407 ASSERT_TRUE_AT (loc, ok);
3408 ASSERT_EQ_AT (loc, actual, expected);
3411 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3412 out EXPECTED_BIT_RANGE. */
3414 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3415 SELFTEST_BEGIN_STMT \
3416 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3417 EXPECTED_BIT_RANGE); \
3418 SELFTEST_END_STMT
3420 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3422 static void
3423 assert_no_bit_range_from_mask_eq (const location &loc,
3424 unsigned HOST_WIDE_INT mask)
3426 bit_range actual (0, 0);
3427 bool ok = bit_range::from_mask (mask, &actual);
3428 ASSERT_FALSE_AT (loc, ok);
3431 /* Assert that bit_range::from_mask (MASK) returns false. */
3433 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3434 SELFTEST_BEGIN_STMT \
3435 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3436 SELFTEST_END_STMT
3438 /* Verify that bit_range::from_mask works as expected. */
3440 static void
3441 test_bit_range_from_mask ()
3443 /* Should fail on zero. */
3444 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3446 /* Verify 1-bit masks. */
3447 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3448 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3449 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3450 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3451 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3452 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3453 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3454 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3456 /* Verify N-bit masks starting at bit 0. */
3457 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3458 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3459 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3460 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3461 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3462 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3463 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3464 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3466 /* Various other tests. */
3467 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3468 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3469 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3471 /* Multiple ranges of set bits should fail. */
3472 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3473 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3476 /* Implementation detail of ASSERT_OVERLAP. */
3478 static void
3479 assert_overlap (const location &loc,
3480 const concrete_binding *b1,
3481 const concrete_binding *b2)
3483 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3484 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3487 /* Implementation detail of ASSERT_DISJOINT. */
3489 static void
3490 assert_disjoint (const location &loc,
3491 const concrete_binding *b1,
3492 const concrete_binding *b2)
3494 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3495 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3498 /* Assert that B1 and B2 overlap, checking both ways. */
3500 #define ASSERT_OVERLAP(B1, B2) \
3501 SELFTEST_BEGIN_STMT \
3502 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3503 SELFTEST_END_STMT
3505 /* Assert that B1 and B2 do not overlap, checking both ways. */
3507 #define ASSERT_DISJOINT(B1, B2) \
3508 SELFTEST_BEGIN_STMT \
3509 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3510 SELFTEST_END_STMT
3512 /* Verify that concrete_binding::overlaps_p works as expected. */
3514 static void
3515 test_binding_key_overlap ()
3517 store_manager mgr (NULL);
3519 /* Various 8-bit bindings. */
3520 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3521 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3522 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3523 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3525 /* 16-bit bindings. */
3526 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3527 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3528 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3530 /* 32-bit binding. */
3531 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3533 /* Everything should self-overlap. */
3534 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3535 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3536 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3537 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3538 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3539 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3540 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3541 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3543 /* Verify the 8-bit bindings that don't overlap each other. */
3544 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3545 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3547 /* Check for overlap of differently-sized bindings. */
3548 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3549 /* ...and with differing start points. */
3550 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3551 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3552 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3553 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3555 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3556 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3557 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3558 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3561 /* Run all of the selftests within this file. */
3563 void
3564 analyzer_store_cc_tests ()
3566 test_bit_range_intersects_p ();
3567 test_bit_range_from_mask ();
3568 test_binding_key_overlap ();
3571 } // namespace selftest
3573 #endif /* CHECKING_P */
3575 } // namespace ana
3577 #endif /* #if ENABLE_ANALYZER */