1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2022 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tree.h"
25 #include "function.h"
26 #include "basic-block.h"
27 #include "gimple.h"
28 #include "gimple-iterator.h"
29 #include "diagnostic-core.h"
30 #include "graphviz.h"
31 #include "options.h"
32 #include "cgraph.h"
33 #include "tree-dfa.h"
34 #include "stringpool.h"
35 #include "convert.h"
36 #include "target.h"
37 #include "fold-const.h"
38 #include "tree-pretty-print.h"
39 #include "diagnostic-color.h"
40 #include "diagnostic-metadata.h"
41 #include "tristate.h"
42 #include "bitmap.h"
43 #include "selftest.h"
44 #include "function.h"
45 #include "json.h"
46 #include "analyzer/analyzer.h"
47 #include "analyzer/analyzer-logging.h"
48 #include "ordered-hash-map.h"
49 #include "options.h"
50 #include "cgraph.h"
51 #include "cfg.h"
52 #include "digraph.h"
53 #include "analyzer/supergraph.h"
54 #include "sbitmap.h"
55 #include "analyzer/call-string.h"
56 #include "analyzer/program-point.h"
57 #include "analyzer/store.h"
58 #include "analyzer/region-model.h"
59 #include "analyzer/analyzer-selftests.h"
60 #include "stor-layout.h"
62 #if ENABLE_ANALYZER
64 namespace ana {
66 /* Dump SVALS to PP, sorting them to ensure determinism. */
68 static void
69 dump_svalue_set (const hash_set <const svalue *> &svals,
70 pretty_printer *pp, bool simple)
72 auto_vec <const svalue *> v;
73 for (hash_set<const svalue *>::iterator iter = svals.begin ();
74 iter != svals.end (); ++iter)
76 v.safe_push (*iter);
78 v.qsort (svalue::cmp_ptr_ptr);
80 pp_character (pp, '{');
81 const svalue *sval;
82 unsigned i;
83 FOR_EACH_VEC_ELT (v, i, sval)
85 if (i > 0)
86 pp_string (pp, ", ");
87 sval->dump_to_pp (pp, simple);
89 pp_character (pp, '}');
92 /* class uncertainty_t. */
94 /* Dump this object to PP. */
96 void
97 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
99 pp_string (pp, "{m_maybe_bound_svals: ");
100 dump_svalue_set (m_maybe_bound_svals, pp, simple);
102 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
103 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
104 pp_string (pp, "}");
107 /* Dump this object to stderr. */
109 DEBUG_FUNCTION void
110 uncertainty_t::dump (bool simple) const
112 pretty_printer pp;
113 pp_format_decoder (&pp) = default_tree_printer;
114 pp_show_color (&pp) = pp_show_color (global_dc->printer);
115 pp.buffer->stream = stderr;
116 dump_to_pp (&pp, simple);
117 pp_newline (&pp);
118 pp_flush (&pp);
121 /* class binding_key. */
123 const binding_key *
124 binding_key::make (store_manager *mgr, const region *r)
126 region_offset offset = r->get_offset ();
127 if (offset.symbolic_p ())
128 return mgr->get_symbolic_binding (r);
129 else
131 bit_size_t bit_size;
132 if (r->get_bit_size (&bit_size))
133 return mgr->get_concrete_binding (offset.get_bit_offset (),
134 bit_size);
135 else
136 return mgr->get_symbolic_binding (r);
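/* Illustrative sketch (hedged, for exposition only): for a region whose
   offset and bit size are both known, such as a declared 32-bit "int x",
   make returns a concrete_binding covering bits 0-31; for a region whose
   offset depends on a symbolic index (e.g. "arr[i]"), it falls back to a
   symbolic_binding keyed on the region itself.  Roughly, assuming MGR and
   REG are in scope and that handle_bits and handle_symbolic are
   hypothetical callbacks:

     const binding_key *key = binding_key::make (mgr, reg);
     if (const concrete_binding *ckey = key->dyn_cast_concrete_binding ())
       handle_bits (ckey->get_start_bit_offset (), ckey->get_size_in_bits ());
     else
       handle_symbolic (key);  */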
140 /* Dump this binding_key to stderr. */
142 DEBUG_FUNCTION void
143 binding_key::dump (bool simple) const
145 pretty_printer pp;
146 pp_format_decoder (&pp) = default_tree_printer;
147 pp_show_color (&pp) = pp_show_color (global_dc->printer);
148 pp.buffer->stream = stderr;
149 dump_to_pp (&pp, simple);
150 pp_newline (&pp);
151 pp_flush (&pp);
154 /* Get a description of this binding_key. */
156 label_text
157 binding_key::get_desc (bool simple) const
159 pretty_printer pp;
160 pp_format_decoder (&pp) = default_tree_printer;
161 dump_to_pp (&pp, simple);
162 return label_text::take (xstrdup (pp_formatted_text (&pp)));
165 /* qsort callback. */
168 binding_key::cmp_ptrs (const void *p1, const void *p2)
170 const binding_key * const *pk1 = (const binding_key * const *)p1;
171 const binding_key * const *pk2 = (const binding_key * const *)p2;
172 return cmp (*pk1, *pk2);
175 /* Comparator for binding_keys. */
178 binding_key::cmp (const binding_key *k1, const binding_key *k2)
180 int concrete1 = k1->concrete_p ();
181 int concrete2 = k2->concrete_p ();
182 if (int concrete_cmp = concrete1 - concrete2)
183 return concrete_cmp;
184 if (concrete1)
186 const concrete_binding *b1 = (const concrete_binding *)k1;
187 const concrete_binding *b2 = (const concrete_binding *)k2;
188 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
189 b2->get_start_bit_offset (),
190 SIGNED))
191 return start_cmp;
192 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
193 SIGNED);
195 else
197 const symbolic_binding *s1 = (const symbolic_binding *)k1;
198 const symbolic_binding *s2 = (const symbolic_binding *)k2;
199 if (s1 > s2)
200 return 1;
201 if (s1 < s2)
202 return -1;
203 return 0;
207 /* struct bit_range. */
209 void
210 bit_range::dump_to_pp (pretty_printer *pp) const
212 byte_range bytes (0, 0);
213 if (as_byte_range (&bytes))
214 bytes.dump_to_pp (pp);
215 else
217 pp_string (pp, "start: ");
218 pp_wide_int (pp, m_start_bit_offset, SIGNED);
219 pp_string (pp, ", size: ");
220 pp_wide_int (pp, m_size_in_bits, SIGNED);
221 pp_string (pp, ", next: ");
222 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
226 /* Dump this object to stderr. */
228 DEBUG_FUNCTION void
229 bit_range::dump () const
231 pretty_printer pp;
232 pp.buffer->stream = stderr;
233 dump_to_pp (&pp);
234 pp_newline (&pp);
235 pp_flush (&pp);
238 /* If OTHER is a subset of this, return true and write
239 to *OUT the relative range of OTHER within this.
240 Otherwise return false. */
242 bool
243 bit_range::contains_p (const bit_range &other, bit_range *out) const
245 if (contains_p (other.get_start_bit_offset ())
246 && contains_p (other.get_last_bit_offset ()))
248 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
249 out->m_size_in_bits = other.m_size_in_bits;
250 return true;
252 else
253 return false;
256 /* If OTHER intersects this, return true and write
257 the relative range of OTHER within THIS to *OUT_THIS,
258 and the relative range of THIS within OTHER to *OUT_OTHER.
259 Otherwise return false. */
261 bool
262 bit_range::intersects_p (const bit_range &other,
263 bit_range *out_this,
264 bit_range *out_other) const
266 if (get_start_bit_offset () < other.get_next_bit_offset ()
267 && other.get_start_bit_offset () < get_next_bit_offset ())
269 bit_offset_t overlap_start
270 = MAX (get_start_bit_offset (),
271 other.get_start_bit_offset ());
272 bit_offset_t overlap_next
273 = MIN (get_next_bit_offset (),
274 other.get_next_bit_offset ());
275 gcc_assert (overlap_next > overlap_start);
276 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
277 *out_this = abs_overlap_bits - get_start_bit_offset ();
278 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
279 return true;
281 else
282 return false;
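/* Worked example (a sketch along the same lines): bits [0, 32) and
   bits [16, 48) overlap in bits [16, 32), so *OUT_THIS becomes the range
   at offset 16 of size 16 within this, and *OUT_OTHER the range at
   offset 0 of size 16 within OTHER:

     bit_range a (0, 32);
     bit_range b (16, 32);
     bit_range in_a (0, 0), in_b (0, 0);
     gcc_assert (a.intersects_p (b, &in_a, &in_b));
     gcc_assert (in_a.m_start_bit_offset == 16 && in_a.m_size_in_bits == 16);
     gcc_assert (in_b.m_start_bit_offset == 0 && in_b.m_size_in_bits == 16);  */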
286 bit_range::cmp (const bit_range &br1, const bit_range &br2)
288 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
289 br2.m_start_bit_offset))
290 return start_cmp;
292 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
295 /* Offset this range by OFFSET. */
297 bit_range
298 bit_range::operator- (bit_offset_t offset) const
300 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
303 /* If MASK is a contiguous range of set bits, write them
304 to *OUT and return true.
305 Otherwise return false. */
307 bool
308 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
310 unsigned iter_bit_idx = 0;
311 unsigned HOST_WIDE_INT iter_bit_mask = 1;
313 /* Find the first contiguous run of set bits in MASK. */
315 /* Find first set bit in MASK. */
316 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
318 if (mask & iter_bit_mask)
319 break;
320 iter_bit_idx++;
321 iter_bit_mask <<= 1;
323 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
324 /* MASK is zero. */
325 return false;
327 unsigned first_set_iter_bit_idx = iter_bit_idx;
328 unsigned num_set_bits = 1;
329 iter_bit_idx++;
330 iter_bit_mask <<= 1;
332 /* Find next unset bit in MASK. */
333 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
335 if (!(mask & iter_bit_mask))
336 break;
337 num_set_bits++;
338 iter_bit_idx++;
339 iter_bit_mask <<= 1;
341 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
343 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
344 return true;
347 /* We now have the first contiguous run of set bits in MASK.
348 Fail if any other bits are set. */
349 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
351 if (mask & iter_bit_mask)
352 return false;
353 iter_bit_idx++;
354 iter_bit_mask <<= 1;
357 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
358 return true;
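/* Worked example (a sketch; assumes from_mask can be called as a static
   member, as the definition above suggests): 0xff0 is a single run of
   eight set bits starting at bit 4, 0x101 has two separate runs, and 0
   has none:

     bit_range br (0, 0);
     gcc_assert (bit_range::from_mask (0xff0, &br));
     gcc_assert (br.m_start_bit_offset == 4 && br.m_size_in_bits == 8);
     gcc_assert (!bit_range::from_mask (0x101, &br));
     gcc_assert (!bit_range::from_mask (0, &br));  */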
361 /* Attempt to convert this bit_range to a byte_range.
362 Return true if it is possible, writing the result to *OUT.
363 Otherwise return false. */
365 bool
366 bit_range::as_byte_range (byte_range *out) const
368 if (m_start_bit_offset % BITS_PER_UNIT == 0
369 && m_size_in_bits % BITS_PER_UNIT == 0)
371 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
372 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
373 return true;
375 return false;
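/* Worked example (a sketch, assuming the usual BITS_PER_UNIT of 8):
   bits [8, 24) convert to bytes 1-2, whereas a 12-bit range starting at
   bit 8 is not a whole number of bytes and so cannot be converted:

     byte_range bytes (0, 0);
     gcc_assert (bit_range (8, 16).as_byte_range (&bytes));
     gcc_assert (bytes.m_start_byte_offset == 1 && bytes.m_size_in_bytes == 2);
     gcc_assert (!bit_range (8, 12).as_byte_range (&bytes));  */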
378 /* Dump this object to PP. */
380 void
381 byte_range::dump_to_pp (pretty_printer *pp) const
383 if (m_size_in_bytes == 1)
385 pp_string (pp, "byte ");
386 pp_wide_int (pp, m_start_byte_offset, SIGNED);
388 else
390 pp_string (pp, "bytes ");
391 pp_wide_int (pp, m_start_byte_offset, SIGNED);
392 pp_string (pp, "-");
393 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
397 /* Dump this object to stderr. */
399 DEBUG_FUNCTION void
400 byte_range::dump () const
402 pretty_printer pp;
403 pp.buffer->stream = stderr;
404 dump_to_pp (&pp);
405 pp_newline (&pp);
406 pp_flush (&pp);
409 /* If OTHER is a subset of this, return true and write
410 to *OUT the relative range of OTHER within this.
411 Otherwise return false. */
413 bool
414 byte_range::contains_p (const byte_range &other, byte_range *out) const
416 if (contains_p (other.get_start_byte_offset ())
417 && contains_p (other.get_last_byte_offset ()))
419 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
420 out->m_size_in_bytes = other.m_size_in_bytes;
421 return true;
423 else
424 return false;
427 /* qsort comparator for byte ranges. */
430 byte_range::cmp (const byte_range &br1, const byte_range &br2)
432 /* Order first by offset. */
433 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
434 br2.m_start_byte_offset))
435 return start_cmp;
437 /* ...then by size. */
438 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
441 /* class concrete_binding : public binding_key. */
443 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
445 void
446 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
448 m_bit_range.dump_to_pp (pp);
451 /* Return true if this binding overlaps with OTHER. */
453 bool
454 concrete_binding::overlaps_p (const concrete_binding &other) const
456 if (get_start_bit_offset () < other.get_next_bit_offset ()
457 && get_next_bit_offset () > other.get_start_bit_offset ())
458 return true;
459 return false;
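/* The bindings are treated as half-open bit ranges: bits [0, 32) and
   [16, 48) overlap, whereas [0, 32) and [32, 64) merely touch and do not.
   A rough sketch, assuming a store_manager *mgr is in scope and obtaining
   the keys via get_concrete_binding as defined later in this file:

     const concrete_binding *b1 = mgr->get_concrete_binding (0, 32);
     const concrete_binding *b2 = mgr->get_concrete_binding (16, 32);
     const concrete_binding *b3 = mgr->get_concrete_binding (32, 32);
     gcc_assert (b1->overlaps_p (*b2));
     gcc_assert (!b1->overlaps_p (*b3));  */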
462 /* Comparator for use by vec<const concrete_binding *>::qsort. */
465 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
467 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
468 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
470 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
473 /* class symbolic_binding : public binding_key. */
475 void
476 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
478 //binding_key::dump_to_pp (pp, simple);
479 pp_string (pp, "region: ");
480 m_region->dump_to_pp (pp, simple);
483 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
486 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
488 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
489 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
491 return region::cmp_ids (b1->get_region (), b2->get_region ());
494 /* The store is oblivious to the types of the svalues bound within
495 it: any type can get bound at any location.
496 Simplify any casts before binding.
498 For example, if we have:
499 struct big { int ia[1024]; };
500 struct big src, dst;
501 memcpy (&dst, &src, sizeof (struct big));
502 this reaches us in gimple form as:
503 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
504 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
505 Using cast_region when handling the MEM_REF would give us:
506 INIT_VAL(CAST_REG(unsigned char[4096], src))
507 as rhs_sval, but we can fold that into a cast svalue:
508 CAST(unsigned char[4096], INIT_VAL(src))
509 We can discard that cast from the svalue when binding it in
510 the store for "dst", and simply store:
511 cluster for: dst
512 key: {kind: direct, start: 0, size: 32768, next: 32768}
513 value: ‘struct big’ {INIT_VAL(src)}. */
515 static const svalue *
516 simplify_for_binding (const svalue *sval)
518 if (const svalue *cast_sval = sval->maybe_undo_cast ())
519 sval = cast_sval;
520 return sval;
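/* In other words, the simplification performed here is roughly:

     CAST(T, INIT_VAL(src))  -->  INIT_VAL(src)

   relying on svalue::maybe_undo_cast to strip a single level of cast
   before the value is bound.  */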
523 /* class binding_map. */
525 /* binding_map's copy ctor. */
527 binding_map::binding_map (const binding_map &other)
528 : m_map (other.m_map)
532 /* binding_map's assignment operator. */
534 binding_map&
535 binding_map::operator=(const binding_map &other)
537 /* For now, assume we only ever copy to an empty cluster. */
538 gcc_assert (m_map.elements () == 0);
539 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
540 ++iter)
542 const binding_key *key = (*iter).first;
543 const svalue *sval = (*iter).second;
544 m_map.put (key, sval);
546 return *this;
549 /* binding_map's equality operator. */
551 bool
552 binding_map::operator== (const binding_map &other) const
554 if (m_map.elements () != other.m_map.elements ())
555 return false;
557 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
559 const binding_key *key = (*iter).first;
560 const svalue *sval = (*iter).second;
561 const svalue **other_slot
562 = const_cast <map_t &> (other.m_map).get (key);
563 if (other_slot == NULL)
564 return false;
565 if (sval != *other_slot)
566 return false;
568 gcc_checking_assert (hash () == other.hash ());
569 return true;
572 /* Generate a hash value for this binding_map. */
574 hashval_t
575 binding_map::hash () const
577 hashval_t result = 0;
578 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
580 /* Use a new hasher for each key to avoid depending on the ordering
581 of keys when accumulating the result. */
582 inchash::hash hstate;
583 hstate.add_ptr ((*iter).first);
584 hstate.add_ptr ((*iter).second);
585 result ^= hstate.end ();
587 return result;
590 /* Dump a representation of this binding_map to PP.
591 SIMPLE controls how values and regions are to be printed.
592 If MULTILINE, then split the dump over multiple lines and
593 use whitespace for readability, otherwise put all on one line. */
595 void
596 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
597 bool multiline) const
599 auto_vec <const binding_key *> binding_keys;
600 for (map_t::iterator iter = m_map.begin ();
601 iter != m_map.end (); ++iter)
603 const binding_key *key = (*iter).first;
604 binding_keys.safe_push (key);
606 binding_keys.qsort (binding_key::cmp_ptrs);
608 const binding_key *key;
609 unsigned i;
610 FOR_EACH_VEC_ELT (binding_keys, i, key)
612 const svalue *value = *const_cast <map_t &> (m_map).get (key);
613 if (multiline)
615 pp_string (pp, " key: {");
616 key->dump_to_pp (pp, simple);
617 pp_string (pp, "}");
618 pp_newline (pp);
619 pp_string (pp, " value: ");
620 if (tree t = value->get_type ())
621 dump_quoted_tree (pp, t);
622 pp_string (pp, " {");
623 value->dump_to_pp (pp, simple);
624 pp_string (pp, "}");
625 pp_newline (pp);
627 else
629 if (i > 0)
630 pp_string (pp, ", ");
631 pp_string (pp, "binding key: {");
632 key->dump_to_pp (pp, simple);
633 pp_string (pp, "}, value: {");
634 value->dump_to_pp (pp, simple);
635 pp_string (pp, "}");
640 /* Dump a multiline representation of this binding_map to stderr. */
642 DEBUG_FUNCTION void
643 binding_map::dump (bool simple) const
645 pretty_printer pp;
646 pp_format_decoder (&pp) = default_tree_printer;
647 pp_show_color (&pp) = pp_show_color (global_dc->printer);
648 pp.buffer->stream = stderr;
649 dump_to_pp (&pp, simple, true);
650 pp_newline (&pp);
651 pp_flush (&pp);
654 /* Return a new json::object of the form
655 {KEY_DESC : SVALUE_DESC,
656 ...for the various key/value pairs in this binding_map}. */
658 json::object *
659 binding_map::to_json () const
661 json::object *map_obj = new json::object ();
663 auto_vec <const binding_key *> binding_keys;
664 for (map_t::iterator iter = m_map.begin ();
665 iter != m_map.end (); ++iter)
667 const binding_key *key = (*iter).first;
668 binding_keys.safe_push (key);
670 binding_keys.qsort (binding_key::cmp_ptrs);
672 const binding_key *key;
673 unsigned i;
674 FOR_EACH_VEC_ELT (binding_keys, i, key)
676 const svalue *value = *const_cast <map_t &> (m_map).get (key);
677 label_text key_desc = key->get_desc ();
678 map_obj->set (key_desc.m_buffer, value->to_json ());
681 return map_obj;
684 /* Comparator for imposing an order on binding_maps. */
687 binding_map::cmp (const binding_map &map1, const binding_map &map2)
689 if (int count_cmp = map1.elements () - map2.elements ())
690 return count_cmp;
692 auto_vec <const binding_key *> keys1 (map1.elements ());
693 for (map_t::iterator iter = map1.begin ();
694 iter != map1.end (); ++iter)
695 keys1.quick_push ((*iter).first);
696 keys1.qsort (binding_key::cmp_ptrs);
698 auto_vec <const binding_key *> keys2 (map2.elements ());
699 for (map_t::iterator iter = map2.begin ();
700 iter != map2.end (); ++iter)
701 keys2.quick_push ((*iter).first);
702 keys2.qsort (binding_key::cmp_ptrs);
704 for (size_t i = 0; i < keys1.length (); i++)
706 const binding_key *k1 = keys1[i];
707 const binding_key *k2 = keys2[i];
708 if (int key_cmp = binding_key::cmp (k1, k2))
709 return key_cmp;
710 gcc_assert (k1 == k2);
711 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
712 return sval_cmp;
715 return 0;
718 /* Get the child region of PARENT_REG based upon INDEX within a
719 CONSTRUCTOR. */
721 static const region *
722 get_subregion_within_ctor (const region *parent_reg, tree index,
723 region_model_manager *mgr)
725 switch (TREE_CODE (index))
727 default:
728 gcc_unreachable ();
729 case INTEGER_CST:
731 const svalue *index_sval
732 = mgr->get_or_create_constant_svalue (index);
733 return mgr->get_element_region (parent_reg,
734 TREE_TYPE (parent_reg->get_type ()),
735 index_sval);
737 break;
738 case FIELD_DECL:
739 return mgr->get_field_region (parent_reg, index);
743 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
745 static const svalue *
746 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
748 /* Reuse the get_rvalue logic from region_model. */
749 region_model m (mgr);
750 return m.get_rvalue (path_var (val, 0), NULL);
753 /* Bind values from CONSTRUCTOR to this map, relative to
754 PARENT_REG's relationship to its base region.
755 Return true if successful, false if there was a problem (e.g. due
756 to hitting a complexity limit). */
758 bool
759 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
760 region_model_manager *mgr)
762 gcc_assert (parent_reg);
763 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
765 unsigned ix;
766 tree index;
767 tree val;
768 tree parent_type = parent_reg->get_type ();
769 tree field;
770 if (TREE_CODE (parent_type) == RECORD_TYPE)
771 field = TYPE_FIELDS (parent_type);
772 else
773 field = NULL_TREE;
774 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
776 if (!index)
778 /* If index is NULL, then iterate through the fields for
779 a RECORD_TYPE, or use an INTEGER_CST otherwise.
780 Compare with similar logic in output_constructor. */
781 if (field)
783 index = field;
784 field = DECL_CHAIN (field);
786 else
787 index = build_int_cst (integer_type_node, ix);
789 else if (TREE_CODE (index) == RANGE_EXPR)
791 tree min_index = TREE_OPERAND (index, 0);
792 tree max_index = TREE_OPERAND (index, 1);
793 if (min_index == max_index)
795 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
796 min_index, val))
797 return false;
799 else
801 if (!apply_ctor_val_to_range (parent_reg, mgr,
802 min_index, max_index, val))
803 return false;
805 continue;
807 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
808 return false;
810 return true;
813 /* Bind the value VAL into the range of elements within PARENT_REG
814 from MIN_INDEX to MAX_INDEX (including endpoints).
815 For use in handling RANGE_EXPR within a CONSTRUCTOR.
816 Return true if successful, false if there was a problem (e.g. due
817 to hitting a complexity limit). */
819 bool
820 binding_map::apply_ctor_val_to_range (const region *parent_reg,
821 region_model_manager *mgr,
822 tree min_index, tree max_index,
823 tree val)
825 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
826 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
828 /* Generate a binding key for the range. */
829 const region *min_element
830 = get_subregion_within_ctor (parent_reg, min_index, mgr);
831 const region *max_element
832 = get_subregion_within_ctor (parent_reg, max_index, mgr);
833 region_offset min_offset = min_element->get_offset ();
834 if (min_offset.symbolic_p ())
835 return false;
836 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
837 store_manager *smgr = mgr->get_store_manager ();
838 const binding_key *max_element_key = binding_key::make (smgr, max_element);
839 if (max_element_key->symbolic_p ())
840 return false;
841 const concrete_binding *max_element_ckey
842 = max_element_key->dyn_cast_concrete_binding ();
843 bit_size_t range_size_in_bits
844 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
845 const concrete_binding *range_key
846 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
847 if (range_key->symbolic_p ())
848 return false;
850 /* Get the value. */
851 if (TREE_CODE (val) == CONSTRUCTOR)
852 return false;
853 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
855 /* Bind the value to the range. */
856 put (range_key, sval);
857 return true;
860 /* Bind the value VAL at INDEX within PARENT_REG.
861 For use in handling a pair of entries within a CONSTRUCTOR.
862 Return true if successful, false if there was a problem (e.g. due
863 to hitting a complexity limit). */
865 bool
866 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
867 region_model_manager *mgr,
868 tree index, tree val)
870 const region *child_reg
871 = get_subregion_within_ctor (parent_reg, index, mgr);
872 if (TREE_CODE (val) == CONSTRUCTOR)
873 return apply_ctor_to_region (child_reg, val, mgr);
874 else
876 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
877 const binding_key *k
878 = binding_key::make (mgr->get_store_manager (), child_reg);
879 /* Handle the case where we have an unknown size for child_reg
880 (e.g. due to it being a trailing field with incomplete array
881 type). */
882 if (!k->concrete_p ())
884 /* Assume that sval has a well-defined size for this case. */
885 tree sval_type = sval->get_type ();
886 gcc_assert (sval_type);
887 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
888 gcc_assert (sval_byte_size != -1);
889 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
890 /* Get offset of child relative to base region. */
891 region_offset child_base_offset = child_reg->get_offset ();
892 if (child_base_offset.symbolic_p ())
893 return false;
894 /* Convert to an offset relative to the parent region. */
895 region_offset parent_base_offset = parent_reg->get_offset ();
896 gcc_assert (!parent_base_offset.symbolic_p ());
897 bit_offset_t child_parent_offset
898 = (child_base_offset.get_bit_offset ()
899 - parent_base_offset.get_bit_offset ());
900 /* Create a concrete key for the child within the parent. */
901 k = mgr->get_store_manager ()->get_concrete_binding
902 (child_parent_offset, sval_bit_size);
904 gcc_assert (k->concrete_p ());
905 put (k, sval);
906 return true;
910 /* Populate OUT with all bindings within this map that overlap KEY. */
912 void
913 binding_map::get_overlapping_bindings (const binding_key *key,
914 auto_vec<const binding_key *> *out)
916 for (auto iter : *this)
918 const binding_key *iter_key = iter.first;
919 if (const concrete_binding *ckey
920 = key->dyn_cast_concrete_binding ())
922 if (const concrete_binding *iter_ckey
923 = iter_key->dyn_cast_concrete_binding ())
925 if (ckey->overlaps_p (*iter_ckey))
926 out->safe_push (iter_key);
928 else
930 /* Assume overlap. */
931 out->safe_push (iter_key);
934 else
936 /* Assume overlap. */
937 out->safe_push (iter_key);
942 /* Remove, truncate, and/or split any bindings within this map that
943 overlap DROP_KEY.
945 For example, if we have:
947 +------------------------------------+
948 | old binding |
949 +------------------------------------+
951 which is to be overwritten with:
953 .......+----------------------+.......
954 .......| new binding |.......
955 .......+----------------------+.......
957 this function "cuts a hole" out of the old binding:
959 +------+......................+------+
960 |prefix| hole for new binding |suffix|
961 +------+......................+------+
963 into which the new binding can be added without
964 overlapping the prefix or suffix.
966 The prefix and suffix (if added) will be bound to the pertinent
967 parts of the value of the old binding.
969 For example, given:
970 struct s5
972 char arr[8];
974 void test_5 (struct s5 *p)
976 struct s5 f = *p;
977 f.arr[3] = 42;
979 then after the "f = *p;" we have:
980 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
981 and at the "f.arr[3] = 42;" we remove the bindings overlapping
982 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
983 giving:
984 cluster for: f
985 key: {bytes 0-2}
986 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
987 key: {bytes 4-7}
988 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
989 punching a hole into which the new value can be written at byte 3:
990 cluster for: f
991 key: {bytes 0-2}
992 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
993 key: {byte 3}
994 value: 'char' {(char)42}
995 key: {bytes 4-7}
996 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
998 If UNCERTAINTY is non-NULL, use it to record any svalues that
999 were removed, as being maybe-bound.
1001 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1002 in the map, due to one or both of the underlying clusters being
1003 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1004 concrete binding it could actually be referring to the same memory as
1005 distinct concrete bindings in the map. Remove all bindings, but
1006 register any svalues with *UNCERTAINTY. */
1008 void
1009 binding_map::remove_overlapping_bindings (store_manager *mgr,
1010 const binding_key *drop_key,
1011 uncertainty_t *uncertainty,
1012 bool always_overlap)
1014 /* Get the bindings of interest within this map. */
1015 auto_vec<const binding_key *> bindings;
1016 if (always_overlap)
1017 for (auto iter : *this)
1018 bindings.safe_push (iter.first); /* Add all bindings. */
1019 else
1020 /* Just add overlapping bindings. */
1021 get_overlapping_bindings (drop_key, &bindings);
1023 unsigned i;
1024 const binding_key *iter_binding;
1025 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1027 /* Record any svalues that were removed to *UNCERTAINTY as being
1028 maybe-bound, provided at least some part of the binding is symbolic.
1030 Specifically, if at least one of the bindings is symbolic, or we
1031 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1032 regions, then we don't know that the svalue has been overwritten,
1033 and should record that to *UNCERTAINTY.
1035 However, if we have concrete keys accessing within the same symbolic
1036 region, then we *know* that the symbolic region has been overwritten,
1037 so we don't record it to *UNCERTAINTY, as this could be a genuine
1038 leak. */
1039 const svalue *old_sval = get (iter_binding);
1040 if (uncertainty
1041 && (drop_key->symbolic_p ()
1042 || iter_binding->symbolic_p ()
1043 || always_overlap))
1044 uncertainty->on_maybe_bound_sval (old_sval);
1046 /* Begin by removing the old binding. */
1047 m_map.remove (iter_binding);
1049 /* Don't attempt to handle prefixes/suffixes for the
1050 "always_overlap" case; everything's being removed. */
1051 if (always_overlap)
1052 continue;
1054 /* Now potentially add the prefix and suffix. */
1055 if (const concrete_binding *drop_ckey
1056 = drop_key->dyn_cast_concrete_binding ())
1057 if (const concrete_binding *iter_ckey
1058 = iter_binding->dyn_cast_concrete_binding ())
1060 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1062 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1063 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1065 if (iter_bits.get_start_bit_offset ()
1066 < drop_bits.get_start_bit_offset ())
1068 /* We have a truncated prefix. */
1069 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1070 (drop_bits.get_start_bit_offset ()
1071 - iter_bits.get_start_bit_offset ()));
1072 const concrete_binding *prefix_key
1073 = mgr->get_concrete_binding (prefix_bits);
1074 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1075 const svalue *prefix_sval
1076 = old_sval->extract_bit_range (NULL_TREE,
1077 rel_prefix,
1078 mgr->get_svalue_manager ());
1079 m_map.put (prefix_key, prefix_sval);
1082 if (iter_bits.get_next_bit_offset ()
1083 > drop_bits.get_next_bit_offset ())
1085 /* We have a truncated suffix. */
1086 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1087 (iter_bits.get_next_bit_offset ()
1088 - drop_bits.get_next_bit_offset ()));
1089 const concrete_binding *suffix_key
1090 = mgr->get_concrete_binding (suffix_bits);
1091 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1092 - iter_bits.get_start_bit_offset (),
1093 suffix_bits.m_size_in_bits);
1094 const svalue *suffix_sval
1095 = old_sval->extract_bit_range (NULL_TREE,
1096 rel_suffix,
1097 mgr->get_svalue_manager ());
1098 m_map.put (suffix_key, suffix_sval);
1104 /* class binding_cluster. */
1106 /* binding_cluster's copy ctor. */
1108 binding_cluster::binding_cluster (const binding_cluster &other)
1109 : m_base_region (other.m_base_region), m_map (other.m_map),
1110 m_escaped (other.m_escaped), m_touched (other.m_touched)
1114 /* binding_cluster's assignment operator. */
1116 binding_cluster&
1117 binding_cluster::operator= (const binding_cluster &other)
1119 gcc_assert (m_base_region == other.m_base_region);
1120 m_map = other.m_map;
1121 m_escaped = other.m_escaped;
1122 m_touched = other.m_touched;
1123 return *this;
1126 /* binding_cluster's equality operator. */
1128 bool
1129 binding_cluster::operator== (const binding_cluster &other) const
1131 if (m_map != other.m_map)
1132 return false;
1134 if (m_base_region != other.m_base_region)
1135 return false;
1137 if (m_escaped != other.m_escaped)
1138 return false;
1140 if (m_touched != other.m_touched)
1141 return false;
1143 gcc_checking_assert (hash () == other.hash ());
1145 return true;
1148 /* Generate a hash value for this binding_cluster. */
1150 hashval_t
1151 binding_cluster::hash () const
1153 return m_map.hash ();
1156 /* Return true if this binding_cluster is symbolic
1157 i.e. its base region is symbolic. */
1159 bool
1160 binding_cluster::symbolic_p () const
1162 return m_base_region->get_kind () == RK_SYMBOLIC;
1165 /* Dump a representation of this binding_cluster to PP.
1166 SIMPLE controls how values and regions are to be printed.
1167 If MULTILINE, then split the dump over multiple lines and
1168 use whitespace for readability, otherwise put all on one line. */
1170 void
1171 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1172 bool multiline) const
1174 if (m_escaped)
1176 if (multiline)
1178 pp_string (pp, " ESCAPED");
1179 pp_newline (pp);
1181 else
1182 pp_string (pp, "(ESCAPED)");
1184 if (m_touched)
1186 if (multiline)
1188 pp_string (pp, " TOUCHED");
1189 pp_newline (pp);
1191 else
1192 pp_string (pp, "(TOUCHED)");
1195 m_map.dump_to_pp (pp, simple, multiline);
1198 /* Dump a multiline representation of this binding_cluster to stderr. */
1200 DEBUG_FUNCTION void
1201 binding_cluster::dump (bool simple) const
1203 pretty_printer pp;
1204 pp_format_decoder (&pp) = default_tree_printer;
1205 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1206 pp.buffer->stream = stderr;
1207 pp_string (&pp, " cluster for: ");
1208 m_base_region->dump_to_pp (&pp, simple);
1209 pp_string (&pp, ": ");
1210 pp_newline (&pp);
1211 dump_to_pp (&pp, simple, true);
1212 pp_newline (&pp);
1213 pp_flush (&pp);
1216 /* Assert that this object is valid. */
1218 void
1219 binding_cluster::validate () const
1221 int num_symbolic = 0;
1222 int num_concrete = 0;
1223 for (auto iter : m_map)
1225 if (iter.first->symbolic_p ())
1226 num_symbolic++;
1227 else
1228 num_concrete++;
1230 /* We shouldn't have more than one symbolic key per cluster
1231 (or one would have clobbered the other). */
1232 gcc_assert (num_symbolic < 2);
1233 /* We can't have both concrete and symbolic keys. */
1234 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1237 /* Return a new json::object of the form
1238 {"escaped": true/false,
1239 "touched": true/false,
1240 "map" : object for the binding_map}. */
1242 json::object *
1243 binding_cluster::to_json () const
1245 json::object *cluster_obj = new json::object ();
1247 cluster_obj->set ("escaped", new json::literal (m_escaped));
1248 cluster_obj->set ("touched", new json::literal (m_touched));
1249 cluster_obj->set ("map", m_map.to_json ());
1251 return cluster_obj;
1254 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1255 compound_svalue. */
1257 void
1258 binding_cluster::bind (store_manager *mgr,
1259 const region *reg, const svalue *sval)
1261 if (const compound_svalue *compound_sval
1262 = sval->dyn_cast_compound_svalue ())
1264 bind_compound_sval (mgr, reg, compound_sval);
1265 return;
1268 const binding_key *binding = binding_key::make (mgr, reg);
1269 bind_key (binding, sval);
1272 /* Bind SVAL to KEY.
1273 Unpacking of compound_svalues should already have been done by the
1274 time this is called. */
1276 void
1277 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1279 gcc_assert (sval->get_kind () != SK_COMPOUND);
1281 m_map.put (key, sval);
1282 if (key->symbolic_p ())
1283 m_touched = true;
1286 /* Subroutine of binding_cluster::bind.
1287 Unpack compound_svals when binding them, so that we bind them
1288 element-wise. */
1290 void
1291 binding_cluster::bind_compound_sval (store_manager *mgr,
1292 const region *reg,
1293 const compound_svalue *compound_sval)
1295 region_offset reg_offset = reg->get_offset ();
1296 if (reg_offset.symbolic_p ())
1298 m_touched = true;
1299 clobber_region (mgr, reg);
1300 return;
1303 for (map_t::iterator iter = compound_sval->begin ();
1304 iter != compound_sval->end (); ++iter)
1306 const binding_key *iter_key = (*iter).first;
1307 const svalue *iter_sval = (*iter).second;
1309 if (const concrete_binding *concrete_key
1310 = iter_key->dyn_cast_concrete_binding ())
1312 bit_offset_t effective_start
1313 = (concrete_key->get_start_bit_offset ()
1314 + reg_offset.get_bit_offset ());
1315 const concrete_binding *effective_concrete_key
1316 = mgr->get_concrete_binding (effective_start,
1317 concrete_key->get_size_in_bits ());
1318 bind_key (effective_concrete_key, iter_sval);
1320 else
1321 gcc_unreachable ();
1325 /* Remove all bindings overlapping REG within this cluster. */
1327 void
1328 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1330 remove_overlapping_bindings (mgr, reg, NULL);
1333 /* Remove any bindings for REG within this cluster. */
1335 void
1336 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1338 gcc_assert (reg->get_kind () == RK_DECL);
1339 const binding_key *binding
1340 = binding_key::make (mgr, const_cast<region *> (reg));
1341 m_map.remove (binding);
1344 /* Clobber REG and fill it with repeated copies of SVAL. */
1346 void
1347 binding_cluster::fill_region (store_manager *mgr,
1348 const region *reg,
1349 const svalue *sval)
1351 clobber_region (mgr, reg);
1353 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1354 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1355 const svalue *fill_sval
1356 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1357 byte_size_sval, sval);
1358 bind (mgr, reg, fill_sval);
1361 /* Clobber REG within this cluster and fill it with zeroes. */
1363 void
1364 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1366 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1367 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1368 fill_region (mgr, reg, zero_sval);
1371 /* Mark REG_TO_BIND within this cluster as being unknown.
1373 Remove any bindings overlapping REG_FOR_OVERLAP.
1374 If UNCERTAINTY is non-NULL, use it to record any svalues that
1375 had bindings to them removed, as being maybe-bound.
1377 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1378 store::mark_region_as_unknown, but are different in
1379 store::set_value's alias handling, for handling the case where
1380 we have a write to a symbolic REG_FOR_OVERLAP. */
1382 void
1383 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1384 const region *reg_to_bind,
1385 const region *reg_for_overlap,
1386 uncertainty_t *uncertainty)
1388 remove_overlapping_bindings (mgr, reg_for_overlap, uncertainty);
1390 /* Add a default binding to "unknown". */
1391 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1392 const svalue *sval
1393 = sval_mgr->get_or_create_unknown_svalue (reg_to_bind->get_type ());
1394 bind (mgr, reg_to_bind, sval);
1397 /* Purge state involving SVAL. */
1399 void
1400 binding_cluster::purge_state_involving (const svalue *sval,
1401 region_model_manager *sval_mgr)
1403 auto_vec<const binding_key *> to_remove;
1404 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1405 for (auto iter : m_map)
1407 const binding_key *iter_key = iter.first;
1408 if (const symbolic_binding *symbolic_key
1409 = iter_key->dyn_cast_symbolic_binding ())
1411 const region *reg = symbolic_key->get_region ();
1412 if (reg->involves_p (sval))
1413 to_remove.safe_push (iter_key);
1415 const svalue *iter_sval = iter.second;
1416 if (iter_sval->involves_p (sval))
1417 to_make_unknown.safe_push (std::make_pair(iter_key,
1418 iter_sval->get_type ()));
1420 for (auto iter : to_remove)
1422 m_map.remove (iter);
1423 m_touched = true;
1425 for (auto iter : to_make_unknown)
1427 const svalue *new_sval
1428 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1429 m_map.put (iter.first, new_sval);
1433 /* Get any SVAL bound to REG within this cluster,
1434 without checking parent regions of REG. */
1436 const svalue *
1437 binding_cluster::get_binding (store_manager *mgr,
1438 const region *reg) const
1440 const binding_key *reg_binding = binding_key::make (mgr, reg);
1441 const svalue *sval = m_map.get (reg_binding);
1442 if (sval)
1444 /* If we have a struct with a single field, then the binding of
1445 the field will equal that of the struct, and looking up e.g.
1446 PARENT_REG.field within:
1447 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1448 will erroneously return INIT_VAL(OTHER_REG), rather than
1449 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1450 Fix this issue by iterating upwards whilst the bindings are equal,
1451 expressing the lookups as subvalues.
1452 We have to gather a list of subregion accesses, then walk it
1453 in reverse to get the subvalues. */
1454 auto_vec<const region *> regions;
1455 while (const region *parent_reg = reg->get_parent_region ())
1457 const binding_key *parent_reg_binding
1458 = binding_key::make (mgr, parent_reg);
1459 if (parent_reg_binding == reg_binding
1460 && sval->get_type ()
1461 && reg->get_type ()
1462 && sval->get_type () != reg->get_type ())
1464 regions.safe_push (reg);
1465 reg = parent_reg;
1467 else
1468 break;
1470 if (sval->get_type ()
1471 && reg->get_type ()
1472 && sval->get_type () == reg->get_type ())
1474 unsigned i;
1475 const region *iter_reg;
1476 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1478 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1479 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1480 sval, iter_reg);
1484 return sval;
1487 /* Get any SVAL bound to REG within this cluster,
1488 either directly for REG, or recursively checking for bindings within
1489 parent regions and extracting subvalues if need be. */
1491 const svalue *
1492 binding_cluster::get_binding_recursive (store_manager *mgr,
1493 const region *reg) const
1495 if (const svalue *sval = get_binding (mgr, reg))
1496 return sval;
1497 if (reg != m_base_region)
1498 if (const region *parent_reg = reg->get_parent_region ())
1499 if (const svalue *parent_sval
1500 = get_binding_recursive (mgr, parent_reg))
1502 /* Extract child svalue from parent svalue. */
1503 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1504 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1505 parent_sval, reg);
1507 return NULL;
1510 /* Get any value bound for REG within this cluster. */
1512 const svalue *
1513 binding_cluster::get_any_binding (store_manager *mgr,
1514 const region *reg) const
1516 /* Look for a direct binding. */
1517 if (const svalue *direct_sval
1518 = get_binding_recursive (mgr, reg))
1519 return direct_sval;
1521 /* If we had a write to a cluster of unknown size, we might
1522 have a self-binding of the whole base region with an svalue,
1523 where the base region is symbolic.
1524 Handle such cases by returning sub_svalue instances. */
1525 if (const svalue *cluster_sval = maybe_get_simple_value (mgr))
1527 /* Extract child svalue from parent svalue. */
1528 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1529 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1530 cluster_sval, reg);
1533 /* If this cluster has been touched by a symbolic write, then the content
1534 of any subregion not currently specifically bound is "UNKNOWN". */
1535 if (m_touched)
1537 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1538 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1541 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1542 then we don't know if we're reading those values or not, so the result
1543 is also "UNKNOWN". */
1544 if (reg->get_offset ().symbolic_p ()
1545 && m_map.elements () > 0)
1547 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1548 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1551 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1552 return compound_sval;
1554 /* Otherwise, the initial value, or uninitialized. */
1555 return NULL;
1558 /* Attempt to get a compound_svalue for the bindings within the cluster
1559 affecting REG (which could be the base region itself).
1561 Create a compound_svalue with the subset of bindings that affect REG,
1562 offsetting them so that the offsets are relative to the start of REG
1563 within the cluster.
1565 For example, REG could be one element within an array of structs.
1567 Return the resulting compound_svalue, or NULL if there's a problem. */
1569 const svalue *
1570 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1571 const region *reg) const
1573 region_offset cluster_offset = m_base_region->get_offset ();
1574 if (cluster_offset.symbolic_p ())
1575 return NULL;
1576 region_offset reg_offset = reg->get_offset ();
1577 if (reg_offset.symbolic_p ())
1578 return NULL;
1580 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1582 /* We will build the result map in two parts:
1583 (a) result_map, holding the concrete keys from this cluster,
1585 (b) default_map, holding the initial values for the region
1586 (e.g. uninitialized, initializer values, or zero), unless this
1587 cluster has been touched.
1589 We will populate (a), and as we do, clobber (b), trimming and
1590 splitting its bindings as necessary.
1591 Finally, we will merge (b) into (a), giving a concrete map
1592 that merges both the initial values and the bound values from
1593 the binding_cluster.
1594 Doing it this way reduces N for the O(N^2) intersection-finding;
1595 perhaps we should have a spatially-organized data structure for
1596 concrete keys, though. */
1598 binding_map result_map;
1599 binding_map default_map;
1601 /* Set up default values in default_map. */
1602 const svalue *default_sval;
1603 if (m_touched)
1604 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1605 else
1606 default_sval = sval_mgr->get_or_create_initial_value (reg);
1607 const binding_key *default_key = binding_key::make (mgr, reg);
1608 default_map.put (default_key, default_sval);
1610 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1612 const binding_key *key = (*iter).first;
1613 const svalue *sval = (*iter).second;
1615 if (const concrete_binding *concrete_key
1616 = key->dyn_cast_concrete_binding ())
1618 const bit_range &bound_range = concrete_key->get_bit_range ();
1620 bit_size_t reg_bit_size;
1621 if (!reg->get_bit_size (&reg_bit_size))
1622 return NULL;
1624 bit_range reg_range (reg_offset.get_bit_offset (),
1625 reg_bit_size);
1627 /* Skip bindings that are outside the bit range of REG. */
1628 if (!bound_range.intersects_p (reg_range))
1629 continue;
1631 /* We shouldn't have an exact match; that should have been
1632 handled already. */
1633 gcc_assert (!(reg_range == bound_range));
1635 bit_range subrange (0, 0);
1636 if (reg_range.contains_p (bound_range, &subrange))
1638 /* We have a bound range fully within REG.
1639 Add it to map, offsetting accordingly. */
1641 /* Get offset of KEY relative to REG, rather than to
1642 the cluster. */
1643 const concrete_binding *offset_concrete_key
1644 = mgr->get_concrete_binding (subrange);
1645 result_map.put (offset_concrete_key, sval);
1647 /* Clobber default_map, removing/trimming/splitting where
1648 it overlaps with offset_concrete_key. */
1649 default_map.remove_overlapping_bindings (mgr,
1650 offset_concrete_key,
1651 NULL, false);
1653 else if (bound_range.contains_p (reg_range, &subrange))
1655 /* REG is fully within the bound range, but
1656 is not equal to it; we're extracting a subvalue. */
1657 return sval->extract_bit_range (reg->get_type (),
1658 subrange,
1659 mgr->get_svalue_manager ());
1661 else
1663 /* REG and the bound range partially overlap. */
1664 bit_range reg_subrange (0, 0);
1665 bit_range bound_subrange (0, 0);
1666 reg_range.intersects_p (bound_range,
1667 &reg_subrange, &bound_subrange);
1669 /* Get the bits from the bound value for the bits at the
1670 intersection (relative to the bound value). */
1671 const svalue *overlap_sval
1672 = sval->extract_bit_range (NULL_TREE,
1673 bound_subrange,
1674 mgr->get_svalue_manager ());
1676 /* Get key for overlap, relative to the REG. */
1677 const concrete_binding *overlap_concrete_key
1678 = mgr->get_concrete_binding (reg_subrange);
1679 result_map.put (overlap_concrete_key, overlap_sval);
1681 /* Clobber default_map, removing/trimming/splitting where
1682 it overlaps with overlap_concrete_key. */
1683 default_map.remove_overlapping_bindings (mgr,
1684 overlap_concrete_key,
1685 NULL, false);
1688 else
1689 /* Can't handle symbolic bindings. */
1690 return NULL;
1693 if (result_map.elements () == 0)
1694 return NULL;
1696 /* Merge any bindings from default_map into result_map. */
1697 for (auto iter : default_map)
1699 const binding_key *key = iter.first;
1700 const svalue *sval = iter.second;
1701 result_map.put (key, sval);
1704 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1707 /* Remove, truncate, and/or split any bindings within this map that
1708 could overlap REG.
1710 If REG's base region or this cluster is symbolic and they're different
1711 base regions, then remove everything in this cluster's map, on the
1712 grounds that REG could be referring to the same memory as anything
1713 in the map.
1715 If UNCERTAINTY is non-NULL, use it to record any svalues that
1716 were removed, as being maybe-bound. */
1718 void
1719 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1720 const region *reg,
1721 uncertainty_t *uncertainty)
1723 const binding_key *reg_binding = binding_key::make (mgr, reg);
1725 const region *cluster_base_reg = get_base_region ();
1726 const region *other_base_reg = reg->get_base_region ();
1727 /* If at least one of the base regions involved is symbolic, and they're
1728 not the same base region, then consider everything in the map as
1729 potentially overlapping with reg_binding (even if it's a concrete
1730 binding and things in the map are concrete - they could be referring
1731 to the same memory when the symbolic base regions are taken into
1732 account). */
1733 bool always_overlap = (cluster_base_reg != other_base_reg
1734 && (cluster_base_reg->get_kind () == RK_SYMBOLIC
1735 || other_base_reg->get_kind () == RK_SYMBOLIC));
1736 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty,
1737 always_overlap);
1740 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1741 MGR and MERGER.
1742 Return true if they can be merged, false otherwise. */
1744 bool
1745 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1746 const binding_cluster *cluster_b,
1747 binding_cluster *out_cluster,
1748 store *out_store,
1749 store_manager *mgr,
1750 model_merger *merger)
1752 gcc_assert (out_cluster);
1754 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1755 true if either of the inputs is true. */
1756 if ((cluster_a && cluster_a->m_escaped)
1757 || (cluster_b && cluster_b->m_escaped))
1758 out_cluster->m_escaped = true;
1759 if ((cluster_a && cluster_a->m_touched)
1760 || (cluster_b && cluster_b->m_touched))
1761 out_cluster->m_touched = true;
1763 /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1764 could be NULL. Handle these cases. */
1765 if (cluster_a == NULL)
1767 gcc_assert (cluster_b != NULL);
1768 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1769 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1770 return true;
1772 if (cluster_b == NULL)
1774 gcc_assert (cluster_a != NULL);
1775 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1776 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1777 return true;
1780 /* The "both inputs are non-NULL" case. */
1781 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1782 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1783 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1785 hash_set<const binding_key *> keys;
1786 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1787 iter_a != cluster_a->m_map.end (); ++iter_a)
1789 const binding_key *key_a = (*iter_a).first;
1790 keys.add (key_a);
1792 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1793 iter_b != cluster_b->m_map.end (); ++iter_b)
1795 const binding_key *key_b = (*iter_b).first;
1796 keys.add (key_b);
1798 int num_symbolic_keys = 0;
1799 int num_concrete_keys = 0;
1800 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1801 iter != keys.end (); ++iter)
1803 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1804 const binding_key *key = *iter;
1805 const svalue *sval_a = cluster_a->get_any_value (key);
1806 const svalue *sval_b = cluster_b->get_any_value (key);
1808 if (key->symbolic_p ())
1809 num_symbolic_keys++;
1810 else
1811 num_concrete_keys++;
1813 if (sval_a == sval_b)
1815 gcc_assert (sval_a);
1816 out_cluster->m_map.put (key, sval_a);
1817 continue;
1819 else if (sval_a && sval_b)
1821 if (const svalue *merged_sval
1822 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1824 out_cluster->m_map.put (key, merged_sval);
1825 continue;
1827 /* Merger of the svalues failed. Reject merger of the cluster. */
1828 return false;
1831 /* If we get here, then one cluster binds this key and the other
1832 doesn't; merge them as "UNKNOWN". */
1833 gcc_assert (sval_a || sval_b);
1835 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1836 tree type = bound_sval->get_type ();
1837 const svalue *unknown_sval
1838 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1840 /* ...but reject the merger if this sval shouldn't be mergeable
1841 (e.g. reject merging svalues that have non-purgable sm-state,
1842 to avoid falsely reporting memory leaks by merging them
1843 with something else). */
1844 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1845 return false;
1847 out_cluster->m_map.put (key, unknown_sval);
1850 /* We can have at most one symbolic key per cluster,
1851 and if we do, we can't have any concrete keys.
1852 If this happens, mark the cluster as touched, with no keys. */
1853 if (num_symbolic_keys >= 2
1854 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1856 out_cluster->m_touched = true;
1857 out_cluster->m_map.empty ();
1860 /* We don't handle other kinds of overlaps yet. */
1862 return true;
1865 /* Update this cluster to reflect an attempt to merge OTHER where there
1866 is no other cluster to merge with, and so we're notionally merging the
1867 bound values in OTHER with the initial value of the relevant regions.
1869 Any bound keys in OTHER should be bound to unknown in this. */
1871 void
1872 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1873 store *out_store,
1874 store_manager *mgr)
1876 for (map_t::iterator iter = other->m_map.begin ();
1877 iter != other->m_map.end (); ++iter)
1879 const binding_key *iter_key = (*iter).first;
1880 const svalue *iter_sval = (*iter).second;
1881 const svalue *unknown_sval
1882 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
1883 (iter_sval->get_type ());
1884 m_map.put (iter_key, unknown_sval);
1886 /* For any pointers in OTHER, the merger means that the
1887 concrete pointer becomes an unknown value, which could
1888 show up as a false report of a leak when considering what
1889 pointers are live before vs after.
1890 Avoid this by marking the base regions they point to as having
1891 escaped. */
1892 if (const region_svalue *region_sval
1893 = iter_sval->dyn_cast_region_svalue ())
1895 const region *base_reg
1896 = region_sval->get_pointee ()->get_base_region ();
1897 if (base_reg->tracked_p ()
1898 && !base_reg->symbolic_for_unknown_ptr_p ())
1900 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
1901 c->mark_as_escaped ();
1907 /* Mark this cluster as having escaped. */
1909 void
1910 binding_cluster::mark_as_escaped ()
1912 m_escaped = true;
1915 /* If this cluster has escaped (by this call, or by an earlier one, or
1916 by being an external param), then unbind all values and mark it
1917 as "touched", so that it has a conjured value, rather than an
1918 initial_svalue.
1919 Use P to purge state involving conjured_svalues. */
1921 void
1922 binding_cluster::on_unknown_fncall (const gcall *call,
1923 store_manager *mgr,
1924 const conjured_purge &p)
1926 if (m_escaped)
1928 m_map.empty ();
1930 /* Bind it to a new "conjured" value using CALL. */
1931 const svalue *sval
1932 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
1933 (m_base_region->get_type (), call, m_base_region, p);
1934 bind (mgr, m_base_region, sval);
1936 m_touched = true;
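/* Hedged source-level illustration of the above (hypothetical names, not
   from the testsuite):

     extern void unknown_fn (int *);
     void test (void)
     {
       int x = 1;
       unknown_fn (&x);  // &x escapes; x's cluster is marked as escaped
       unknown_fn (0);   // each unknown call now drops x's bindings and
                         // rebinds the whole base region to a conjured
                         // value keyed on that call stmt
     }
*/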
1940 /* Mark this cluster as having been clobbered by STMT.
1941 Use P to purge state involving conjured_svalues. */
1943 void
1944 binding_cluster::on_asm (const gasm *stmt,
1945 store_manager *mgr,
1946 const conjured_purge &p)
1948 m_map.empty ();
1950 /* Bind it to a new "conjured" value using STMT. */
1951 const svalue *sval
1952 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
1953 (m_base_region->get_type (), stmt, m_base_region, p);
1954 bind (mgr, m_base_region, sval);
1956 m_touched = true;
1959 /* Return true if this binding_cluster has no information,
1960 i.e. if there are no bindings, and it hasn't been marked as having
1961 escaped or been touched symbolically. */
1963 bool
1964 binding_cluster::redundant_p () const
1966 return (m_map.elements () == 0
1967 && !m_escaped
1968 && !m_touched);
1971 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
1973 static void
1974 append_pathvar_with_type (path_var pv,
1975 tree type,
1976 auto_vec<path_var> *out_pvs)
1978 gcc_assert (pv.m_tree);
1980 if (TREE_TYPE (pv.m_tree) != type)
1981 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
1983 out_pvs->safe_push (pv);
1986 /* Find representative path_vars for SVAL within this binding of BASE_REG,
1987 appending the results to OUT_PVS. */
1989 void
1990 binding_cluster::get_representative_path_vars (const region_model *model,
1991 svalue_set *visited,
1992 const region *base_reg,
1993 const svalue *sval,
1994 auto_vec<path_var> *out_pvs)
1995 const
1997 sval = simplify_for_binding (sval);
1999 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
2001 const binding_key *key = (*iter).first;
2002 const svalue *bound_sval = (*iter).second;
2003 if (bound_sval == sval)
2005 if (const concrete_binding *ckey
2006 = key->dyn_cast_concrete_binding ())
2008 auto_vec <const region *> subregions;
2009 base_reg->get_subregions_for_binding
2010 (model->get_manager (),
2011 ckey->get_start_bit_offset (),
2012 ckey->get_size_in_bits (),
2013 sval->get_type (),
2014 &subregions);
2015 unsigned i;
2016 const region *subregion;
2017 FOR_EACH_VEC_ELT (subregions, i, subregion)
2019 if (path_var pv
2020 = model->get_representative_path_var (subregion,
2021 visited))
2022 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2025 else
2027 const symbolic_binding *skey = (const symbolic_binding *)key;
2028 if (path_var pv
2029 = model->get_representative_path_var (skey->get_region (),
2030 visited))
2031 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
2037 /* Get any svalue bound to KEY, or NULL. */
2039 const svalue *
2040 binding_cluster::get_any_value (const binding_key *key) const
2042 return m_map.get (key);
2045 /* If this cluster has a single direct binding for the whole of the region,
2046 return it.
2047 For use in simplifying dumps. */
2049 const svalue *
2050 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
2052 /* Fail gracefully if MGR is NULL to make it easier to dump store
2053 instances in the debugger. */
2054 if (mgr == NULL)
2055 return NULL;
2057 if (m_map.elements () != 1)
2058 return NULL;
2060 const binding_key *key = binding_key::make (mgr, m_base_region);
2061 return get_any_value (key);
2064 /* class store_manager. */
2066 logger *
2067 store_manager::get_logger () const
2069 return m_mgr->get_logger ();
2072 /* binding consolidation. */
2074 const concrete_binding *
2075 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
2076 bit_offset_t size_in_bits)
2078 concrete_binding b (start_bit_offset, size_in_bits);
2079 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
2080 return existing;
2082 concrete_binding *to_save = new concrete_binding (b);
2083 m_concrete_binding_key_mgr.put (b, to_save);
2084 return to_save;
2087 const symbolic_binding *
2088 store_manager::get_symbolic_binding (const region *reg)
2090 symbolic_binding b (reg);
2091 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2092 return existing;
2094 symbolic_binding *to_save = new symbolic_binding (b);
2095 m_symbolic_binding_key_mgr.put (b, to_save);
2096 return to_save;
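/* A minimal consolidation sketch, mirroring the pattern used by the
   selftests later in this file (e.g. test_binding_key_overlap): repeated
   requests for the same key return the same interned object, so keys can
   be compared by pointer:

     store_manager mgr (NULL);
     const concrete_binding *k1 = mgr.get_concrete_binding (0, 8);
     const concrete_binding *k2 = mgr.get_concrete_binding (0, 8);
     // k1 == k2: the second call returns the object saved by the first
*/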
2099 /* class store. */
2101 /* store's default ctor. */
2103 store::store ()
2104 : m_called_unknown_fn (false)
2108 /* store's copy ctor. */
2110 store::store (const store &other)
2111 : m_cluster_map (other.m_cluster_map.elements ()),
2112 m_called_unknown_fn (other.m_called_unknown_fn)
2114 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2115 iter != other.m_cluster_map.end ();
2116 ++iter)
2118 const region *reg = (*iter).first;
2119 gcc_assert (reg);
2120 binding_cluster *c = (*iter).second;
2121 gcc_assert (c);
2122 m_cluster_map.put (reg, new binding_cluster (*c));
2126 /* store's dtor. */
2128 store::~store ()
2130 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2131 iter != m_cluster_map.end ();
2132 ++iter)
2133 delete (*iter).second;
2136 /* store's assignment operator. */
2138 store &
2139 store::operator= (const store &other)
2141 /* Delete existing cluster map. */
2142 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2143 iter != m_cluster_map.end ();
2144 ++iter)
2145 delete (*iter).second;
2146 m_cluster_map.empty ();
2148 m_called_unknown_fn = other.m_called_unknown_fn;
2150 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2151 iter != other.m_cluster_map.end ();
2152 ++iter)
2154 const region *reg = (*iter).first;
2155 gcc_assert (reg);
2156 binding_cluster *c = (*iter).second;
2157 gcc_assert (c);
2158 m_cluster_map.put (reg, new binding_cluster (*c));
2160 return *this;
2163 /* store's equality operator. */
2165 bool
2166 store::operator== (const store &other) const
2168 if (m_called_unknown_fn != other.m_called_unknown_fn)
2169 return false;
2171 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2172 return false;
2174 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2175 iter != m_cluster_map.end ();
2176 ++iter)
2178 const region *reg = (*iter).first;
2179 binding_cluster *c = (*iter).second;
2180 binding_cluster **other_slot
2181 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2182 if (other_slot == NULL)
2183 return false;
2184 if (*c != **other_slot)
2185 return false;
2188 gcc_checking_assert (hash () == other.hash ());
2190 return true;
2193 /* Get a hash value for this store. */
2195 hashval_t
2196 store::hash () const
2198 hashval_t result = 0;
2199 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2200 iter != m_cluster_map.end ();
2201 ++iter)
2202 result ^= (*iter).second->hash ();
2203 return result;
2206 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2207 removing duplicate parents. */
2209 static void
2210 get_sorted_parent_regions (auto_vec<const region *> *out,
2211 auto_vec<const region *> &in)
2213 /* Get the set of parent regions. */
2214 hash_set<const region *> parent_regions;
2215 const region *iter_reg;
2216 unsigned i;
2217 FOR_EACH_VEC_ELT (in, i, iter_reg)
2219 const region *parent_reg = iter_reg->get_parent_region ();
2220 gcc_assert (parent_reg);
2221 parent_regions.add (parent_reg);
2224 /* Write to OUT. */
2225 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2226 iter != parent_regions.end(); ++iter)
2227 out->safe_push (*iter);
2229 /* Sort OUT. */
2230 out->qsort (region::cmp_ptr_ptr);
2233 /* Dump a representation of this store to PP, using SIMPLE to control how
2234 svalues and regions are printed.
2235 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2236 (to make it easier to use from the debugger). */
2238 void
2239 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2240 store_manager *mgr) const
2242 /* Sort into some deterministic order. */
2243 auto_vec<const region *> base_regions;
2244 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2245 iter != m_cluster_map.end (); ++iter)
2247 const region *base_reg = (*iter).first;
2248 base_regions.safe_push (base_reg);
2250 base_regions.qsort (region::cmp_ptr_ptr);
2252 /* Gather clusters, organize by parent region, so that we can group
2253 together locals, globals, etc. */
2254 auto_vec<const region *> parent_regions;
2255 get_sorted_parent_regions (&parent_regions, base_regions);
2257 const region *parent_reg;
2258 unsigned i;
2259 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2261 gcc_assert (parent_reg);
2262 pp_string (pp, "clusters within ");
2263 parent_reg->dump_to_pp (pp, simple);
2264 if (multiline)
2265 pp_newline (pp);
2266 else
2267 pp_string (pp, " {");
2269 const region *base_reg;
2270 unsigned j;
2271 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2273 /* This is O(N * M), but N ought to be small. */
2274 if (base_reg->get_parent_region () != parent_reg)
2275 continue;
2276 binding_cluster *cluster
2277 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2278 if (!multiline)
2280 if (j > 0)
2281 pp_string (pp, ", ");
2283 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2285 /* Special-case to simplify dumps for the common case where
2286 we just have one value directly bound to the whole of a
2287 region. */
2288 if (multiline)
2290 pp_string (pp, " cluster for: ");
2291 base_reg->dump_to_pp (pp, simple);
2292 pp_string (pp, ": ");
2293 sval->dump_to_pp (pp, simple);
2294 if (cluster->escaped_p ())
2295 pp_string (pp, " (ESCAPED)");
2296 if (cluster->touched_p ())
2297 pp_string (pp, " (TOUCHED)");
2298 pp_newline (pp);
2300 else
2302 pp_string (pp, "region: {");
2303 base_reg->dump_to_pp (pp, simple);
2304 pp_string (pp, ", value: ");
2305 sval->dump_to_pp (pp, simple);
2306 if (cluster->escaped_p ())
2307 pp_string (pp, " (ESCAPED)");
2308 if (cluster->touched_p ())
2309 pp_string (pp, " (TOUCHED)");
2310 pp_string (pp, "}");
2313 else if (multiline)
2315 pp_string (pp, " cluster for: ");
2316 base_reg->dump_to_pp (pp, simple);
2317 pp_newline (pp);
2318 cluster->dump_to_pp (pp, simple, multiline);
2320 else
2322 pp_string (pp, "base region: {");
2323 base_reg->dump_to_pp (pp, simple);
2324 pp_string (pp, "} has cluster: {");
2325 cluster->dump_to_pp (pp, simple, multiline);
2326 pp_string (pp, "}");
2329 if (!multiline)
2330 pp_string (pp, "}");
2332 pp_printf (pp, "m_called_unknown_fn: %s",
2333 m_called_unknown_fn ? "TRUE" : "FALSE");
2334 if (multiline)
2335 pp_newline (pp);
2338 /* Dump a multiline representation of this store to stderr. */
2340 DEBUG_FUNCTION void
2341 store::dump (bool simple) const
2343 pretty_printer pp;
2344 pp_format_decoder (&pp) = default_tree_printer;
2345 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2346 pp.buffer->stream = stderr;
2347 dump_to_pp (&pp, simple, true, NULL);
2348 pp_newline (&pp);
2349 pp_flush (&pp);
2352 /* Assert that this object is valid. */
2354 void
2355 store::validate () const
2357 for (auto iter : m_cluster_map)
2358 iter.second->validate ();
2361 /* Return a new json::object of the form
2362 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2363 ... for each cluster within parent region},
2364 ...for each parent region,
2365 "called_unknown_fn": true/false}. */
2367 json::object *
2368 store::to_json () const
2370 json::object *store_obj = new json::object ();
2372 /* Sort into some deterministic order. */
2373 auto_vec<const region *> base_regions;
2374 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2375 iter != m_cluster_map.end (); ++iter)
2377 const region *base_reg = (*iter).first;
2378 base_regions.safe_push (base_reg);
2380 base_regions.qsort (region::cmp_ptr_ptr);
2382 /* Gather clusters, organize by parent region, so that we can group
2383 together locals, globals, etc. */
2384 auto_vec<const region *> parent_regions;
2385 get_sorted_parent_regions (&parent_regions, base_regions);
2387 const region *parent_reg;
2388 unsigned i;
2389 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2391 gcc_assert (parent_reg);
2393 json::object *clusters_in_parent_reg_obj = new json::object ();
2395 const region *base_reg;
2396 unsigned j;
2397 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2399 /* This is O(N * M), but N ought to be small. */
2400 if (base_reg->get_parent_region () != parent_reg)
2401 continue;
2402 binding_cluster *cluster
2403 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2404 label_text base_reg_desc = base_reg->get_desc ();
2405 clusters_in_parent_reg_obj->set (base_reg_desc.m_buffer,
2406 cluster->to_json ());
2408 label_text parent_reg_desc = parent_reg->get_desc ();
2409 store_obj->set (parent_reg_desc.m_buffer, clusters_in_parent_reg_obj);
2412 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2414 return store_obj;
2417 /* Get any svalue bound to REG, or NULL. */
2419 const svalue *
2420 store::get_any_binding (store_manager *mgr, const region *reg) const
2422 const region *base_reg = reg->get_base_region ();
2423 binding_cluster **cluster_slot
2424 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2425 if (!cluster_slot)
2426 return NULL;
2427 return (*cluster_slot)->get_any_binding (mgr, reg);
2430 /* Set the value of LHS_REG to RHS_SVAL. */
2432 void
2433 store::set_value (store_manager *mgr, const region *lhs_reg,
2434 const svalue *rhs_sval,
2435 uncertainty_t *uncertainty)
2437 logger *logger = mgr->get_logger ();
2438 LOG_SCOPE (logger);
2440 remove_overlapping_bindings (mgr, lhs_reg, uncertainty);
2442 rhs_sval = simplify_for_binding (rhs_sval);
2444 const region *lhs_base_reg = lhs_reg->get_base_region ();
2445 binding_cluster *lhs_cluster;
2446 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2448 /* Reject attempting to bind values into a symbolic region
2449 for an unknown ptr; merely invalidate values below. */
2450 lhs_cluster = NULL;
2452 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2453 then treat the region being pointed to as having escaped. */
2454 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2456 const region *ptr_dst = ptr_sval->get_pointee ();
2457 const region *ptr_base_reg = ptr_dst->get_base_region ();
2458 mark_as_escaped (ptr_base_reg);
2461 else if (lhs_base_reg->tracked_p ())
2463 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2464 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2466 else
2468 /* Reject attempting to bind values into an untracked region;
2469 merely invalidate values below. */
2470 lhs_cluster = NULL;
2473 /* Bindings to a cluster can affect other clusters if a symbolic
2474 base region is involved.
2475 Writes to concrete clusters can't affect other concrete clusters,
2476 but can affect symbolic clusters.
2477 Writes to symbolic clusters can affect both concrete and symbolic
2478 clusters.
2479 Invalidate our knowledge of other clusters that might have been
2480 affected by the write. */
2481 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2482 iter != m_cluster_map.end (); ++iter)
2484 const region *iter_base_reg = (*iter).first;
2485 binding_cluster *iter_cluster = (*iter).second;
2486 if (iter_base_reg != lhs_base_reg
2487 && (lhs_cluster == NULL
2488 || lhs_cluster->symbolic_p ()
2489 || iter_cluster->symbolic_p ()))
2491 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2492 switch (t_alias.get_value ())
2494 default:
2495 gcc_unreachable ();
2497 case tristate::TS_UNKNOWN:
2498 if (logger)
2500 pretty_printer *pp = logger->get_printer ();
2501 logger->start_log_line ();
2502 logger->log_partial ("possible aliasing of ");
2503 iter_base_reg->dump_to_pp (pp, true);
2504 logger->log_partial (" when writing SVAL: ");
2505 rhs_sval->dump_to_pp (pp, true);
2506 logger->log_partial (" to LHS_REG: ");
2507 lhs_reg->dump_to_pp (pp, true);
2508 logger->end_log_line ();
2510 /* Mark the whole of iter_base_reg within iter_cluster as unknown,
2511 using LHS_REG when considering overlaps, to handle
2512 symbolic vs concrete issues. */
2513 iter_cluster->mark_region_as_unknown
2514 (mgr,
2515 iter_base_reg, /* reg_to_bind */
2516 lhs_reg, /* reg_for_overlap */
2517 uncertainty);
2518 break;
2520 case tristate::TS_TRUE:
2521 gcc_unreachable ();
2522 break;
2524 case tristate::TS_FALSE:
2525 /* If they can't be aliases, then don't invalidate this
2526 cluster. */
2527 break;
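/* Worked example of the invalidation rules above (hypothetical code, not
   from the testsuite):

     int a, b;
     void test (int *p)
     {
       a = 1;    // concrete write: binds a's cluster; can't affect b
       *p = 2;   // symbolic write: eval_alias of *p vs "a" and "b" is
                 // UNKNOWN, so both clusters are marked as unknown (and,
                 // if UNCERTAINTY was supplied, the displaced constant 1
                 // is recorded as maybe-bound)
     }
*/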
2533 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2535 tristate
2536 store::eval_alias (const region *base_reg_a,
2537 const region *base_reg_b) const
2539 /* SSA names can't alias. */
2540 tree decl_a = base_reg_a->maybe_get_decl ();
2541 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2542 return tristate::TS_FALSE;
2543 tree decl_b = base_reg_b->maybe_get_decl ();
2544 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2545 return tristate::TS_FALSE;
2547 /* Try both ways, for symmetry. */
2548 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2549 if (ts_ab.is_false ())
2550 return tristate::TS_FALSE;
2551 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2552 if (ts_ba.is_false ())
2553 return tristate::TS_FALSE;
2554 return tristate::TS_UNKNOWN;
2557 /* Half of store::eval_alias; called twice for symmetry. */
2559 tristate
2560 store::eval_alias_1 (const region *base_reg_a,
2561 const region *base_reg_b) const
2563 if (const symbolic_region *sym_reg_a
2564 = base_reg_a->dyn_cast_symbolic_region ())
2566 const svalue *sval_a = sym_reg_a->get_pointer ();
2567 if (tree decl_b = base_reg_b->maybe_get_decl ())
2569 if (!may_be_aliased (decl_b))
2570 return tristate::TS_FALSE;
2571 if (sval_a->get_kind () == SK_INITIAL)
2572 if (!is_global_var (decl_b))
2574 /* The initial value of a pointer can't point to a local. */
2575 return tristate::TS_FALSE;
2578 if (sval_a->get_kind () == SK_INITIAL
2579 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2581 /* The initial value of a pointer can't point to a
2582 region that was allocated on the heap after the beginning of the
2583 path. */
2584 return tristate::TS_FALSE;
2586 if (const widening_svalue *widening_sval_a
2587 = sval_a->dyn_cast_widening_svalue ())
2589 const svalue *base = widening_sval_a->get_base_svalue ();
2590 if (const region_svalue *region_sval
2591 = base->dyn_cast_region_svalue ())
2593 const region *pointee = region_sval->get_pointee ();
2594 /* If we have sval_a is WIDENING(&REGION, OP), and
2595 B can't alias REGION, then B can't alias A either.
2596 For example, A might arise from
2597 for (ptr = &REGION; ...; ptr++)
2598 where sval_a is ptr in the 2nd iteration of the loop.
2599 We want to ensure that "*ptr" can only clobber things
2600 within REGION's base region. */
2601 tristate ts = eval_alias (pointee->get_base_region (),
2602 base_reg_b);
2603 if (ts.is_false ())
2604 return tristate::TS_FALSE;
2608 return tristate::TS_UNKNOWN;
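/* Source-level sketch of the rules above (hypothetical code):

     void test (int *p)  // the value of p on entry is an SK_INITIAL svalue
     {
       int local;
       int *q = __builtin_malloc (sizeof (int));
       *p = 1;   // *p can't alias "local" (an initial pointer value can't
                 // point to a local) nor the heap region behind q (it was
                 // allocated after the start of the path), so neither
                 // cluster is invalidated by this write
     }
*/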
2611 /* Remove all bindings overlapping REG within this store. */
2613 void
2614 store::clobber_region (store_manager *mgr, const region *reg)
2616 const region *base_reg = reg->get_base_region ();
2617 binding_cluster **slot = m_cluster_map.get (base_reg);
2618 if (!slot)
2619 return;
2620 binding_cluster *cluster = *slot;
2621 cluster->clobber_region (mgr, reg);
2622 if (cluster->redundant_p ())
2624 delete cluster;
2625 m_cluster_map.remove (base_reg);
2629 /* Remove any bindings for REG within this store. */
2631 void
2632 store::purge_region (store_manager *mgr, const region *reg)
2634 const region *base_reg = reg->get_base_region ();
2635 binding_cluster **slot = m_cluster_map.get (base_reg);
2636 if (!slot)
2637 return;
2638 binding_cluster *cluster = *slot;
2639 cluster->purge_region (mgr, reg);
2640 if (cluster->redundant_p ())
2642 delete cluster;
2643 m_cluster_map.remove (base_reg);
2647 /* Fill REG with SVAL. */
2649 void
2650 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2652 const region *base_reg = reg->get_base_region ();
2653 if (base_reg->symbolic_for_unknown_ptr_p ()
2654 || !base_reg->tracked_p ())
2655 return;
2656 binding_cluster *cluster = get_or_create_cluster (base_reg);
2657 cluster->fill_region (mgr, reg, sval);
2660 /* Zero-fill REG. */
2662 void
2663 store::zero_fill_region (store_manager *mgr, const region *reg)
2665 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2666 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2667 fill_region (mgr, reg, zero_sval);
2670 /* Mark REG as having unknown content. */
2672 void
2673 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2674 uncertainty_t *uncertainty)
2676 const region *base_reg = reg->get_base_region ();
2677 if (base_reg->symbolic_for_unknown_ptr_p ()
2678 || !base_reg->tracked_p ())
2679 return;
2680 binding_cluster *cluster = get_or_create_cluster (base_reg);
2681 cluster->mark_region_as_unknown (mgr, reg, reg, uncertainty);
2684 /* Purge state involving SVAL. */
2686 void
2687 store::purge_state_involving (const svalue *sval,
2688 region_model_manager *sval_mgr)
2690 auto_vec <const region *> base_regs_to_purge;
2691 for (auto iter : m_cluster_map)
2693 const region *base_reg = iter.first;
2694 if (base_reg->involves_p (sval))
2695 base_regs_to_purge.safe_push (base_reg);
2696 else
2698 binding_cluster *cluster = iter.second;
2699 cluster->purge_state_involving (sval, sval_mgr);
2703 for (auto iter : base_regs_to_purge)
2704 purge_cluster (iter);
2707 /* Get the cluster for BASE_REG, or NULL (const version). */
2709 const binding_cluster *
2710 store::get_cluster (const region *base_reg) const
2712 gcc_assert (base_reg);
2713 gcc_assert (base_reg->get_base_region () == base_reg);
2714 if (binding_cluster **slot
2715 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2716 return *slot;
2717 else
2718 return NULL;
2721 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2723 binding_cluster *
2724 store::get_cluster (const region *base_reg)
2726 gcc_assert (base_reg);
2727 gcc_assert (base_reg->get_base_region () == base_reg);
2728 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2729 return *slot;
2730 else
2731 return NULL;
2734 /* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
2736 binding_cluster *
2737 store::get_or_create_cluster (const region *base_reg)
2739 gcc_assert (base_reg);
2740 gcc_assert (base_reg->get_base_region () == base_reg);
2742 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2743 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2745 /* We shouldn't create clusters for base regions that aren't trackable. */
2746 gcc_assert (base_reg->tracked_p ());
2748 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2749 return *slot;
2751 binding_cluster *cluster = new binding_cluster (base_reg);
2752 m_cluster_map.put (base_reg, cluster);
2754 return cluster;
2757 /* Remove any cluster for BASE_REG, for use by
2758 region_model::unbind_region_and_descendents
2759 when popping stack frames and handling deleted heap regions. */
2761 void
2762 store::purge_cluster (const region *base_reg)
2764 gcc_assert (base_reg->get_base_region () == base_reg);
2765 binding_cluster **slot = m_cluster_map.get (base_reg);
2766 if (!slot)
2767 return;
2768 binding_cluster *cluster = *slot;
2769 delete cluster;
2770 m_cluster_map.remove (base_reg);
2773 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2774 Return true if successful, or false if the stores can't be merged. */
2776 bool
2777 store::can_merge_p (const store *store_a, const store *store_b,
2778 store *out_store, store_manager *mgr,
2779 model_merger *merger)
2781 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2782 out_store->m_called_unknown_fn = true;
2784 /* Get the union of all base regions for STORE_A and STORE_B. */
2785 hash_set<const region *> base_regions;
2786 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2787 iter_a != store_a->m_cluster_map.end (); ++iter_a)
2789 const region *base_reg_a = (*iter_a).first;
2790 base_regions.add (base_reg_a);
2792 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2793 iter_b != store_b->m_cluster_map.end (); ++iter_b)
2795 const region *base_reg_b = (*iter_b).first;
2796 base_regions.add (base_reg_b);
2799 /* Sort the base regions before considering them. This ought not to
2800 affect the results, but can affect which types UNKNOWN_REGIONs are
2801 created for in a run; sorting them thus avoids minor differences
2802 in logfiles. */
2803 auto_vec<const region *> vec_base_regions (base_regions.elements ());
2804 for (hash_set<const region *>::iterator iter = base_regions.begin ();
2805 iter != base_regions.end (); ++iter)
2806 vec_base_regions.quick_push (*iter);
2807 vec_base_regions.qsort (region::cmp_ptr_ptr);
2808 unsigned i;
2809 const region *base_reg;
2810 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2812 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2813 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
2814 /* At least one of cluster_a and cluster_b must be non-NULL. */
2815 binding_cluster *out_cluster
2816 = out_store->get_or_create_cluster (base_reg);
2817 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
2818 out_cluster, out_store, mgr, merger))
2819 return false;
2821 return true;
2824 /* Mark the cluster for BASE_REG as having escaped.
2825 For use when handling an unrecognized function call, and
2826 for params to "top-level" calls.
2827 Further unknown function calls could touch it, even if the cluster
2828 isn't reachable from args of those calls. */
2830 void
2831 store::mark_as_escaped (const region *base_reg)
2833 gcc_assert (base_reg);
2834 gcc_assert (base_reg->get_base_region () == base_reg);
2836 if (base_reg->symbolic_for_unknown_ptr_p ()
2837 || !base_reg->tracked_p ())
2838 return;
2840 binding_cluster *cluster = get_or_create_cluster (base_reg);
2841 cluster->mark_as_escaped ();
2844 /* Handle an unknown fncall by updating any clusters that have escaped
2845 (either in this fncall, or in a prior one). */
2847 void
2848 store::on_unknown_fncall (const gcall *call, store_manager *mgr,
2849 const conjured_purge &p)
2851 m_called_unknown_fn = true;
2853 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2854 iter != m_cluster_map.end (); ++iter)
2855 (*iter).second->on_unknown_fncall (call, mgr, p);
2858 /* Return true if a non-const pointer to BASE_REG (or something within it)
2859 has escaped to code outside of the TU being analyzed. */
2861 bool
2862 store::escaped_p (const region *base_reg) const
2864 gcc_assert (base_reg);
2865 gcc_assert (base_reg->get_base_region () == base_reg);
2867 if (binding_cluster **cluster_slot
2868 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
2869 return (*cluster_slot)->escaped_p ();
2870 return false;
2873 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
2874 this store, using VISITED to ensure the traversal terminates. */
2876 void
2877 store::get_representative_path_vars (const region_model *model,
2878 svalue_set *visited,
2879 const svalue *sval,
2880 auto_vec<path_var> *out_pvs) const
2882 gcc_assert (sval);
2884 /* Find all bindings that reference SVAL. */
2885 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2886 iter != m_cluster_map.end (); ++iter)
2888 const region *base_reg = (*iter).first;
2889 binding_cluster *cluster = (*iter).second;
2890 cluster->get_representative_path_vars (model, visited, base_reg, sval,
2891 out_pvs);
2894 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
2896 const region *reg = init_sval->get_region ();
2897 if (path_var pv = model->get_representative_path_var (reg,
2898 visited))
2899 out_pvs->safe_push (pv);
2903 /* Remove all bindings overlapping REG within this store, removing
2904 any clusters that become redundant.
2906 If UNCERTAINTY is non-NULL, use it to record any svalues that
2907 were removed, as being maybe-bound. */
2909 void
2910 store::remove_overlapping_bindings (store_manager *mgr, const region *reg,
2911 uncertainty_t *uncertainty)
2913 const region *base_reg = reg->get_base_region ();
2914 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
2916 binding_cluster *cluster = *cluster_slot;
2917 if (reg == base_reg && !escaped_p (base_reg))
2919 /* Remove whole cluster. */
2920 m_cluster_map.remove (base_reg);
2921 delete cluster;
2922 return;
2924 cluster->remove_overlapping_bindings (mgr, reg, uncertainty);
2928 /* Subclass of visitor that accumulates a hash_set of the regions that
2929 were visited. */
2931 struct region_finder : public visitor
2933 void visit_region (const region *reg) final override
2935 m_regs.add (reg);
2938 hash_set<const region *> m_regs;
2941 /* Canonicalize this store, to maximize the chance of equality between
2942 instances. */
2944 void
2945 store::canonicalize (store_manager *mgr)
2947 /* If we have e.g.:
2948 cluster for: HEAP_ALLOCATED_REGION(543)
2949 ESCAPED
2950 TOUCHED
2951 where the heap region is empty and unreferenced, then purge that
2952 cluster, to avoid unbounded state chains involving these. */
2954 /* Find regions that are referenced by bound values in the store. */
2955 region_finder s;
2956 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2957 iter != m_cluster_map.end (); ++iter)
2959 binding_cluster *cluster = (*iter).second;
2960 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
2961 bind_iter != cluster->m_map.end (); ++bind_iter)
2962 (*bind_iter).second->accept (&s);
2965 /* Locate heap-allocated regions that have empty bindings that weren't
2966 found above. */
2967 hash_set<const region *> purgeable_regions;
2968 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2969 iter != m_cluster_map.end (); ++iter)
2971 const region *base_reg = (*iter).first;
2972 binding_cluster *cluster = (*iter).second;
2973 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
2975 if (cluster->empty_p ())
2976 if (!s.m_regs.contains (base_reg))
2977 purgeable_regions.add (base_reg);
2979 /* Also cover the UNKNOWN case. */
2980 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2981 if (sval->get_kind () == SK_UNKNOWN)
2982 if (!s.m_regs.contains (base_reg))
2983 purgeable_regions.add (base_reg);
2987 /* Purge them. */
2988 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
2989 iter != purgeable_regions.end (); ++iter)
2991 const region *base_reg = *iter;
2992 purge_cluster (base_reg);
2996 /* Subroutine for use by exploded_path::feasible_p.
2998 We need to deal with state differences between:
2999 (a) when the exploded_graph is being initially constructed and
3000 (b) when replaying the state changes along a specific path in
3001 exploded_path::feasible_p.
3003 In (a), state merging happens, so when exploring a loop
3004 for (i = 0; i < 1024; i++)
3005 on successive iterations we have i == 0, then i == WIDENING.
3007 In (b), no state merging happens, so naively replaying the path
3008 that goes twice through the loop then exits it
3009 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3010 that exits the loop, which would be found to be infeasible as i == 1,
3011 and the path would be rejected.
3013 We need to fix up state during replay. This subroutine is
3014 called whenever we enter a supernode that we've already
3015 visited along this exploded_path, passing in OTHER_STORE
3016 from the destination enode's state.
3018 Find bindings to widening values in OTHER_STORE.
3019 For all that are found, update the binding in this store to UNKNOWN. */
3021 void
3022 store::loop_replay_fixup (const store *other_store,
3023 region_model_manager *mgr)
3025 gcc_assert (other_store);
3026 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
3027 iter != other_store->m_cluster_map.end (); ++iter)
3029 const region *base_reg = (*iter).first;
3030 binding_cluster *cluster = (*iter).second;
3031 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
3032 bind_iter != cluster->m_map.end (); ++bind_iter)
3034 const binding_key *key = (*bind_iter).first;
3035 const svalue *sval = (*bind_iter).second;
3036 if (sval->get_kind () == SK_WIDENING)
3038 binding_cluster *this_cluster
3039 = get_or_create_cluster (base_reg);
3040 const svalue *unknown
3041 = mgr->get_or_create_unknown_svalue (sval->get_type ());
3042 this_cluster->bind_key (key, unknown);
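/* Worked example (a sketch based on the loop above): with

     for (i = 0; i < 1024; i++)
       body ();

   OTHER_STORE (from the already-visited enode) binds i to a WIDENING
   svalue, while during replay this store has i == 1.  The fixup above
   rebinds i to UNKNOWN, so the later (i >= 1024) exit edge is no longer
   rejected as infeasible.  */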
3048 #if CHECKING_P
3050 namespace selftest {
3052 /* Verify that bit_range::intersects_p works as expected. */
3054 static void
3055 test_bit_range_intersects_p ()
3057 bit_range b0 (0, 1);
3058 bit_range b1 (1, 1);
3059 bit_range b2 (2, 1);
3060 bit_range b3 (3, 1);
3061 bit_range b4 (4, 1);
3062 bit_range b5 (5, 1);
3063 bit_range b6 (6, 1);
3064 bit_range b7 (7, 1);
3065 bit_range b1_to_6 (1, 6);
3066 bit_range b0_to_7 (0, 8);
3067 bit_range b3_to_5 (3, 3);
3068 bit_range b6_to_7 (6, 2);
3070 /* self-intersection is true. */
3071 ASSERT_TRUE (b0.intersects_p (b0));
3072 ASSERT_TRUE (b7.intersects_p (b7));
3073 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
3074 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
3076 ASSERT_FALSE (b0.intersects_p (b1));
3077 ASSERT_FALSE (b1.intersects_p (b0));
3078 ASSERT_FALSE (b0.intersects_p (b7));
3079 ASSERT_FALSE (b7.intersects_p (b0));
3081 ASSERT_TRUE (b0_to_7.intersects_p (b0));
3082 ASSERT_TRUE (b0_to_7.intersects_p (b7));
3083 ASSERT_TRUE (b0.intersects_p (b0_to_7));
3084 ASSERT_TRUE (b7.intersects_p (b0_to_7));
3086 ASSERT_FALSE (b0.intersects_p (b1_to_6));
3087 ASSERT_FALSE (b1_to_6.intersects_p (b0));
3088 ASSERT_TRUE (b1.intersects_p (b1_to_6));
3089 ASSERT_TRUE (b1_to_6.intersects_p (b1));
3090 ASSERT_TRUE (b1_to_6.intersects_p (b6));
3091 ASSERT_FALSE (b1_to_6.intersects_p (b7));
3093 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
3094 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
3096 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
3097 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
3099 bit_range r1 (0,0);
3100 bit_range r2 (0,0);
3101 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
3102 ASSERT_EQ (r1.get_start_bit_offset (), 0);
3103 ASSERT_EQ (r1.m_size_in_bits, 6);
3104 ASSERT_EQ (r2.get_start_bit_offset (), 1);
3105 ASSERT_EQ (r2.m_size_in_bits, 6);
3107 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
3108 ASSERT_EQ (r1.get_start_bit_offset (), 1);
3109 ASSERT_EQ (r1.m_size_in_bits, 6);
3110 ASSERT_EQ (r2.get_start_bit_offset (), 0);
3111 ASSERT_EQ (r2.m_size_in_bits, 6);
3114 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3116 static void
3117 assert_bit_range_from_mask_eq (const location &loc,
3118 unsigned HOST_WIDE_INT mask,
3119 const bit_range &expected)
3121 bit_range actual (0, 0);
3122 bool ok = bit_range::from_mask (mask, &actual);
3123 ASSERT_TRUE_AT (loc, ok);
3124 ASSERT_EQ_AT (loc, actual, expected);
3127 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3128 out EXPECTED_BIT_RANGE. */
3130 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3131 SELFTEST_BEGIN_STMT \
3132 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3133 EXPECTED_BIT_RANGE); \
3134 SELFTEST_END_STMT
3136 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3138 static void
3139 assert_no_bit_range_from_mask_eq (const location &loc,
3140 unsigned HOST_WIDE_INT mask)
3142 bit_range actual (0, 0);
3143 bool ok = bit_range::from_mask (mask, &actual);
3144 ASSERT_FALSE_AT (loc, ok);
3147 /* Assert that bit_range::from_mask (MASK) returns false. */
3149 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3150 SELFTEST_BEGIN_STMT \
3151 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3152 SELFTEST_END_STMT
3154 /* Verify that bit_range::from_mask works as expected. */
3156 static void
3157 test_bit_range_from_mask ()
3159 /* Should fail on zero. */
3160 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3162 /* Verify 1-bit masks. */
3163 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3164 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3165 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3166 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3167 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3168 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3169 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3170 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3172 /* Verify N-bit masks starting at bit 0. */
3173 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3174 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3175 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3176 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3177 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3178 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3179 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3180 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3182 /* Various other tests. */
3183 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3184 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3185 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3187 /* Multiple ranges of set bits should fail. */
3188 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3189 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3192 /* Implementation detail of ASSERT_OVERLAP. */
3194 static void
3195 assert_overlap (const location &loc,
3196 const concrete_binding *b1,
3197 const concrete_binding *b2)
3199 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3200 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3203 /* Implementation detail of ASSERT_DISJOINT. */
3205 static void
3206 assert_disjoint (const location &loc,
3207 const concrete_binding *b1,
3208 const concrete_binding *b2)
3210 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3211 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3214 /* Assert that B1 and B2 overlap, checking both ways. */
3216 #define ASSERT_OVERLAP(B1, B2) \
3217 SELFTEST_BEGIN_STMT \
3218 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3219 SELFTEST_END_STMT
3221 /* Assert that B1 and B2 do not overlap, checking both ways. */
3223 #define ASSERT_DISJOINT(B1, B2) \
3224 SELFTEST_BEGIN_STMT \
3225 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3226 SELFTEST_END_STMT
3228 /* Verify that concrete_binding::overlaps_p works as expected. */
3230 static void
3231 test_binding_key_overlap ()
3233 store_manager mgr (NULL);
3235 /* Various 8-bit bindings. */
3236 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3237 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3238 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3239 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3241 /* 16-bit bindings. */
3242 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3243 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3244 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3246 /* 32-bit binding. */
3247 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3249 /* Everything should self-overlap. */
3250 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3251 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3252 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3253 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3254 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3255 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3256 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3257 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3259 /* Verify the 8-bit bindings that don't overlap each other. */
3260 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3261 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3263 /* Check for overlap of differently-sized bindings. */
3264 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3265 /* ...and with differing start points. */
3266 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3267 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3268 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3269 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3271 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3272 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3273 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3274 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3277 /* Run all of the selftests within this file. */
3279 void
3280 analyzer_store_cc_tests ()
3282 test_bit_range_intersects_p ();
3283 test_bit_range_from_mask ();
3284 test_binding_key_overlap ();
3287 } // namespace selftest
3289 #endif /* CHECKING_P */
3291 } // namespace ana
3293 #endif /* #if ENABLE_ANALYZER */