gcc/analyzer/store.cc
1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2021 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tree.h"
25 #include "function.h"
26 #include "basic-block.h"
27 #include "gimple.h"
28 #include "gimple-iterator.h"
29 #include "diagnostic-core.h"
30 #include "graphviz.h"
31 #include "options.h"
32 #include "cgraph.h"
33 #include "tree-dfa.h"
34 #include "stringpool.h"
35 #include "convert.h"
36 #include "target.h"
37 #include "fold-const.h"
38 #include "tree-pretty-print.h"
39 #include "diagnostic-color.h"
40 #include "diagnostic-metadata.h"
41 #include "tristate.h"
42 #include "bitmap.h"
43 #include "selftest.h"
44 #include "function.h"
45 #include "json.h"
46 #include "analyzer/analyzer.h"
47 #include "analyzer/analyzer-logging.h"
48 #include "ordered-hash-map.h"
49 #include "options.h"
50 #include "cgraph.h"
51 #include "cfg.h"
52 #include "digraph.h"
53 #include "analyzer/supergraph.h"
54 #include "sbitmap.h"
55 #include "analyzer/call-string.h"
56 #include "analyzer/program-point.h"
57 #include "analyzer/store.h"
58 #include "analyzer/region-model.h"
59 #include "analyzer/analyzer-selftests.h"
60 #include "stor-layout.h"
62 #if ENABLE_ANALYZER
64 namespace ana {
66 /* Dump SVALS to PP, sorting them to ensure determinism. */
68 static void
69 dump_svalue_set (const hash_set <const svalue *> &svals,
70 pretty_printer *pp, bool simple)
72 auto_vec <const svalue *> v;
73 for (hash_set<const svalue *>::iterator iter = svals.begin ();
74 iter != svals.end (); ++iter)
76 v.safe_push (*iter);
78 v.qsort (svalue::cmp_ptr_ptr);
80 pp_character (pp, '{');
81 const svalue *sval;
82 unsigned i;
83 FOR_EACH_VEC_ELT (v, i, sval)
85 if (i > 0)
86 pp_string (pp, ", ");
87 sval->dump_to_pp (pp, simple);
89 pp_character (pp, '}');
92 /* class uncertainty_t. */
94 /* Dump this object to PP. */
96 void
97 uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
99 pp_string (pp, "{m_maybe_bound_svals: ");
100 dump_svalue_set (m_maybe_bound_svals, pp, simple);
102 pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
103 dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
104 pp_string (pp, "}");
107 /* Dump this object to stderr. */
109 DEBUG_FUNCTION void
110 uncertainty_t::dump (bool simple) const
112 pretty_printer pp;
113 pp_format_decoder (&pp) = default_tree_printer;
114 pp_show_color (&pp) = pp_show_color (global_dc->printer);
115 pp.buffer->stream = stderr;
116 dump_to_pp (&pp, simple);
117 pp_newline (&pp);
118 pp_flush (&pp);
121 /* class binding_key. */
123 const binding_key *
124 binding_key::make (store_manager *mgr, const region *r)
126 region_offset offset = r->get_offset ();
127 if (offset.symbolic_p ())
128 return mgr->get_symbolic_binding (r);
129 else
131 bit_size_t bit_size;
132 if (r->get_bit_size (&bit_size))
133 return mgr->get_concrete_binding (offset.get_bit_offset (),
134 bit_size);
135 else
136 return mgr->get_symbolic_binding (r);
140 /* Dump this binding_key to stderr. */
142 DEBUG_FUNCTION void
143 binding_key::dump (bool simple) const
145 pretty_printer pp;
146 pp_format_decoder (&pp) = default_tree_printer;
147 pp_show_color (&pp) = pp_show_color (global_dc->printer);
148 pp.buffer->stream = stderr;
149 dump_to_pp (&pp, simple);
150 pp_newline (&pp);
151 pp_flush (&pp);
154 /* Get a description of this binding_key. */
156 label_text
157 binding_key::get_desc (bool simple) const
159 pretty_printer pp;
160 pp_format_decoder (&pp) = default_tree_printer;
161 dump_to_pp (&pp, simple);
162 return label_text::take (xstrdup (pp_formatted_text (&pp)));
165 /* qsort callback. */
168 binding_key::cmp_ptrs (const void *p1, const void *p2)
170 const binding_key * const *pk1 = (const binding_key * const *)p1;
171 const binding_key * const *pk2 = (const binding_key * const *)p2;
172 return cmp (*pk1, *pk2);
175 /* Comparator for binding_keys. */
178 binding_key::cmp (const binding_key *k1, const binding_key *k2)
180 int concrete1 = k1->concrete_p ();
181 int concrete2 = k2->concrete_p ();
182 if (int concrete_cmp = concrete1 - concrete2)
183 return concrete_cmp;
184 if (concrete1)
186 const concrete_binding *b1 = (const concrete_binding *)k1;
187 const concrete_binding *b2 = (const concrete_binding *)k2;
188 if (int start_cmp = wi::cmp (b1->get_start_bit_offset (),
189 b2->get_start_bit_offset (),
190 SIGNED))
191 return start_cmp;
192 return wi::cmp (b1->get_next_bit_offset (), b2->get_next_bit_offset (),
193 SIGNED);
195 else
197 const symbolic_binding *s1 = (const symbolic_binding *)k1;
198 const symbolic_binding *s2 = (const symbolic_binding *)k2;
199 if (s1 > s2)
200 return 1;
201 if (s1 < s2)
202 return -1;
203 return 0;
207 /* struct bit_range. */
209 void
210 bit_range::dump_to_pp (pretty_printer *pp) const
212 byte_range bytes (0, 0);
213 if (as_byte_range (&bytes))
214 bytes.dump_to_pp (pp);
215 else
217 pp_string (pp, "start: ");
218 pp_wide_int (pp, m_start_bit_offset, SIGNED);
219 pp_string (pp, ", size: ");
220 pp_wide_int (pp, m_size_in_bits, SIGNED);
221 pp_string (pp, ", next: ");
222 pp_wide_int (pp, get_next_bit_offset (), SIGNED);
226 /* Dump this object to stderr. */
228 DEBUG_FUNCTION void
229 bit_range::dump () const
231 pretty_printer pp;
232 pp.buffer->stream = stderr;
233 dump_to_pp (&pp);
234 pp_newline (&pp);
235 pp_flush (&pp);
238 /* If OTHER is a subset of this, return true and write
239 to *OUT the relative range of OTHER within this.
240 Otherwise return false. */
242 bool
243 bit_range::contains_p (const bit_range &other, bit_range *out) const
245 if (contains_p (other.get_start_bit_offset ())
246 && contains_p (other.get_last_bit_offset ()))
248 out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
249 out->m_size_in_bits = other.m_size_in_bits;
250 return true;
252 else
253 return false;
256 /* If OTHER intersects this, return true and write
257 the relative range of OTHER within THIS to *OUT_THIS,
258 and the relative range of THIS within OTHER to *OUT_OTHER.
259 Otherwise return false. */
261 bool
262 bit_range::intersects_p (const bit_range &other,
263 bit_range *out_this,
264 bit_range *out_other) const
266 if (get_start_bit_offset () < other.get_next_bit_offset ()
267 && other.get_start_bit_offset () < get_next_bit_offset ())
269 bit_offset_t overlap_start
270 = MAX (get_start_bit_offset (),
271 other.get_start_bit_offset ());
272 bit_offset_t overlap_next
273 = MIN (get_next_bit_offset (),
274 other.get_next_bit_offset ());
275 gcc_assert (overlap_next > overlap_start);
276 bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
277 *out_this = abs_overlap_bits - get_start_bit_offset ();
278 *out_other = abs_overlap_bits - other.get_start_bit_offset ();
279 return true;
281 else
282 return false;
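/* Illustrative sketch (not part of the original file): how the relative
   ranges computed above look for a concrete case, assuming the ASSERT_*
   macros from selftest.h.  Intersecting bits 0-15 with bits 8-23 gives an
   absolute overlap of bits 8-15: relative to the first range it starts at
   bit 8; relative to the second, at bit 0.  */
#if 0
static void
test_bit_range_intersection_sketch ()
{
  bit_range a (0, 16);
  bit_range b (8, 16);
  bit_range a_overlap (0, 0);
  bit_range b_overlap (0, 0);
  ASSERT_TRUE (a.intersects_p (b, &a_overlap, &b_overlap));
  ASSERT_EQ (a_overlap.get_start_bit_offset (), 8);
  ASSERT_EQ (a_overlap.m_size_in_bits, 8);
  ASSERT_EQ (b_overlap.get_start_bit_offset (), 0);
  ASSERT_EQ (b_overlap.m_size_in_bits, 8);
}
#endif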
286 bit_range::cmp (const bit_range &br1, const bit_range &br2)
288 if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
289 br2.m_start_bit_offset))
290 return start_cmp;
292 return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
295 /* Offset this range by OFFSET. */
297 bit_range
298 bit_range::operator- (bit_offset_t offset) const
300 return bit_range (m_start_bit_offset - offset, m_size_in_bits);
303 /* If MASK is a contiguous range of set bits, write them
304 to *OUT and return true.
305 Otherwise return false. */
307 bool
308 bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
310 unsigned iter_bit_idx = 0;
311 unsigned HOST_WIDE_INT iter_bit_mask = 1;
313 /* Find the first contiguous run of set bits in MASK. */
315 /* Find first set bit in MASK. */
316 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
318 if (mask & iter_bit_mask)
319 break;
320 iter_bit_idx++;
321 iter_bit_mask <<= 1;
323 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
324 /* MASK is zero. */
325 return false;
327 unsigned first_set_iter_bit_idx = iter_bit_idx;
328 unsigned num_set_bits = 1;
329 iter_bit_idx++;
330 iter_bit_mask <<= 1;
332 /* Find next unset bit in MASK. */
333 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
335 if (!(mask & iter_bit_mask))
336 break;
337 num_set_bits++;
338 iter_bit_idx++;
339 iter_bit_mask <<= 1;
341 if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
343 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
344 return true;
347 /* We now have the first contiguous run of set bits in MASK.
348 Fail if any other bits are set. */
349 while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
351 if (mask & iter_bit_mask)
352 return false;
353 iter_bit_idx++;
354 iter_bit_mask <<= 1;
357 *out = bit_range (first_set_iter_bit_idx, num_set_bits);
358 return true;
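/* Illustrative sketch (not part of the original file): what from_mask
   accepts and rejects, assuming the ASSERT_* macros from selftest.h.
   0xf0 is a single run of four set bits starting at bit 4; 0x5 has two
   separate runs and an all-zero mask has none, so both are rejected.  */
#if 0
static void
test_bit_range_from_mask_sketch ()
{
  bit_range br (0, 0);
  ASSERT_TRUE (bit_range::from_mask (0xf0, &br));
  ASSERT_EQ (br.get_start_bit_offset (), 4);
  ASSERT_EQ (br.m_size_in_bits, 4);
  ASSERT_FALSE (bit_range::from_mask (0x5, &br));
  ASSERT_FALSE (bit_range::from_mask (0, &br));
}
#endif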
361 /* Attempt to convert this bit_range to a byte_range.
362 Return true if it is possible, writing the result to *OUT.
363 Otherwise return false. */
365 bool
366 bit_range::as_byte_range (byte_range *out) const
368 if (m_start_bit_offset % BITS_PER_UNIT == 0
369 && m_size_in_bits % BITS_PER_UNIT == 0)
371 out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
372 out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
373 return true;
375 return false;
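/* Illustrative sketch (not part of the original file): as_byte_range
   succeeds only for byte-aligned ranges, assuming the ASSERT_* macros
   from selftest.h.  The third byte converts cleanly; a range starting at
   bit 1 cannot be expressed in whole bytes.  */
#if 0
static void
test_as_byte_range_sketch ()
{
  byte_range bytes (0, 0);
  ASSERT_TRUE (bit_range (3 * BITS_PER_UNIT, BITS_PER_UNIT)
	       .as_byte_range (&bytes));
  ASSERT_EQ (bytes.m_start_byte_offset, 3);
  ASSERT_EQ (bytes.m_size_in_bytes, 1);
  ASSERT_FALSE (bit_range (1, BITS_PER_UNIT).as_byte_range (&bytes));
}
#endif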
378 /* Dump this object to PP. */
380 void
381 byte_range::dump_to_pp (pretty_printer *pp) const
383 if (m_size_in_bytes == 1)
385 pp_string (pp, "byte ");
386 pp_wide_int (pp, m_start_byte_offset, SIGNED);
388 else
390 pp_string (pp, "bytes ");
391 pp_wide_int (pp, m_start_byte_offset, SIGNED);
392 pp_string (pp, "-");
393 pp_wide_int (pp, get_last_byte_offset (), SIGNED);
397 /* Dump this object to stderr. */
399 DEBUG_FUNCTION void
400 byte_range::dump () const
402 pretty_printer pp;
403 pp.buffer->stream = stderr;
404 dump_to_pp (&pp);
405 pp_newline (&pp);
406 pp_flush (&pp);
409 /* If OTHER is a subset of this, return true and write
410 to *OUT the relative range of OTHER within this.
411 Otherwise return false. */
413 bool
414 byte_range::contains_p (const byte_range &other, byte_range *out) const
416 if (contains_p (other.get_start_byte_offset ())
417 && contains_p (other.get_last_byte_offset ()))
419 out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
420 out->m_size_in_bytes = other.m_size_in_bytes;
421 return true;
423 else
424 return false;
427 /* qsort comparator for byte ranges. */
430 byte_range::cmp (const byte_range &br1, const byte_range &br2)
432 /* Order first by offset. */
433 if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
434 br2.m_start_byte_offset))
435 return start_cmp;
437 /* ...then by size. */
438 return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
441 /* class concrete_binding : public binding_key. */
443 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
445 void
446 concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
448 m_bit_range.dump_to_pp (pp);
451 /* Return true if this binding overlaps with OTHER. */
453 bool
454 concrete_binding::overlaps_p (const concrete_binding &other) const
456 if (get_start_bit_offset () < other.get_next_bit_offset ()
457 && get_next_bit_offset () > other.get_start_bit_offset ())
458 return true;
459 return false;
462 /* Comparator for use by vec<const concrete_binding *>::qsort. */
465 concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
467 const concrete_binding *b1 = *(const concrete_binding * const *)p1;
468 const concrete_binding *b2 = *(const concrete_binding * const *)p2;
470 return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
473 /* class symbolic_binding : public binding_key. */
475 void
476 symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
478 //binding_key::dump_to_pp (pp, simple);
479 pp_string (pp, "region: ");
480 m_region->dump_to_pp (pp, simple);
483 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
486 symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
488 const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
489 const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
491 return region::cmp_ids (b1->get_region (), b2->get_region ());
494 /* The store is oblivious to the types of the svalues bound within
495 it: any type can get bound at any location.
496 Simplify any casts before binding.
498 For example, if we have:
499 struct big { int ia[1024]; };
500 struct big src, dst;
501 memcpy (&dst, &src, sizeof (struct big));
502 this reaches us in gimple form as:
503 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
504 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
505 Using cast_region when handling the MEM_REF would give us:
506 INIT_VAL(CAST_REG(unsigned char[4096], src))
507 as rhs_sval, but we can fold that into a cast svalue:
508 CAST(unsigned char[4096], INIT_VAL(src))
509 We can discard that cast from the svalue when binding it in
510 the store for "dst", and simply store:
511 cluster for: dst
512 key: {kind: direct, start: 0, size: 32768, next: 32768}
513 value: ‘struct big’ {INIT_VAL(src)}. */
515 static const svalue *
516 simplify_for_binding (const svalue *sval)
518 if (const svalue *cast_sval = sval->maybe_undo_cast ())
519 sval = cast_sval;
520 return sval;
523 /* class binding_map. */
525 /* binding_map's copy ctor. */
527 binding_map::binding_map (const binding_map &other)
528 : m_map (other.m_map)
532 /* binding_map's assignment operator. */
534 binding_map&
535 binding_map::operator=(const binding_map &other)
537 /* For now, assume we only ever copy to an empty cluster. */
538 gcc_assert (m_map.elements () == 0);
539 for (map_t::iterator iter = other.m_map.begin (); iter != other.m_map.end ();
540 ++iter)
542 const binding_key *key = (*iter).first;
543 const svalue *sval = (*iter).second;
544 m_map.put (key, sval);
546 return *this;
549 /* binding_map's equality operator. */
551 bool
552 binding_map::operator== (const binding_map &other) const
554 if (m_map.elements () != other.m_map.elements ())
555 return false;
557 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
559 const binding_key *key = (*iter).first;
560 const svalue *sval = (*iter).second;
561 const svalue **other_slot
562 = const_cast <map_t &> (other.m_map).get (key);
563 if (other_slot == NULL)
564 return false;
565 if (sval != *other_slot)
566 return false;
568 gcc_checking_assert (hash () == other.hash ());
569 return true;
572 /* Generate a hash value for this binding_map. */
574 hashval_t
575 binding_map::hash () const
577 hashval_t result = 0;
578 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
580 /* Use a new hasher for each key to avoid depending on the ordering
581 of keys when accumulating the result. */
582 inchash::hash hstate;
583 hstate.add_ptr ((*iter).first);
584 hstate.add_ptr ((*iter).second);
585 result ^= hstate.end ();
587 return result;
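/* Illustrative sketch (not part of the original file) of the property
   relied on above: because each pair gets a fresh inchash::hash and the
   per-pair hashes are combined with XOR, the combined value is independent
   of iteration order.  Assumes the ASSERT_EQ macro from selftest.h.  */
#if 0
static void
test_order_independent_hash_sketch ()
{
  int a;
  int b;
  inchash::hash ha_first, hb_second;
  ha_first.add_ptr (&a);
  hb_second.add_ptr (&b);
  inchash::hash hb_first, ha_second;
  hb_first.add_ptr (&b);
  ha_second.add_ptr (&a);
  /* XOR is commutative, so visiting {&a, &b} in either order gives
     the same result.  */
  ASSERT_EQ (ha_first.end () ^ hb_second.end (),
	     hb_first.end () ^ ha_second.end ());
}
#endif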
590 /* Dump a representation of this binding_map to PP.
591 SIMPLE controls how values and regions are to be printed.
592 If MULTILINE, then split the dump over multiple lines and
593 use whitespace for readability, otherwise put all on one line. */
595 void
596 binding_map::dump_to_pp (pretty_printer *pp, bool simple,
597 bool multiline) const
599 auto_vec <const binding_key *> binding_keys;
600 for (map_t::iterator iter = m_map.begin ();
601 iter != m_map.end (); ++iter)
603 const binding_key *key = (*iter).first;
604 binding_keys.safe_push (key);
606 binding_keys.qsort (binding_key::cmp_ptrs);
608 const binding_key *key;
609 unsigned i;
610 FOR_EACH_VEC_ELT (binding_keys, i, key)
612 const svalue *value = *const_cast <map_t &> (m_map).get (key);
613 if (multiline)
615 pp_string (pp, " key: {");
616 key->dump_to_pp (pp, simple);
617 pp_string (pp, "}");
618 pp_newline (pp);
619 pp_string (pp, " value: ");
620 if (tree t = value->get_type ())
621 dump_quoted_tree (pp, t);
622 pp_string (pp, " {");
623 value->dump_to_pp (pp, simple);
624 pp_string (pp, "}");
625 pp_newline (pp);
627 else
629 if (i > 0)
630 pp_string (pp, ", ");
631 pp_string (pp, "binding key: {");
632 key->dump_to_pp (pp, simple);
633 pp_string (pp, "}, value: {");
634 value->dump_to_pp (pp, simple);
635 pp_string (pp, "}");
640 /* Dump a multiline representation of this binding_map to stderr. */
642 DEBUG_FUNCTION void
643 binding_map::dump (bool simple) const
645 pretty_printer pp;
646 pp_format_decoder (&pp) = default_tree_printer;
647 pp_show_color (&pp) = pp_show_color (global_dc->printer);
648 pp.buffer->stream = stderr;
649 dump_to_pp (&pp, simple, true);
650 pp_newline (&pp);
651 pp_flush (&pp);
654 /* Return a new json::object of the form
655 {KEY_DESC : SVALUE_DESC,
656 ...for the various key/value pairs in this binding_map}. */
658 json::object *
659 binding_map::to_json () const
661 json::object *map_obj = new json::object ();
663 auto_vec <const binding_key *> binding_keys;
664 for (map_t::iterator iter = m_map.begin ();
665 iter != m_map.end (); ++iter)
667 const binding_key *key = (*iter).first;
668 binding_keys.safe_push (key);
670 binding_keys.qsort (binding_key::cmp_ptrs);
672 const binding_key *key;
673 unsigned i;
674 FOR_EACH_VEC_ELT (binding_keys, i, key)
676 const svalue *value = *const_cast <map_t &> (m_map).get (key);
677 label_text key_desc = key->get_desc ();
678 map_obj->set (key_desc.m_buffer, value->to_json ());
679 key_desc.maybe_free ();
682 return map_obj;
685 /* Comparator for imposing an order on binding_maps. */
688 binding_map::cmp (const binding_map &map1, const binding_map &map2)
690 if (int count_cmp = map1.elements () - map2.elements ())
691 return count_cmp;
693 auto_vec <const binding_key *> keys1 (map1.elements ());
694 for (map_t::iterator iter = map1.begin ();
695 iter != map1.end (); ++iter)
696 keys1.quick_push ((*iter).first);
697 keys1.qsort (binding_key::cmp_ptrs);
699 auto_vec <const binding_key *> keys2 (map2.elements ());
700 for (map_t::iterator iter = map2.begin ();
701 iter != map2.end (); ++iter)
702 keys2.quick_push ((*iter).first);
703 keys2.qsort (binding_key::cmp_ptrs);
705 for (size_t i = 0; i < keys1.length (); i++)
707 const binding_key *k1 = keys1[i];
708 const binding_key *k2 = keys2[i];
709 if (int key_cmp = binding_key::cmp (k1, k2))
710 return key_cmp;
711 gcc_assert (k1 == k2);
712 if (int sval_cmp = svalue::cmp_ptr (map1.get (k1), map2.get (k2)))
713 return sval_cmp;
716 return 0;
719 /* Get the child region of PARENT_REG based upon INDEX within a
720 CONSTRUCTOR. */
722 static const region *
723 get_subregion_within_ctor (const region *parent_reg, tree index,
724 region_model_manager *mgr)
726 switch (TREE_CODE (index))
728 default:
729 gcc_unreachable ();
730 case INTEGER_CST:
732 const svalue *index_sval
733 = mgr->get_or_create_constant_svalue (index);
734 return mgr->get_element_region (parent_reg,
735 TREE_TYPE (parent_reg->get_type ()),
736 index_sval);
738 break;
739 case FIELD_DECL:
740 return mgr->get_field_region (parent_reg, index);
744 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
746 static const svalue *
747 get_svalue_for_ctor_val (tree val, region_model_manager *mgr)
749 /* Reuse the get_rvalue logic from region_model. */
750 region_model m (mgr);
751 return m.get_rvalue (path_var (val, 0), NULL);
754 /* Bind values from CONSTRUCTOR to this map, relative to
755 PARENT_REG's relationship to its base region.
756 Return true if successful, false if there was a problem (e.g. due
757 to hitting a complexity limit). */
759 bool
760 binding_map::apply_ctor_to_region (const region *parent_reg, tree ctor,
761 region_model_manager *mgr)
763 gcc_assert (parent_reg);
764 gcc_assert (TREE_CODE (ctor) == CONSTRUCTOR);
766 unsigned ix;
767 tree index;
768 tree val;
769 tree parent_type = parent_reg->get_type ();
770 tree field;
771 if (TREE_CODE (parent_type) == RECORD_TYPE)
772 field = TYPE_FIELDS (parent_type);
773 else
774 field = NULL_TREE;
775 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), ix, index, val)
777 if (!index)
779 /* If index is NULL, then iterate through the fields for
780 a RECORD_TYPE, or use an INTEGER_CST otherwise.
781 Compare with similar logic in output_constructor. */
782 if (field)
784 index = field;
785 field = DECL_CHAIN (field);
787 else
788 index = build_int_cst (integer_type_node, ix);
790 else if (TREE_CODE (index) == RANGE_EXPR)
792 tree min_index = TREE_OPERAND (index, 0);
793 tree max_index = TREE_OPERAND (index, 1);
794 if (min_index == max_index)
796 if (!apply_ctor_pair_to_child_region (parent_reg, mgr,
797 min_index, val))
798 return false;
800 else
802 if (!apply_ctor_val_to_range (parent_reg, mgr,
803 min_index, max_index, val))
804 return false;
806 continue;
808 if (!apply_ctor_pair_to_child_region (parent_reg, mgr, index, val))
809 return false;
811 return true;
814 /* Bind the value VAL into the range of elements within PARENT_REG
815 from MIN_INDEX to MAX_INDEX (including endpoints).
816 For use in handling RANGE_EXPR within a CONSTRUCTOR.
817 Return true if successful, false if there was a problem (e.g. due
818 to hitting a complexity limit). */
820 bool
821 binding_map::apply_ctor_val_to_range (const region *parent_reg,
822 region_model_manager *mgr,
823 tree min_index, tree max_index,
824 tree val)
826 gcc_assert (TREE_CODE (min_index) == INTEGER_CST);
827 gcc_assert (TREE_CODE (max_index) == INTEGER_CST);
829 /* Generate a binding key for the range. */
830 const region *min_element
831 = get_subregion_within_ctor (parent_reg, min_index, mgr);
832 const region *max_element
833 = get_subregion_within_ctor (parent_reg, max_index, mgr);
834 region_offset min_offset = min_element->get_offset ();
835 if (min_offset.symbolic_p ())
836 return false;
837 bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
838 store_manager *smgr = mgr->get_store_manager ();
839 const binding_key *max_element_key = binding_key::make (smgr, max_element);
840 if (max_element_key->symbolic_p ())
841 return false;
842 const concrete_binding *max_element_ckey
843 = max_element_key->dyn_cast_concrete_binding ();
844 bit_size_t range_size_in_bits
845 = max_element_ckey->get_next_bit_offset () - start_bit_offset;
846 const concrete_binding *range_key
847 = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
848 if (range_key->symbolic_p ())
849 return false;
851 /* Get the value. */
852 if (TREE_CODE (val) == CONSTRUCTOR)
853 return false;
854 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
856 /* Bind the value to the range. */
857 put (range_key, sval);
858 return true;
861 /* Bind the value VAL into INDEX within PARENT_REG.
862 For use in handling a pair of entries within a CONSTRUCTOR.
863 Return true if successful, false if there was a problem (e.g. due
864 to hitting a complexity limit). */
866 bool
867 binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
868 region_model_manager *mgr,
869 tree index, tree val)
871 const region *child_reg
872 = get_subregion_within_ctor (parent_reg, index, mgr);
873 if (TREE_CODE (val) == CONSTRUCTOR)
874 return apply_ctor_to_region (child_reg, val, mgr);
875 else
877 const svalue *sval = get_svalue_for_ctor_val (val, mgr);
878 const binding_key *k
879 = binding_key::make (mgr->get_store_manager (), child_reg);
880 /* Handle the case where we have an unknown size for child_reg
881 (e.g. due to it being a trailing field with incomplete array
882 type). */
883 if (!k->concrete_p ())
885 /* Assume that sval has a well-defined size for this case. */
886 tree sval_type = sval->get_type ();
887 gcc_assert (sval_type);
888 HOST_WIDE_INT sval_byte_size = int_size_in_bytes (sval_type);
889 gcc_assert (sval_byte_size != -1);
890 bit_size_t sval_bit_size = sval_byte_size * BITS_PER_UNIT;
891 /* Get offset of child relative to base region. */
892 region_offset child_base_offset = child_reg->get_offset ();
893 if (child_base_offset.symbolic_p ())
894 return false;
895 /* Convert to an offset relative to the parent region. */
896 region_offset parent_base_offset = parent_reg->get_offset ();
897 gcc_assert (!parent_base_offset.symbolic_p ());
898 bit_offset_t child_parent_offset
899 = (child_base_offset.get_bit_offset ()
900 - parent_base_offset.get_bit_offset ());
901 /* Create a concrete key for the child within the parent. */
902 k = mgr->get_store_manager ()->get_concrete_binding
903 (child_parent_offset, sval_bit_size);
905 gcc_assert (k->concrete_p ());
906 put (k, sval);
907 return true;
911 /* Populate OUT with all bindings within this map that overlap KEY. */
913 void
914 binding_map::get_overlapping_bindings (const binding_key *key,
915 auto_vec<const binding_key *> *out)
917 for (auto iter : *this)
919 const binding_key *iter_key = iter.first;
920 if (const concrete_binding *ckey
921 = key->dyn_cast_concrete_binding ())
923 if (const concrete_binding *iter_ckey
924 = iter_key->dyn_cast_concrete_binding ())
926 if (ckey->overlaps_p (*iter_ckey))
927 out->safe_push (iter_key);
929 else
931 /* Assume overlap. */
932 out->safe_push (iter_key);
935 else
937 /* Assume overlap. */
938 out->safe_push (iter_key);
943 /* Remove, truncate, and/or split any bindings within this map that
944 overlap DROP_KEY.
946 For example, if we have:
948 +------------------------------------+
949 | old binding |
950 +------------------------------------+
952 which is to be overwritten with:
954 .......+----------------------+.......
955 .......| new binding |.......
956 .......+----------------------+.......
958 this function "cuts a hole" out of the old binding:
960 +------+......................+------+
961 |prefix| hole for new binding |suffix|
962 +------+......................+------+
964 into which the new binding can be added without
965 overlapping the prefix or suffix.
967 The prefix and suffix (if added) will be bound to the pertinent
968 parts of the value of the old binding.
970 For example, given:
971 struct s5
973 char arr[8];
975 void test_5 (struct s5 *p)
977 struct s5 f = *p;
978 f.arr[3] = 42;
980 then after the "f = *p;" we have:
981 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
982 and at the "f.arr[3] = 42;" we remove the bindings overlapping
983 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
984 giving:
985 cluster for: f
986 key: {bytes 0-2}
987 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
988 key: {bytes 4-7}
989 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
990 punching a hole into which the new value can be written at byte 3:
991 cluster for: f
992 key: {bytes 0-2}
993 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
994 key: {byte 3}
995 value: 'char' {(char)42}
996 key: {bytes 4-7}
997 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
999 If UNCERTAINTY is non-NULL, use it to record any svalues that
1000 were removed, as being maybe-bound. */
1002 void
1003 binding_map::remove_overlapping_bindings (store_manager *mgr,
1004 const binding_key *drop_key,
1005 uncertainty_t *uncertainty)
1007 auto_vec<const binding_key *> bindings;
1008 get_overlapping_bindings (drop_key, &bindings);
1010 unsigned i;
1011 const binding_key *iter_binding;
1012 FOR_EACH_VEC_ELT (bindings, i, iter_binding)
1014 const svalue *old_sval = get (iter_binding);
1015 if (uncertainty)
1016 uncertainty->on_maybe_bound_sval (old_sval);
1018 /* Begin by removing the old binding. */
1019 m_map.remove (iter_binding);
1021 /* Now potentially add the prefix and suffix. */
1022 if (const concrete_binding *drop_ckey
1023 = drop_key->dyn_cast_concrete_binding ())
1024 if (const concrete_binding *iter_ckey
1025 = iter_binding->dyn_cast_concrete_binding ())
1027 gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
1029 const bit_range &drop_bits = drop_ckey->get_bit_range ();
1030 const bit_range &iter_bits = iter_ckey->get_bit_range ();
1032 if (iter_bits.get_start_bit_offset ()
1033 < drop_bits.get_start_bit_offset ())
1035 /* We have a truncated prefix. */
1036 bit_range prefix_bits (iter_bits.get_start_bit_offset (),
1037 (drop_bits.get_start_bit_offset ()
1038 - iter_bits.get_start_bit_offset ()));
1039 const concrete_binding *prefix_key
1040 = mgr->get_concrete_binding (prefix_bits);
1041 bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
1042 const svalue *prefix_sval
1043 = old_sval->extract_bit_range (NULL_TREE,
1044 rel_prefix,
1045 mgr->get_svalue_manager ());
1046 m_map.put (prefix_key, prefix_sval);
1049 if (iter_bits.get_next_bit_offset ()
1050 > drop_bits.get_next_bit_offset ())
1052 /* We have a truncated suffix. */
1053 bit_range suffix_bits (drop_bits.get_next_bit_offset (),
1054 (iter_bits.get_next_bit_offset ()
1055 - drop_bits.get_next_bit_offset ()));
1056 const concrete_binding *suffix_key
1057 = mgr->get_concrete_binding (suffix_bits);
1058 bit_range rel_suffix (drop_bits.get_next_bit_offset ()
1059 - iter_bits.get_start_bit_offset (),
1060 suffix_bits.m_size_in_bits);
1061 const svalue *suffix_sval
1062 = old_sval->extract_bit_range (NULL_TREE,
1063 rel_suffix,
1064 mgr->get_svalue_manager ());
1065 m_map.put (suffix_key, suffix_sval);
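/* Illustrative sketch (not part of the original file): the prefix/suffix
   arithmetic above applied to the test_5 example from the comment,
   assuming 8-bit bytes and the ASSERT_* macros from selftest.h.  Dropping
   byte 3 (bits 24-31) from an 8-byte binding (bits 0-63) leaves a 3-byte
   prefix and a 4-byte suffix.  */
#if 0
static void
test_punch_hole_sketch ()
{
  bit_range iter_bits (0, 64);	/* Old binding: bytes 0-7.  */
  bit_range drop_bits (24, 8);	/* "f.arr[3] = 42;": byte 3.  */
  bit_range prefix_bits (iter_bits.get_start_bit_offset (),
			 drop_bits.get_start_bit_offset ()
			 - iter_bits.get_start_bit_offset ());
  ASSERT_EQ (prefix_bits.get_start_bit_offset (), 0);
  ASSERT_EQ (prefix_bits.m_size_in_bits, 24);	/* Bytes 0-2.  */
  bit_range suffix_bits (drop_bits.get_next_bit_offset (),
			 iter_bits.get_next_bit_offset ()
			 - drop_bits.get_next_bit_offset ());
  ASSERT_EQ (suffix_bits.get_start_bit_offset (), 32);
  ASSERT_EQ (suffix_bits.m_size_in_bits, 32);	/* Bytes 4-7.  */
}
#endif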
1071 /* class binding_cluster. */
1073 /* binding_cluster's copy ctor. */
1075 binding_cluster::binding_cluster (const binding_cluster &other)
1076 : m_base_region (other.m_base_region), m_map (other.m_map),
1077 m_escaped (other.m_escaped), m_touched (other.m_touched)
1081 /* binding_cluster's assignment operator. */
1083 binding_cluster&
1084 binding_cluster::operator= (const binding_cluster &other)
1086 gcc_assert (m_base_region == other.m_base_region);
1087 m_map = other.m_map;
1088 m_escaped = other.m_escaped;
1089 m_touched = other.m_touched;
1090 return *this;
1093 /* binding_cluster's equality operator. */
1095 bool
1096 binding_cluster::operator== (const binding_cluster &other) const
1098 if (m_map != other.m_map)
1099 return false;
1101 if (m_base_region != other.m_base_region)
1102 return false;
1104 if (m_escaped != other.m_escaped)
1105 return false;
1107 if (m_touched != other.m_touched)
1108 return false;
1110 gcc_checking_assert (hash () == other.hash ());
1112 return true;
1115 /* Generate a hash value for this binding_cluster. */
1117 hashval_t
1118 binding_cluster::hash () const
1120 return m_map.hash ();
1123 /* Return true if this binding_cluster is symbolic
1124 i.e. its base region is symbolic. */
1126 bool
1127 binding_cluster::symbolic_p () const
1129 return m_base_region->get_kind () == RK_SYMBOLIC;
1132 /* Dump a representation of this binding_cluster to PP.
1133 SIMPLE controls how values and regions are to be printed.
1134 If MULTILINE, then split the dump over multiple lines and
1135 use whitespace for readability, otherwise put all on one line. */
1137 void
1138 binding_cluster::dump_to_pp (pretty_printer *pp, bool simple,
1139 bool multiline) const
1141 if (m_escaped)
1143 if (multiline)
1145 pp_string (pp, " ESCAPED");
1146 pp_newline (pp);
1148 else
1149 pp_string (pp, "(ESCAPED)");
1151 if (m_touched)
1153 if (multiline)
1155 pp_string (pp, " TOUCHED");
1156 pp_newline (pp);
1158 else
1159 pp_string (pp, "(TOUCHED)");
1162 m_map.dump_to_pp (pp, simple, multiline);
1165 /* Dump a multiline representation of this binding_cluster to stderr. */
1167 DEBUG_FUNCTION void
1168 binding_cluster::dump (bool simple) const
1170 pretty_printer pp;
1171 pp_format_decoder (&pp) = default_tree_printer;
1172 pp_show_color (&pp) = pp_show_color (global_dc->printer);
1173 pp.buffer->stream = stderr;
1174 pp_string (&pp, " cluster for: ");
1175 m_base_region->dump_to_pp (&pp, simple);
1176 pp_string (&pp, ": ");
1177 pp_newline (&pp);
1178 dump_to_pp (&pp, simple, true);
1179 pp_newline (&pp);
1180 pp_flush (&pp);
1183 /* Assert that this object is valid. */
1185 void
1186 binding_cluster::validate () const
1188 int num_symbolic = 0;
1189 int num_concrete = 0;
1190 for (auto iter : m_map)
1192 if (iter.first->symbolic_p ())
1193 num_symbolic++;
1194 else
1195 num_concrete++;
1197 /* We shouldn't have more than one symbolic key per cluster
1198 (or one would have clobbered the other). */
1199 gcc_assert (num_symbolic < 2);
1200 /* We can't have both concrete and symbolic keys. */
1201 gcc_assert (num_concrete == 0 || num_symbolic == 0);
1204 /* Return a new json::object of the form
1205 {"escaped": true/false,
1206 "touched": true/false,
1207 "map" : object for the the binding_map. */
1209 json::object *
1210 binding_cluster::to_json () const
1212 json::object *cluster_obj = new json::object ();
1214 cluster_obj->set ("escaped", new json::literal (m_escaped));
1215 cluster_obj->set ("touched", new json::literal (m_touched));
1216 cluster_obj->set ("map", m_map.to_json ());
1218 return cluster_obj;
1221 /* Add a binding of SVAL to REG, unpacking SVAL if it is a
1222 compound_sval. */
1224 void
1225 binding_cluster::bind (store_manager *mgr,
1226 const region *reg, const svalue *sval)
1228 if (const compound_svalue *compound_sval
1229 = sval->dyn_cast_compound_svalue ())
1231 bind_compound_sval (mgr, reg, compound_sval);
1232 return;
1235 const binding_key *binding = binding_key::make (mgr, reg);
1236 bind_key (binding, sval);
1239 /* Bind SVAL to KEY.
1240 Unpacking of compound_svalues should already have been done by the
1241 time this is called. */
1243 void
1244 binding_cluster::bind_key (const binding_key *key, const svalue *sval)
1246 gcc_assert (sval->get_kind () != SK_COMPOUND);
1248 m_map.put (key, sval);
1249 if (key->symbolic_p ())
1250 m_touched = true;
1253 /* Subroutine of binding_cluster::bind.
1254 Unpack compound_svals when binding them, so that we bind them
1255 element-wise. */
1257 void
1258 binding_cluster::bind_compound_sval (store_manager *mgr,
1259 const region *reg,
1260 const compound_svalue *compound_sval)
1262 region_offset reg_offset = reg->get_offset ();
1263 if (reg_offset.symbolic_p ())
1265 m_touched = true;
1266 clobber_region (mgr, reg);
1267 return;
1270 for (map_t::iterator iter = compound_sval->begin ();
1271 iter != compound_sval->end (); ++iter)
1273 const binding_key *iter_key = (*iter).first;
1274 const svalue *iter_sval = (*iter).second;
1276 if (const concrete_binding *concrete_key
1277 = iter_key->dyn_cast_concrete_binding ())
1279 bit_offset_t effective_start
1280 = (concrete_key->get_start_bit_offset ()
1281 + reg_offset.get_bit_offset ());
1282 const concrete_binding *effective_concrete_key
1283 = mgr->get_concrete_binding (effective_start,
1284 concrete_key->get_size_in_bits ());
1285 bind_key (effective_concrete_key, iter_sval);
1287 else
1288 gcc_unreachable ();
1292 /* Remove all bindings overlapping REG within this cluster. */
1294 void
1295 binding_cluster::clobber_region (store_manager *mgr, const region *reg)
1297 remove_overlapping_bindings (mgr, reg, NULL);
1300 /* Remove any bindings for REG within this cluster. */
1302 void
1303 binding_cluster::purge_region (store_manager *mgr, const region *reg)
1305 gcc_assert (reg->get_kind () == RK_DECL);
1306 const binding_key *binding
1307 = binding_key::make (mgr, const_cast<region *> (reg));
1308 m_map.remove (binding);
1311 /* Clobber REG and fill it with repeated copies of SVAL. */
1313 void
1314 binding_cluster::fill_region (store_manager *mgr,
1315 const region *reg,
1316 const svalue *sval)
1318 clobber_region (mgr, reg);
1320 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1321 const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
1322 const svalue *fill_sval
1323 = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
1324 byte_size_sval, sval);
1325 bind (mgr, reg, fill_sval);
1328 /* Clobber REG within this cluster and fill it with zeroes. */
1330 void
1331 binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
1333 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1334 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
1335 fill_region (mgr, reg, zero_sval);
1338 /* Mark REG within this cluster as being unknown.
1339 If UNCERTAINTY is non-NULL, use it to record any svalues that
1340 had bindings to them removed, as being maybe-bound. */
1342 void
1343 binding_cluster::mark_region_as_unknown (store_manager *mgr,
1344 const region *reg,
1345 uncertainty_t *uncertainty)
1347 remove_overlapping_bindings (mgr, reg, uncertainty);
1349 /* Add a default binding to "unknown". */
1350 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1351 const svalue *sval
1352 = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1353 bind (mgr, reg, sval);
1356 /* Purge state involving SVAL. */
1358 void
1359 binding_cluster::purge_state_involving (const svalue *sval,
1360 region_model_manager *sval_mgr)
1362 auto_vec<const binding_key *> to_remove;
1363 auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
1364 for (auto iter : m_map)
1366 const binding_key *iter_key = iter.first;
1367 if (const symbolic_binding *symbolic_key
1368 = iter_key->dyn_cast_symbolic_binding ())
1370 const region *reg = symbolic_key->get_region ();
1371 if (reg->involves_p (sval))
1372 to_remove.safe_push (iter_key);
1374 const svalue *iter_sval = iter.second;
1375 if (iter_sval->involves_p (sval))
1376 to_make_unknown.safe_push (std::make_pair(iter_key,
1377 iter_sval->get_type ()));
1379 for (auto iter : to_remove)
1381 m_map.remove (iter);
1382 m_touched = true;
1384 for (auto iter : to_make_unknown)
1386 const svalue *new_sval
1387 = sval_mgr->get_or_create_unknown_svalue (iter.second);
1388 m_map.put (iter.first, new_sval);
1392 /* Get any SVAL bound to REG within this cluster,
1393 without checking parent regions of REG. */
1395 const svalue *
1396 binding_cluster::get_binding (store_manager *mgr,
1397 const region *reg) const
1399 const binding_key *reg_binding = binding_key::make (mgr, reg);
1400 const svalue *sval = m_map.get (reg_binding);
1401 if (sval)
1403 /* If we have a struct with a single field, then the binding of
1404 the field will equal that of the struct, and looking up e.g.
1405 PARENT_REG.field within:
1406 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1407 will erroneously return INIT_VAL(OTHER_REG), rather than
1408 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1409 Fix this issue by iterating upwards whilst the bindings are equal,
1410 expressing the lookups as subvalues.
1411 We have to gather a list of subregion accesses, then walk it
1412 in reverse to get the subvalues. */
1413 auto_vec<const region *> regions;
1414 while (const region *parent_reg = reg->get_parent_region ())
1416 const binding_key *parent_reg_binding
1417 = binding_key::make (mgr, parent_reg);
1418 if (parent_reg_binding == reg_binding
1419 && sval->get_type ()
1420 && reg->get_type ()
1421 && sval->get_type () != reg->get_type ())
1423 regions.safe_push (reg);
1424 reg = parent_reg;
1426 else
1427 break;
1429 if (sval->get_type ()
1430 && reg->get_type ()
1431 && sval->get_type () == reg->get_type ())
1433 unsigned i;
1434 const region *iter_reg;
1435 FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
1437 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1438 sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
1439 sval, iter_reg);
1443 return sval;
1446 /* Get any SVAL bound to REG within this cluster,
1447 either directly for REG, or recursively checking for bindings within
1448 parent regions and extracting subvalues if need be. */
1450 const svalue *
1451 binding_cluster::get_binding_recursive (store_manager *mgr,
1452 const region *reg) const
1454 if (const svalue *sval = get_binding (mgr, reg))
1455 return sval;
1456 if (reg != m_base_region)
1457 if (const region *parent_reg = reg->get_parent_region ())
1458 if (const svalue *parent_sval
1459 = get_binding_recursive (mgr, parent_reg))
1461 /* Extract child svalue from parent svalue. */
1462 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1463 return rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
1464 parent_sval, reg);
1466 return NULL;
1469 /* Get any value bound for REG within this cluster. */
1471 const svalue *
1472 binding_cluster::get_any_binding (store_manager *mgr,
1473 const region *reg) const
1475 /* Look for a direct binding. */
1476 if (const svalue *direct_sval
1477 = get_binding_recursive (mgr, reg))
1478 return direct_sval;
1480 /* If this cluster has been touched by a symbolic write, then the content
1481 of any subregion not currently specifically bound is "UNKNOWN". */
1482 if (m_touched)
1484 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1485 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1488 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1489 then we don't know if we're reading those values or not, so the result
1490 is also "UNKNOWN". */
1491 if (reg->get_offset ().symbolic_p ()
1492 && m_map.elements () > 0)
1494 region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
1495 return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
1498 if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
1499 return compound_sval;
1501 /* Otherwise, the initial value, or uninitialized. */
1502 return NULL;
1505 /* Attempt to get a compound_svalue for the bindings within the cluster
1506 affecting REG (which could be the base region itself).
1508 Create a compound_svalue with the subset of bindings that affect REG,
1509 offsetting them so that the offsets are relative to the start of REG
1510 within the cluster.
1512 For example, REG could be one element within an array of structs.
1514 Return the resulting compound_svalue, or NULL if there's a problem. */
1516 const svalue *
1517 binding_cluster::maybe_get_compound_binding (store_manager *mgr,
1518 const region *reg) const
1520 region_offset cluster_offset = m_base_region->get_offset ();
1521 if (cluster_offset.symbolic_p ())
1522 return NULL;
1523 region_offset reg_offset = reg->get_offset ();
1524 if (reg_offset.symbolic_p ())
1525 return NULL;
1527 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1529 We will build the result map in two parts:
1530 (a) result_map, holding the concrete keys from this cluster,
1532 (b) default_map, holding the initial values for the region
1533 (e.g. uninitialized, initializer values, or zero), unless this
1534 cluster has been touched.
1536 We will populate (a), and as we do, clobber (b), trimming and
1537 splitting its bindings as necessary.
1538 Finally, we will merge (b) into (a), giving a concrete map
1539 that merges both the initial values and the bound values from
1540 the binding_cluster.
1541 Doing it this way reduces N for the O(N^2) intersection-finding;
1542 perhaps we should have a spatially-organized data structure for
1543 concrete keys, though. */
1545 binding_map result_map;
1546 binding_map default_map;
1548 /* Set up default values in default_map. */
1549 const svalue *default_sval;
1550 if (m_touched)
1551 default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
1552 else
1553 default_sval = sval_mgr->get_or_create_initial_value (reg);
1554 const binding_key *default_key = binding_key::make (mgr, reg);
1555 default_map.put (default_key, default_sval);
1557 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1559 const binding_key *key = (*iter).first;
1560 const svalue *sval = (*iter).second;
1562 if (const concrete_binding *concrete_key
1563 = key->dyn_cast_concrete_binding ())
1565 const bit_range &bound_range = concrete_key->get_bit_range ();
1567 bit_size_t reg_bit_size;
1568 if (!reg->get_bit_size (&reg_bit_size))
1569 return NULL;
1571 bit_range reg_range (reg_offset.get_bit_offset (),
1572 reg_bit_size);
1574 /* Skip bindings that are outside the bit range of REG. */
1575 if (!bound_range.intersects_p (reg_range))
1576 continue;
1578 /* We shouldn't have an exact match; that should have been
1579 handled already. */
1580 gcc_assert (!(reg_range == bound_range));
1582 bit_range subrange (0, 0);
1583 if (reg_range.contains_p (bound_range, &subrange))
1585 /* We have a bound range fully within REG.
1586 Add it to map, offsetting accordingly. */
1588 /* Get offset of KEY relative to REG, rather than to
1589 the cluster. */
1590 const concrete_binding *offset_concrete_key
1591 = mgr->get_concrete_binding (subrange);
1592 result_map.put (offset_concrete_key, sval);
1594 /* Clobber default_map, removing/trimming/splitting where
1595 it overlaps with offset_concrete_key. */
1596 default_map.remove_overlapping_bindings (mgr,
1597 offset_concrete_key,
1598 NULL);
1600 else if (bound_range.contains_p (reg_range, &subrange))
1602 /* REG is fully within the bound range, but
1603 is not equal to it; we're extracting a subvalue. */
1604 return sval->extract_bit_range (reg->get_type (),
1605 subrange,
1606 mgr->get_svalue_manager ());
1608 else
1610 /* REG and the bound range partially overlap. */
1611 bit_range reg_subrange (0, 0);
1612 bit_range bound_subrange (0, 0);
1613 reg_range.intersects_p (bound_range,
1614 &reg_subrange, &bound_subrange);
1616 /* Get the bits from the bound value for the bits at the
1617 intersection (relative to the bound value). */
1618 const svalue *overlap_sval
1619 = sval->extract_bit_range (NULL_TREE,
1620 bound_subrange,
1621 mgr->get_svalue_manager ());
1623 /* Get key for overlap, relative to the REG. */
1624 const concrete_binding *overlap_concrete_key
1625 = mgr->get_concrete_binding (reg_subrange);
1626 result_map.put (overlap_concrete_key, overlap_sval);
1628 /* Clobber default_map, removing/trimming/splitting where
1629 it overlaps with overlap_concrete_key. */
1630 default_map.remove_overlapping_bindings (mgr,
1631 overlap_concrete_key,
1632 NULL);
1635 else
1636 /* Can't handle symbolic bindings. */
1637 return NULL;
1640 if (result_map.elements () == 0)
1641 return NULL;
1643 /* Merge any bindings from default_map into result_map. */
1644 for (auto iter : default_map)
1646 const binding_key *key = iter.first;
1647 const svalue *sval = iter.second;
1648 result_map.put (key, sval);
1651 return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
1654 /* Remove, truncate, and/or split any bindings within this map that
1655 overlap REG.
1656 If UNCERTAINTY is non-NULL, use it to record any svalues that
1657 were removed, as being maybe-bound. */
1659 void
1660 binding_cluster::remove_overlapping_bindings (store_manager *mgr,
1661 const region *reg,
1662 uncertainty_t *uncertainty)
1664 const binding_key *reg_binding = binding_key::make (mgr, reg);
1666 m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty);
1669 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1670 MGR and MERGER.
1671 Return true if they can be merged, false otherwise. */
1673 bool
1674 binding_cluster::can_merge_p (const binding_cluster *cluster_a,
1675 const binding_cluster *cluster_b,
1676 binding_cluster *out_cluster,
1677 store *out_store,
1678 store_manager *mgr,
1679 model_merger *merger)
1681 gcc_assert (out_cluster);
1683 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1684 true if either of the inputs is true. */
1685 if ((cluster_a && cluster_a->m_escaped)
1686 || (cluster_b && cluster_b->m_escaped))
1687 out_cluster->m_escaped = true;
1688 if ((cluster_a && cluster_a->m_touched)
1689 || (cluster_b && cluster_b->m_touched))
1690 out_cluster->m_touched = true;
1692 /* At least one of CLUSTER_A and CLUSTER_B is non-NULL, but either
1693 could be NULL. Handle these cases. */
1694 if (cluster_a == NULL)
1696 gcc_assert (cluster_b != NULL);
1697 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1698 out_cluster->make_unknown_relative_to (cluster_b, out_store, mgr);
1699 return true;
1701 if (cluster_b == NULL)
1703 gcc_assert (cluster_a != NULL);
1704 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1705 out_cluster->make_unknown_relative_to (cluster_a, out_store, mgr);
1706 return true;
1709 /* The "both inputs are non-NULL" case. */
1710 gcc_assert (cluster_a != NULL && cluster_b != NULL);
1711 gcc_assert (cluster_a->m_base_region == out_cluster->m_base_region);
1712 gcc_assert (cluster_b->m_base_region == out_cluster->m_base_region);
1714 hash_set<const binding_key *> keys;
1715 for (map_t::iterator iter_a = cluster_a->m_map.begin ();
1716 iter_a != cluster_a->m_map.end (); ++iter_a)
1718 const binding_key *key_a = (*iter_a).first;
1719 keys.add (key_a);
1721 for (map_t::iterator iter_b = cluster_b->m_map.begin ();
1722 iter_b != cluster_b->m_map.end (); ++iter_b)
1724 const binding_key *key_b = (*iter_b).first;
1725 keys.add (key_b);
1727 int num_symbolic_keys = 0;
1728 int num_concrete_keys = 0;
1729 for (hash_set<const binding_key *>::iterator iter = keys.begin ();
1730 iter != keys.end (); ++iter)
1732 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
1733 const binding_key *key = *iter;
1734 const svalue *sval_a = cluster_a->get_any_value (key);
1735 const svalue *sval_b = cluster_b->get_any_value (key);
1737 if (key->symbolic_p ())
1738 num_symbolic_keys++;
1739 else
1740 num_concrete_keys++;
1742 if (sval_a == sval_b)
1744 gcc_assert (sval_a);
1745 out_cluster->m_map.put (key, sval_a);
1746 continue;
1748 else if (sval_a && sval_b)
1750 if (const svalue *merged_sval
1751 = sval_a->can_merge_p (sval_b, sval_mgr, merger))
1753 out_cluster->m_map.put (key, merged_sval);
1754 continue;
1756 /* Merger of the svalues failed. Reject merger of the cluster. */
1757 return false;
1760 /* If we get here, then one cluster binds this key and the other
1761 doesn't; merge them as "UNKNOWN". */
1762 gcc_assert (sval_a || sval_b);
1764 const svalue *bound_sval = sval_a ? sval_a : sval_b;
1765 tree type = bound_sval->get_type ();
1766 const svalue *unknown_sval
1767 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue (type);
1769 /* ...but reject the merger if this sval shouldn't be mergeable
1770 (e.g. reject merging svalues that have non-purgable sm-state,
1771 to avoid falsely reporting memory leaks by merging them
1772 with something else). */
1773 if (!bound_sval->can_merge_p (unknown_sval, sval_mgr, merger))
1774 return false;
1776 out_cluster->m_map.put (key, unknown_sval);
1779 /* We can only have at most one symbolic key per cluster,
1780 and if we do, we can't have any concrete keys.
1781 If this happens, mark the cluster as touched, with no keys. */
1782 if (num_symbolic_keys >= 2
1783 || (num_concrete_keys > 0 && num_symbolic_keys > 0))
1785 out_cluster->m_touched = true;
1786 out_cluster->m_map.empty ();
1789 /* We don't handle other kinds of overlaps yet. */
1791 return true;
1794 /* Update this cluster to reflect an attempt to merge OTHER where there
1795 is no other cluster to merge with, and so we're notionally merging the
1796 bound values in OTHER with the initial value of the relevant regions.
1798 Any bound keys in OTHER should be bound to unknown in this. */
1800 void
1801 binding_cluster::make_unknown_relative_to (const binding_cluster *other,
1802 store *out_store,
1803 store_manager *mgr)
1805 for (map_t::iterator iter = other->m_map.begin ();
1806 iter != other->m_map.end (); ++iter)
1808 const binding_key *iter_key = (*iter).first;
1809 const svalue *iter_sval = (*iter).second;
1810 const svalue *unknown_sval
1811 = mgr->get_svalue_manager ()->get_or_create_unknown_svalue
1812 (iter_sval->get_type ());
1813 m_map.put (iter_key, unknown_sval);
1815 /* For any pointers in OTHER, the merger means that the
1816 concrete pointer becomes an unknown value, which could
1817 show up as a false report of a leak when considering what
1818 pointers are live before vs after.
1819 Avoid this by marking the base regions they point to as having
1820 escaped. */
1821 if (const region_svalue *region_sval
1822 = iter_sval->dyn_cast_region_svalue ())
1824 const region *base_reg
1825 = region_sval->get_pointee ()->get_base_region ();
1826 if (!base_reg->symbolic_for_unknown_ptr_p ())
1828 binding_cluster *c = out_store->get_or_create_cluster (base_reg);
1829 c->mark_as_escaped ();
1835 /* Mark this cluster as having escaped. */
1837 void
1838 binding_cluster::mark_as_escaped ()
1840 m_escaped = true;
1843 /* If this cluster has escaped (by this call, or by an earlier one, or
1844 by being an external param), then unbind all values and mark it
1845 as "touched", so that it has an unknown value, rather than an
1846 initial_svalue. */
1848 void
1849 binding_cluster::on_unknown_fncall (const gcall *call,
1850 store_manager *mgr)
1852 if (m_escaped)
1854 m_map.empty ();
1856 /* Bind it to a new "conjured" value using CALL. */
1857 const svalue *sval
1858 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
1859 (m_base_region->get_type (), call, m_base_region);
1860 bind (mgr, m_base_region, sval);
1862 m_touched = true;
1866 /* Mark this cluster as having been clobbered by STMT. */
1868 void
1869 binding_cluster::on_asm (const gasm *stmt,
1870 store_manager *mgr)
1872 m_map.empty ();
1874 /* Bind it to a new "conjured" value using STMT. */
1875 const svalue *sval
1876 = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
1877 (m_base_region->get_type (), stmt, m_base_region);
1878 bind (mgr, m_base_region, sval);
1880 m_touched = true;
1883 /* Return true if this binding_cluster has no information
1884 i.e. if there are no bindings, and it hasn't been marked as having
1885 escaped, or touched symbolically. */
1887 bool
1888 binding_cluster::redundant_p () const
1890 return (m_map.elements () == 0
1891 && !m_escaped
1892 && !m_touched);
1895 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
1897 static void
1898 append_pathvar_with_type (path_var pv,
1899 tree type,
1900 auto_vec<path_var> *out_pvs)
1902 gcc_assert (pv.m_tree);
1904 if (TREE_TYPE (pv.m_tree) != type)
1905 pv.m_tree = build1 (NOP_EXPR, type, pv.m_tree);
1907 out_pvs->safe_push (pv);
1910 /* Find representative path_vars for SVAL within this binding of BASE_REG,
1911 appending the results to OUT_PVS. */
1913 void
1914 binding_cluster::get_representative_path_vars (const region_model *model,
1915 svalue_set *visited,
1916 const region *base_reg,
1917 const svalue *sval,
1918 auto_vec<path_var> *out_pvs)
1919 const
1921 sval = simplify_for_binding (sval);
1923 for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
1925 const binding_key *key = (*iter).first;
1926 const svalue *bound_sval = (*iter).second;
1927 if (bound_sval == sval)
1929 if (const concrete_binding *ckey
1930 = key->dyn_cast_concrete_binding ())
1932 auto_vec <const region *> subregions;
1933 base_reg->get_subregions_for_binding
1934 (model->get_manager (),
1935 ckey->get_start_bit_offset (),
1936 ckey->get_size_in_bits (),
1937 sval->get_type (),
1938 &subregions);
1939 unsigned i;
1940 const region *subregion;
1941 FOR_EACH_VEC_ELT (subregions, i, subregion)
1943 if (path_var pv
1944 = model->get_representative_path_var (subregion,
1945 visited))
1946 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
1949 else
1951 const symbolic_binding *skey = (const symbolic_binding *)key;
1952 if (path_var pv
1953 = model->get_representative_path_var (skey->get_region (),
1954 visited))
1955 append_pathvar_with_type (pv, sval->get_type (), out_pvs);
1961 /* Get any svalue bound to KEY, or NULL. */
1963 const svalue *
1964 binding_cluster::get_any_value (const binding_key *key) const
1966 return m_map.get (key);
1969 /* If this cluster has a single direct binding for the whole of the region,
1970 return it.
1971 For use in simplifying dumps. */
1973 const svalue *
1974 binding_cluster::maybe_get_simple_value (store_manager *mgr) const
1976 /* Fail gracefully if MGR is NULL to make it easier to dump store
1977 instances in the debugger. */
1978 if (mgr == NULL)
1979 return NULL;
1981 if (m_map.elements () != 1)
1982 return NULL;
1984 const binding_key *key = binding_key::make (mgr, m_base_region);
1985 return get_any_value (key);
1988 /* class store_manager. */
1990 /* binding consolidation. */
1992 const concrete_binding *
1993 store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
1994 bit_offset_t size_in_bits)
1996 concrete_binding b (start_bit_offset, size_in_bits);
1997 if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
1998 return existing;
2000 concrete_binding *to_save = new concrete_binding (b);
2001 m_concrete_binding_key_mgr.put (b, to_save);
2002 return to_save;
2005 const symbolic_binding *
2006 store_manager::get_symbolic_binding (const region *reg)
2008 symbolic_binding b (reg);
2009 if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
2010 return existing;
2012 symbolic_binding *to_save = new symbolic_binding (b);
2013 m_symbolic_binding_key_mgr.put (b, to_save);
2014 return to_save;
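/* A minimal usage sketch (illustrative, mirroring the selftests below):
   because keys are consolidated, requesting the same bit range twice
   yields the same object, so binding keys can be compared by pointer:

     store_manager mgr (NULL);
     const concrete_binding *k1 = mgr.get_concrete_binding (0, 32);
     const concrete_binding *k2 = mgr.get_concrete_binding (0, 32);
     gcc_assert (k1 == k2);  */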
2017 /* class store. */
2019 /* store's default ctor. */
2021 store::store ()
2022 : m_called_unknown_fn (false)
2026 /* store's copy ctor. */
2028 store::store (const store &other)
2029 : m_called_unknown_fn (other.m_called_unknown_fn)
2031 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2032 iter != other.m_cluster_map.end ();
2033 ++iter)
2035 const region *reg = (*iter).first;
2036 gcc_assert (reg);
2037 binding_cluster *c = (*iter).second;
2038 gcc_assert (c);
2039 m_cluster_map.put (reg, new binding_cluster (*c));
2043 /* store's dtor. */
2045 store::~store ()
2047 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2048 iter != m_cluster_map.end ();
2049 ++iter)
2050 delete (*iter).second;
2053 /* store's assignment operator. */
2055 store &
2056 store::operator= (const store &other)
2058 /* Delete existing cluster map. */
2059 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2060 iter != m_cluster_map.end ();
2061 ++iter)
2062 delete (*iter).second;
2063 m_cluster_map.empty ();
2065 m_called_unknown_fn = other.m_called_unknown_fn;
2067 for (cluster_map_t::iterator iter = other.m_cluster_map.begin ();
2068 iter != other.m_cluster_map.end ();
2069 ++iter)
2071 const region *reg = (*iter).first;
2072 gcc_assert (reg);
2073 binding_cluster *c = (*iter).second;
2074 gcc_assert (c);
2075 m_cluster_map.put (reg, new binding_cluster (*c));
2077 return *this;
2080 /* store's equality operator. */
2082 bool
2083 store::operator== (const store &other) const
2085 if (m_called_unknown_fn != other.m_called_unknown_fn)
2086 return false;
2088 if (m_cluster_map.elements () != other.m_cluster_map.elements ())
2089 return false;
2091 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2092 iter != m_cluster_map.end ();
2093 ++iter)
2095 const region *reg = (*iter).first;
2096 binding_cluster *c = (*iter).second;
2097 binding_cluster **other_slot
2098 = const_cast <cluster_map_t &> (other.m_cluster_map).get (reg);
2099 if (other_slot == NULL)
2100 return false;
2101 if (*c != **other_slot)
2102 return false;
2105 gcc_checking_assert (hash () == other.hash ());
2107 return true;
2110 /* Get a hash value for this store. */
2112 hashval_t
2113 store::hash () const
2115 hashval_t result = 0;
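/* XOR is commutative and associative, so the result does not depend on
   the order in which the clusters are visited.  */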
2116 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2117 iter != m_cluster_map.end ();
2118 ++iter)
2119 result ^= (*iter).second->hash ();
2120 return result;
2123 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2124 removing duplicate parents. */
2126 static void
2127 get_sorted_parent_regions (auto_vec<const region *> *out,
2128 auto_vec<const region *> &in)
2130 /* Get the set of parent regions. */
2131 hash_set<const region *> parent_regions;
2132 const region *iter_reg;
2133 unsigned i;
2134 FOR_EACH_VEC_ELT (in, i, iter_reg)
2136 const region *parent_reg = iter_reg->get_parent_region ();
2137 gcc_assert (parent_reg);
2138 parent_regions.add (parent_reg);
2141 /* Write to OUT. */
2142 for (hash_set<const region *>::iterator iter = parent_regions.begin();
2143 iter != parent_regions.end(); ++iter)
2144 out->safe_push (*iter);
2146 /* Sort OUT. */
2147 out->qsort (region::cmp_ptr_ptr);
2150 /* Dump a representation of this store to PP, using SIMPLE to control how
2151 svalues and regions are printed.
2152 MGR, if non-NULL, is used to simplify the dump; it can be NULL
2153 (to make it easier to use from the debugger). */
2155 void
2156 store::dump_to_pp (pretty_printer *pp, bool simple, bool multiline,
2157 store_manager *mgr) const
2159 /* Sort into some deterministic order. */
2160 auto_vec<const region *> base_regions;
2161 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2162 iter != m_cluster_map.end (); ++iter)
2164 const region *base_reg = (*iter).first;
2165 base_regions.safe_push (base_reg);
2167 base_regions.qsort (region::cmp_ptr_ptr);
2169 /* Gather clusters, organize by parent region, so that we can group
2170 together locals, globals, etc. */
2171 auto_vec<const region *> parent_regions;
2172 get_sorted_parent_regions (&parent_regions, base_regions);
2174 const region *parent_reg;
2175 unsigned i;
2176 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2178 gcc_assert (parent_reg);
2179 pp_string (pp, "clusters within ");
2180 parent_reg->dump_to_pp (pp, simple);
2181 if (multiline)
2182 pp_newline (pp);
2183 else
2184 pp_string (pp, " {");
2186 const region *base_reg;
2187 unsigned j;
2188 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2190 /* This is O(N * M), but N ought to be small. */
2191 if (base_reg->get_parent_region () != parent_reg)
2192 continue;
2193 binding_cluster *cluster
2194 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2195 if (!multiline)
2197 if (j > 0)
2198 pp_string (pp, ", ");
2200 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2202 /* Special-case to simplify dumps for the common case where
2203 we just have one value directly bound to the whole of a
2204 region. */
2205 if (multiline)
2207 pp_string (pp, " cluster for: ");
2208 base_reg->dump_to_pp (pp, simple);
2209 pp_string (pp, ": ");
2210 sval->dump_to_pp (pp, simple);
2211 if (cluster->escaped_p ())
2212 pp_string (pp, " (ESCAPED)");
2213 if (cluster->touched_p ())
2214 pp_string (pp, " (TOUCHED)");
2215 pp_newline (pp);
2217 else
2219 pp_string (pp, "region: {");
2220 base_reg->dump_to_pp (pp, simple);
2221 pp_string (pp, ", value: ");
2222 sval->dump_to_pp (pp, simple);
2223 if (cluster->escaped_p ())
2224 pp_string (pp, " (ESCAPED)");
2225 if (cluster->touched_p ())
2226 pp_string (pp, " (TOUCHED)");
2227 pp_string (pp, "}");
2230 else if (multiline)
2232 pp_string (pp, " cluster for: ");
2233 base_reg->dump_to_pp (pp, simple);
2234 pp_newline (pp);
2235 cluster->dump_to_pp (pp, simple, multiline);
2237 else
2239 pp_string (pp, "base region: {");
2240 base_reg->dump_to_pp (pp, simple);
2241 pp_string (pp, "} has cluster: {");
2242 cluster->dump_to_pp (pp, simple, multiline);
2243 pp_string (pp, "}");
2246 if (!multiline)
2247 pp_string (pp, "}");
2249 pp_printf (pp, "m_called_unknown_fn: %s",
2250 m_called_unknown_fn ? "TRUE" : "FALSE");
2251 if (multiline)
2252 pp_newline (pp);
2255 /* Dump a multiline representation of this store to stderr. */
2257 DEBUG_FUNCTION void
2258 store::dump (bool simple) const
2260 pretty_printer pp;
2261 pp_format_decoder (&pp) = default_tree_printer;
2262 pp_show_color (&pp) = pp_show_color (global_dc->printer);
2263 pp.buffer->stream = stderr;
2264 dump_to_pp (&pp, simple, true, NULL);
2265 pp_newline (&pp);
2266 pp_flush (&pp);
2269 /* Assert that this object is valid. */
2271 void
2272 store::validate () const
2274 for (auto iter : m_cluster_map)
2275 iter.second->validate ();
2278 /* Return a new json::object of the form
2279 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2280 ... for each cluster within parent region},
2281 ...for each parent region,
2282 "called_unknown_fn": true/false}. */
2284 json::object *
2285 store::to_json () const
2287 json::object *store_obj = new json::object ();
2289 /* Sort into some deterministic order. */
2290 auto_vec<const region *> base_regions;
2291 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2292 iter != m_cluster_map.end (); ++iter)
2294 const region *base_reg = (*iter).first;
2295 base_regions.safe_push (base_reg);
2297 base_regions.qsort (region::cmp_ptr_ptr);
2299 /* Gather clusters, organize by parent region, so that we can group
2300 together locals, globals, etc. */
2301 auto_vec<const region *> parent_regions;
2302 get_sorted_parent_regions (&parent_regions, base_regions);
2304 const region *parent_reg;
2305 unsigned i;
2306 FOR_EACH_VEC_ELT (parent_regions, i, parent_reg)
2308 gcc_assert (parent_reg);
2310 json::object *clusters_in_parent_reg_obj = new json::object ();
2312 const region *base_reg;
2313 unsigned j;
2314 FOR_EACH_VEC_ELT (base_regions, j, base_reg)
2316 /* This is O(N * M), but N ought to be small. */
2317 if (base_reg->get_parent_region () != parent_reg)
2318 continue;
2319 binding_cluster *cluster
2320 = *const_cast<cluster_map_t &> (m_cluster_map).get (base_reg);
2321 label_text base_reg_desc = base_reg->get_desc ();
2322 clusters_in_parent_reg_obj->set (base_reg_desc.m_buffer,
2323 cluster->to_json ());
2324 base_reg_desc.maybe_free ();
2326 label_text parent_reg_desc = parent_reg->get_desc ();
2327 store_obj->set (parent_reg_desc.m_buffer, clusters_in_parent_reg_obj);
2328 parent_reg_desc.maybe_free ();
2331 store_obj->set ("called_unknown_fn", new json::literal (m_called_unknown_fn));
2333 return store_obj;
2336 /* Get any svalue bound to REG, or NULL. */
2338 const svalue *
2339 store::get_any_binding (store_manager *mgr, const region *reg) const
2341 const region *base_reg = reg->get_base_region ();
2342 binding_cluster **cluster_slot
2343 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg);
2344 if (!cluster_slot)
2345 return NULL;
2346 return (*cluster_slot)->get_any_binding (mgr, reg);
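/* Note the two-level lookup: e.g. for a read of "s.f" we first find the
   cluster keyed by the base region for "s", then ask that cluster for a
   value covering the binding_key (concrete bit range or symbolic key)
   for "f".  */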
2349 /* Set the value of LHS_REG to RHS_SVAL. */
2351 void
2352 store::set_value (store_manager *mgr, const region *lhs_reg,
2353 const svalue *rhs_sval,
2354 uncertainty_t *uncertainty)
2356 remove_overlapping_bindings (mgr, lhs_reg);
2358 rhs_sval = simplify_for_binding (rhs_sval);
2360 const region *lhs_base_reg = lhs_reg->get_base_region ();
2361 binding_cluster *lhs_cluster;
2362 if (lhs_base_reg->symbolic_for_unknown_ptr_p ())
2364 /* Reject attempting to bind values into a symbolic region
2365 for an unknown ptr; merely invalidate values below. */
2366 lhs_cluster = NULL;
2368 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2369 then treat the region being pointed to as having escaped. */
2370 if (const region_svalue *ptr_sval = rhs_sval->dyn_cast_region_svalue ())
2372 const region *ptr_dst = ptr_sval->get_pointee ();
2373 const region *ptr_base_reg = ptr_dst->get_base_region ();
2374 mark_as_escaped (ptr_base_reg);
2377 else
2379 lhs_cluster = get_or_create_cluster (lhs_base_reg);
2380 lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
2383 /* Bindings to a cluster can affect other clusters if a symbolic
2384 base region is involved.
2385 Writes to concrete clusters can't affect other concrete clusters,
2386 but can affect symbolic clusters.
2387 Writes to symbolic clusters can affect both concrete and symbolic
2388 clusters.
2389 Invalidate our knowledge of other clusters that might have been
2390 affected by the write. */
2391 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2392 iter != m_cluster_map.end (); ++iter)
2394 const region *iter_base_reg = (*iter).first;
2395 binding_cluster *iter_cluster = (*iter).second;
2396 if (iter_base_reg != lhs_base_reg
2397 && (lhs_cluster == NULL
2398 || lhs_cluster->symbolic_p ()
2399 || iter_cluster->symbolic_p ()))
2401 tristate t_alias = eval_alias (lhs_base_reg, iter_base_reg);
2402 switch (t_alias.get_value ())
2404 default:
2405 gcc_unreachable ();
2407 case tristate::TS_UNKNOWN:
2408 iter_cluster->mark_region_as_unknown (mgr, iter_base_reg,
2409 uncertainty);
2410 break;
2412 case tristate::TS_TRUE:
2413 gcc_unreachable ();
2414 break;
2416 case tristate::TS_FALSE:
2417 /* If they can't be aliases, then don't invalidate this
2418 cluster. */
2419 break;
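/* As an illustrative sketch (not a testcase), when analyzing:

     int g;
     void test (int *p)
     {
       g = 42;
       *p = 17;
     }

   the write through "p" binds into a symbolic cluster; since "p" might
   point at "g", eval_alias returns TS_UNKNOWN for "g", so the binding of
   42 to "g" is marked unknown rather than kept.  */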
2425 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2427 tristate
2428 store::eval_alias (const region *base_reg_a,
2429 const region *base_reg_b) const
2431 /* SSA names can't alias. */
2432 tree decl_a = base_reg_a->maybe_get_decl ();
2433 if (decl_a && TREE_CODE (decl_a) == SSA_NAME)
2434 return tristate::TS_FALSE;
2435 tree decl_b = base_reg_b->maybe_get_decl ();
2436 if (decl_b && TREE_CODE (decl_b) == SSA_NAME)
2437 return tristate::TS_FALSE;
2439 /* Try both ways, for symmetry. */
2440 tristate ts_ab = eval_alias_1 (base_reg_a, base_reg_b);
2441 if (ts_ab.is_false ())
2442 return tristate::TS_FALSE;
2443 tristate ts_ba = eval_alias_1 (base_reg_b, base_reg_a);
2444 if (ts_ba.is_false ())
2445 return tristate::TS_FALSE;
2446 return tristate::TS_UNKNOWN;
2449 /* Half of store::eval_alias; called twice for symmetry. */
2451 tristate
2452 store::eval_alias_1 (const region *base_reg_a,
2453 const region *base_reg_b) const
2455 if (const symbolic_region *sym_reg_a
2456 = base_reg_a->dyn_cast_symbolic_region ())
2458 const svalue *sval_a = sym_reg_a->get_pointer ();
2459 if (sval_a->get_kind () == SK_INITIAL)
2460 if (tree decl_b = base_reg_b->maybe_get_decl ())
2461 if (!is_global_var (decl_b))
2463 /* The initial value of a pointer can't point to a local. */
2464 return tristate::TS_FALSE;
2466 if (sval_a->get_kind () == SK_INITIAL
2467 && base_reg_b->get_kind () == RK_HEAP_ALLOCATED)
2469 /* The initial value of a pointer can't point to a
2470 region that was allocated on the heap after the beginning of the
2471 path. */
2472 return tristate::TS_FALSE;
2474 if (const widening_svalue *widening_sval_a
2475 = sval_a->dyn_cast_widening_svalue ())
2477 const svalue *base = widening_sval_a->get_base_svalue ();
2478 if (const region_svalue *region_sval
2479 = base->dyn_cast_region_svalue ())
2481 const region *pointee = region_sval->get_pointee ();
2482 /* If sval_a is WIDENING(&REGION, OP) and B can't alias REGION,
2483 then B can't alias A either.
2484 For example, A might arise from
2485 for (ptr = &REGION; ...; ptr++)
2486 where sval_a is ptr in the 2nd iteration of the loop.
2487 We want to ensure that "*ptr" can only clobber things
2488 within REGION's base region. */
2489 tristate ts = eval_alias (pointee->get_base_region (),
2490 base_reg_b);
2491 if (ts.is_false ())
2492 return tristate::TS_FALSE;
2496 return tristate::TS_UNKNOWN;
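/* For instance (a sketch), in:

     void test (int *p)
     {
       int buf[2];
       buf[0] = 1;
       *p = 42;
     }

   the pointer written through has an SK_INITIAL value (the value of "p"
   on entry to "test"), and "buf" is a local, so the two can't alias and
   the binding for "buf" survives the write through "p".  */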
2499 /* Remove all bindings overlapping REG within this store. */
2501 void
2502 store::clobber_region (store_manager *mgr, const region *reg)
2504 const region *base_reg = reg->get_base_region ();
2505 binding_cluster **slot = m_cluster_map.get (base_reg);
2506 if (!slot)
2507 return;
2508 binding_cluster *cluster = *slot;
2509 cluster->clobber_region (mgr, reg);
2510 if (cluster->redundant_p ())
2512 delete cluster;
2513 m_cluster_map.remove (base_reg);
2517 /* Remove any bindings for REG within this store. */
2519 void
2520 store::purge_region (store_manager *mgr, const region *reg)
2522 const region *base_reg = reg->get_base_region ();
2523 binding_cluster **slot = m_cluster_map.get (base_reg);
2524 if (!slot)
2525 return;
2526 binding_cluster *cluster = *slot;
2527 cluster->purge_region (mgr, reg);
2528 if (cluster->redundant_p ())
2530 delete cluster;
2531 m_cluster_map.remove (base_reg);
2535 /* Fill REG with SVAL. */
2537 void
2538 store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
2540 const region *base_reg = reg->get_base_region ();
2541 if (base_reg->symbolic_for_unknown_ptr_p ())
2542 return;
2543 binding_cluster *cluster = get_or_create_cluster (base_reg);
2544 cluster->fill_region (mgr, reg, sval);
2547 /* Zero-fill REG. */
2549 void
2550 store::zero_fill_region (store_manager *mgr, const region *reg)
2552 region_model_manager *sval_mgr = mgr->get_svalue_manager ();
2553 const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
2554 fill_region (mgr, reg, zero_sval);
2557 /* Mark REG as having unknown content. */
2559 void
2560 store::mark_region_as_unknown (store_manager *mgr, const region *reg,
2561 uncertainty_t *uncertainty)
2563 const region *base_reg = reg->get_base_region ();
2564 if (base_reg->symbolic_for_unknown_ptr_p ())
2565 return;
2566 binding_cluster *cluster = get_or_create_cluster (base_reg);
2567 cluster->mark_region_as_unknown (mgr, reg, uncertainty);
2570 /* Purge state involving SVAL. */
2572 void
2573 store::purge_state_involving (const svalue *sval,
2574 region_model_manager *sval_mgr)
2576 auto_vec <const region *> base_regs_to_purge;
2577 for (auto iter : m_cluster_map)
2579 const region *base_reg = iter.first;
2580 if (base_reg->involves_p (sval))
2581 base_regs_to_purge.safe_push (base_reg);
2582 else
2584 binding_cluster *cluster = iter.second;
2585 cluster->purge_state_involving (sval, sval_mgr);
2589 for (auto iter : base_regs_to_purge)
2590 purge_cluster (iter);
2593 /* Get the cluster for BASE_REG, or NULL (const version). */
2595 const binding_cluster *
2596 store::get_cluster (const region *base_reg) const
2598 gcc_assert (base_reg);
2599 gcc_assert (base_reg->get_base_region () == base_reg);
2600 if (binding_cluster **slot
2601 = const_cast <cluster_map_t &> (m_cluster_map).get (base_reg))
2602 return *slot;
2603 else
2604 return NULL;
2607 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2609 binding_cluster *
2610 store::get_cluster (const region *base_reg)
2612 gcc_assert (base_reg);
2613 gcc_assert (base_reg->get_base_region () == base_reg);
2614 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2615 return *slot;
2616 else
2617 return NULL;
2620 /* Get the cluster for BASE_REG, creating it if it doesn't already exist. */
2622 binding_cluster *
2623 store::get_or_create_cluster (const region *base_reg)
2625 gcc_assert (base_reg);
2626 gcc_assert (base_reg->get_base_region () == base_reg);
2628 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2629 gcc_assert (!base_reg->symbolic_for_unknown_ptr_p ());
2631 if (binding_cluster **slot = m_cluster_map.get (base_reg))
2632 return *slot;
2634 binding_cluster *cluster = new binding_cluster (base_reg);
2635 m_cluster_map.put (base_reg, cluster);
2637 return cluster;
2640 /* Remove any cluster for BASE_REG, for use by
2641 region_model::unbind_region_and_descendents
2642 when popping stack frames and handling deleted heap regions. */
2644 void
2645 store::purge_cluster (const region *base_reg)
2647 gcc_assert (base_reg->get_base_region () == base_reg);
2648 binding_cluster **slot = m_cluster_map.get (base_reg);
2649 if (!slot)
2650 return;
2651 binding_cluster *cluster = *slot;
2652 delete cluster;
2653 m_cluster_map.remove (base_reg);
2656 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
2657 Return true if successful, or false if the stores can't be merged. */
2659 bool
2660 store::can_merge_p (const store *store_a, const store *store_b,
2661 store *out_store, store_manager *mgr,
2662 model_merger *merger)
2664 if (store_a->m_called_unknown_fn || store_b->m_called_unknown_fn)
2665 out_store->m_called_unknown_fn = true;
2667 /* Get the union of all base regions for STORE_A and STORE_B. */
2668 hash_set<const region *> base_regions;
2669 for (cluster_map_t::iterator iter_a = store_a->m_cluster_map.begin ();
2670 iter_a != store_a->m_cluster_map.end (); ++iter_a)
2672 const region *base_reg_a = (*iter_a).first;
2673 base_regions.add (base_reg_a);
2675 for (cluster_map_t::iterator iter_b = store_b->m_cluster_map.begin ();
2676 iter_b != store_b->m_cluster_map.end (); ++iter_b)
2678 const region *base_reg_b = (*iter_b).first;
2679 base_regions.add (base_reg_b);
2682 /* Sort the base regions before considering them. This ought not to
2683 affect the results, but can affect which types UNKNOWN_REGIONs are
2684 created for in a run; sorting them thus avoids minor differences
2685 in logfiles. */
2686 auto_vec<const region *> vec_base_regions (base_regions.elements ());
2687 for (hash_set<const region *>::iterator iter = base_regions.begin ();
2688 iter != base_regions.end (); ++iter)
2689 vec_base_regions.quick_push (*iter);
2690 vec_base_regions.qsort (region::cmp_ptr_ptr);
2691 unsigned i;
2692 const region *base_reg;
2693 FOR_EACH_VEC_ELT (vec_base_regions, i, base_reg)
2695 const binding_cluster *cluster_a = store_a->get_cluster (base_reg);
2696 const binding_cluster *cluster_b = store_b->get_cluster (base_reg);
2697 /* At least one of cluster_a and cluster_b must be non-NULL. */
2698 binding_cluster *out_cluster
2699 = out_store->get_or_create_cluster (base_reg);
2700 if (!binding_cluster::can_merge_p (cluster_a, cluster_b,
2701 out_cluster, out_store, mgr, merger))
2702 return false;
2704 return true;
2707 /* Mark the cluster for BASE_REG as having escaped.
2708 For use when handling an unrecognized function call, and
2709 for params to "top-level" calls.
2710 Further unknown function calls could touch it, even if the cluster
2711 isn't reachable from args of those calls. */
2713 void
2714 store::mark_as_escaped (const region *base_reg)
2716 gcc_assert (base_reg);
2717 gcc_assert (base_reg->get_base_region () == base_reg);
2719 if (base_reg->symbolic_for_unknown_ptr_p ())
2720 return;
2722 binding_cluster *cluster = get_or_create_cluster (base_reg);
2723 cluster->mark_as_escaped ();
2726 /* Handle an unknown fncall by updating any clusters that have escaped
2727 (either in this fncall, or in a prior one). */
2729 void
2730 store::on_unknown_fncall (const gcall *call, store_manager *mgr)
2732 m_called_unknown_fn = true;
2734 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2735 iter != m_cluster_map.end (); ++iter)
2736 (*iter).second->on_unknown_fncall (call, mgr);
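/* For example (an illustrative sketch):

     extern void unknown_fn (int *);
     extern void other_unknown_fn (void);

     int x = 42;
     unknown_fn (&x);
     other_unknown_fn ();

   the cluster for "x" escapes at the first call; the second call takes
   no arguments yet could still write to "x" through the escaped pointer,
   which is why every escaped cluster gets a fresh conjured value on each
   unknown call.  */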
2739 /* Return true if a non-const pointer to BASE_REG (or something within it)
2740 has escaped to code outside of the TU being analyzed. */
2742 bool
2743 store::escaped_p (const region *base_reg) const
2745 gcc_assert (base_reg);
2746 gcc_assert (base_reg->get_base_region () == base_reg);
2748 if (binding_cluster **cluster_slot
2749 = const_cast <cluster_map_t &>(m_cluster_map).get (base_reg))
2750 return (*cluster_slot)->escaped_p ();
2751 return false;
2754 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
2755 this store, using VISITED to ensure the traversal terminates. */
2757 void
2758 store::get_representative_path_vars (const region_model *model,
2759 svalue_set *visited,
2760 const svalue *sval,
2761 auto_vec<path_var> *out_pvs) const
2763 gcc_assert (sval);
2765 /* Find all bindings that reference SVAL. */
2766 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2767 iter != m_cluster_map.end (); ++iter)
2769 const region *base_reg = (*iter).first;
2770 binding_cluster *cluster = (*iter).second;
2771 cluster->get_representative_path_vars (model, visited, base_reg, sval,
2772 out_pvs);
2775 if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
2777 const region *reg = init_sval->get_region ();
2778 if (path_var pv = model->get_representative_path_var (reg,
2779 visited))
2780 out_pvs->safe_push (pv);
2784 /* Remove all bindings overlapping REG within this store, removing
2785 any clusters that become redundant. */
2787 void
2788 store::remove_overlapping_bindings (store_manager *mgr, const region *reg)
2790 const region *base_reg = reg->get_base_region ();
2791 if (binding_cluster **cluster_slot = m_cluster_map.get (base_reg))
2793 binding_cluster *cluster = *cluster_slot;
2794 if (reg == base_reg && !escaped_p (base_reg))
2796 /* Remove whole cluster. */
2797 m_cluster_map.remove (base_reg);
2798 delete cluster;
2799 return;
2801 cluster->remove_overlapping_bindings (mgr, reg, NULL);
2805 /* Subclass of visitor that accumulates a hash_set of the regions that
2806 were visited. */
2808 struct region_finder : public visitor
2810 void visit_region (const region *reg) FINAL OVERRIDE
2812 m_regs.add (reg);
2815 hash_set<const region *> m_regs;
2818 /* Canonicalize this store, to maximize the chance of equality between
2819 instances. */
2821 void
2822 store::canonicalize (store_manager *mgr)
2824 /* If we have e.g.:
2825 cluster for: HEAP_ALLOCATED_REGION(543)
2826 ESCAPED
2827 TOUCHED
2828 where the heap region is empty and unreferenced, then purge that
2829 cluster, to avoid unbounded state chains involving these. */
2831 /* Find regions that are referenced by bound values in the store. */
2832 region_finder s;
2833 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2834 iter != m_cluster_map.end (); ++iter)
2836 binding_cluster *cluster = (*iter).second;
2837 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
2838 bind_iter != cluster->m_map.end (); ++bind_iter)
2839 (*bind_iter).second->accept (&s);
2842 /* Locate heap-allocated regions that have empty bindings that weren't
2843 found above. */
2844 hash_set<const region *> purgeable_regions;
2845 for (cluster_map_t::iterator iter = m_cluster_map.begin ();
2846 iter != m_cluster_map.end (); ++iter)
2848 const region *base_reg = (*iter).first;
2849 binding_cluster *cluster = (*iter).second;
2850 if (base_reg->get_kind () == RK_HEAP_ALLOCATED)
2852 if (cluster->empty_p ())
2853 if (!s.m_regs.contains (base_reg))
2854 purgeable_regions.add (base_reg);
2856 /* Also cover the UNKNOWN case. */
2857 if (const svalue *sval = cluster->maybe_get_simple_value (mgr))
2858 if (sval->get_kind () == SK_UNKNOWN)
2859 if (!s.m_regs.contains (base_reg))
2860 purgeable_regions.add (base_reg);
2864 /* Purge them. */
2865 for (hash_set<const region *>::iterator iter = purgeable_regions.begin ();
2866 iter != purgeable_regions.end (); ++iter)
2868 const region *base_reg = *iter;
2869 purge_cluster (base_reg);
2873 /* Subroutine for use by exploded_path::feasible_p.
2875 We need to deal with state differences between:
2876 (a) when the exploded_graph is being initially constructed and
2877 (b) when replaying the state changes along a specific path
2878 in exploded_path::feasible_p.
2880 In (a), state merging happens, so when exploring a loop
2881 for (i = 0; i < 1024; i++)
2882 on successive iterations we have i == 0, then i == WIDENING.
2884 In (b), no state merging happens, so naively replaying the path
2885 that goes twice through the loop then exits it
2886 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
2887 that exits the loop, which would be found to be infeasible as i == 1,
2888 and the path would be rejected.
2890 We need to fix up state during replay. This subroutine is
2891 called whenever we enter a supernode that we've already
2892 visited along this exploded_path, passing in OTHER_STORE
2893 from the destination enode's state.
2895 Find bindings to widening values in OTHER_STORE.
2896 For all that are found, update the binding in this store to UNKNOWN. */
2898 void
2899 store::loop_replay_fixup (const store *other_store,
2900 region_model_manager *mgr)
2902 gcc_assert (other_store);
2903 for (cluster_map_t::iterator iter = other_store->m_cluster_map.begin ();
2904 iter != other_store->m_cluster_map.end (); ++iter)
2906 const region *base_reg = (*iter).first;
2907 binding_cluster *cluster = (*iter).second;
2908 for (binding_cluster::iterator_t bind_iter = cluster->m_map.begin ();
2909 bind_iter != cluster->m_map.end (); ++bind_iter)
2911 const binding_key *key = (*bind_iter).first;
2912 const svalue *sval = (*bind_iter).second;
2913 if (sval->get_kind () == SK_WIDENING)
2915 binding_cluster *this_cluster
2916 = get_or_create_cluster (base_reg);
2917 const svalue *unknown
2918 = mgr->get_or_create_unknown_svalue (sval->get_type ());
2919 this_cluster->bind_key (key, unknown);
2925 #if CHECKING_P
2927 namespace selftest {
2929 /* Verify that bit_range::intersects_p works as expected. */
2931 static void
2932 test_bit_range_intersects_p ()
2934 bit_range b0 (0, 1);
2935 bit_range b1 (1, 1);
2936 bit_range b2 (2, 1);
2937 bit_range b3 (3, 1);
2938 bit_range b4 (4, 1);
2939 bit_range b5 (5, 1);
2940 bit_range b6 (6, 1);
2941 bit_range b7 (7, 1);
2942 bit_range b1_to_6 (1, 6);
2943 bit_range b0_to_7 (0, 8);
2944 bit_range b3_to_5 (3, 3);
2945 bit_range b6_to_7 (6, 2);
2947 /* Self-intersection is true. */
2948 ASSERT_TRUE (b0.intersects_p (b0));
2949 ASSERT_TRUE (b7.intersects_p (b7));
2950 ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
2951 ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
2953 ASSERT_FALSE (b0.intersects_p (b1));
2954 ASSERT_FALSE (b1.intersects_p (b0));
2955 ASSERT_FALSE (b0.intersects_p (b7));
2956 ASSERT_FALSE (b7.intersects_p (b0));
2958 ASSERT_TRUE (b0_to_7.intersects_p (b0));
2959 ASSERT_TRUE (b0_to_7.intersects_p (b7));
2960 ASSERT_TRUE (b0.intersects_p (b0_to_7));
2961 ASSERT_TRUE (b7.intersects_p (b0_to_7));
2963 ASSERT_FALSE (b0.intersects_p (b1_to_6));
2964 ASSERT_FALSE (b1_to_6.intersects_p (b0));
2965 ASSERT_TRUE (b1.intersects_p (b1_to_6));
2966 ASSERT_TRUE (b1_to_6.intersects_p (b1));
2967 ASSERT_TRUE (b1_to_6.intersects_p (b6));
2968 ASSERT_FALSE (b1_to_6.intersects_p (b7));
2970 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
2971 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
2973 ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
2974 ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
2976 bit_range r1 (0,0);
2977 bit_range r2 (0,0);
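/* The expected values below express the overlap (bits 1-6) relative to
   each range: it starts at offset 0 within b1_to_6 and at offset 1
   within b0_to_7.  */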
2978 ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
2979 ASSERT_EQ (r1.get_start_bit_offset (), 0);
2980 ASSERT_EQ (r1.m_size_in_bits, 6);
2981 ASSERT_EQ (r2.get_start_bit_offset (), 1);
2982 ASSERT_EQ (r2.m_size_in_bits, 6);
2984 ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
2985 ASSERT_EQ (r1.get_start_bit_offset (), 1);
2986 ASSERT_EQ (r1.m_size_in_bits, 6);
2987 ASSERT_EQ (r2.get_start_bit_offset (), 0);
2988 ASSERT_EQ (r2.m_size_in_bits, 6);
2991 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
2993 static void
2994 assert_bit_range_from_mask_eq (const location &loc,
2995 unsigned HOST_WIDE_INT mask,
2996 const bit_range &expected)
2998 bit_range actual (0, 0);
2999 bool ok = bit_range::from_mask (mask, &actual);
3000 ASSERT_TRUE_AT (loc, ok);
3001 ASSERT_EQ_AT (loc, actual, expected);
3004 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3005 out EXPECTED_BIT_RANGE. */
3007 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3008 SELFTEST_BEGIN_STMT \
3009 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3010 EXPECTED_BIT_RANGE); \
3011 SELFTEST_END_STMT
3013 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3015 static void
3016 assert_no_bit_range_from_mask_eq (const location &loc,
3017 unsigned HOST_WIDE_INT mask)
3019 bit_range actual (0, 0);
3020 bool ok = bit_range::from_mask (mask, &actual);
3021 ASSERT_FALSE_AT (loc, ok);
3024 /* Assert that bit_range::from_mask (MASK) returns false. */
3026 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3027 SELFTEST_BEGIN_STMT \
3028 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3029 SELFTEST_END_STMT
3031 /* Verify that bit_range::from_mask works as expected. */
3033 static void
3034 test_bit_range_from_mask ()
3036 /* Should fail on zero. */
3037 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3039 /* Verify 1-bit masks. */
3040 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3041 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3042 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3043 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3044 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3045 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3046 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3047 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3049 /* Verify N-bit masks starting at bit 0. */
3050 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3051 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3052 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3053 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3054 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3055 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3056 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3057 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3059 /* Various other tests. */
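/* E.g. 0x30 is binary 110000: two contiguous set bits whose lowest is
   bit 4, hence bit_range (4, 2); 0x700 is binary 11100000000, giving
   bit_range (8, 3).  */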
3060 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3061 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3062 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3064 /* Multiple ranges of set bits should fail. */
3065 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3066 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3069 /* Implementation detail of ASSERT_OVERLAP. */
3071 static void
3072 assert_overlap (const location &loc,
3073 const concrete_binding *b1,
3074 const concrete_binding *b2)
3076 ASSERT_TRUE_AT (loc, b1->overlaps_p (*b2));
3077 ASSERT_TRUE_AT (loc, b2->overlaps_p (*b1));
3080 /* Implementation detail of ASSERT_DISJOINT. */
3082 static void
3083 assert_disjoint (const location &loc,
3084 const concrete_binding *b1,
3085 const concrete_binding *b2)
3087 ASSERT_FALSE_AT (loc, b1->overlaps_p (*b2));
3088 ASSERT_FALSE_AT (loc, b2->overlaps_p (*b1));
3091 /* Assert that B1 and B2 overlap, checking both ways. */
3093 #define ASSERT_OVERLAP(B1, B2) \
3094 SELFTEST_BEGIN_STMT \
3095 assert_overlap (SELFTEST_LOCATION, B1, B2); \
3096 SELFTEST_END_STMT
3098 /* Assert that B1 and B2 do not overlap, checking both ways. */
3100 #define ASSERT_DISJOINT(B1, B2) \
3101 SELFTEST_BEGIN_STMT \
3102 assert_disjoint (SELFTEST_LOCATION, B1, B2); \
3103 SELFTEST_END_STMT
3105 /* Verify that concrete_binding::overlaps_p works as expected. */
3107 static void
3108 test_binding_key_overlap ()
3110 store_manager mgr (NULL);
3112 /* Various 8-bit bindings. */
3113 const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
3114 const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
3115 const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
3116 const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
3118 /* 16-bit bindings. */
3119 const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
3120 const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
3121 const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
3123 /* 32-bit binding. */
3124 const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
3126 /* Everything should self-overlap. */
3127 ASSERT_OVERLAP (cb_0_7, cb_0_7);
3128 ASSERT_OVERLAP (cb_8_15, cb_8_15);
3129 ASSERT_OVERLAP (cb_16_23, cb_16_23);
3130 ASSERT_OVERLAP (cb_24_31, cb_24_31);
3131 ASSERT_OVERLAP (cb_0_15, cb_0_15);
3132 ASSERT_OVERLAP (cb_8_23, cb_8_23);
3133 ASSERT_OVERLAP (cb_16_31, cb_16_31);
3134 ASSERT_OVERLAP (cb_0_31, cb_0_31);
3136 /* Verify the 8-bit bindings that don't overlap each other. */
3137 ASSERT_DISJOINT (cb_0_7, cb_8_15);
3138 ASSERT_DISJOINT (cb_8_15, cb_16_23);
3140 /* Check for overlap of differently-sized bindings. */
3141 ASSERT_OVERLAP (cb_0_7, cb_0_31);
3142 /* ...and with differing start points. */
3143 ASSERT_OVERLAP (cb_8_15, cb_0_31);
3144 ASSERT_DISJOINT (cb_8_15, cb_16_31);
3145 ASSERT_OVERLAP (cb_16_23, cb_0_31);
3146 ASSERT_OVERLAP (cb_16_31, cb_0_31);
3148 ASSERT_DISJOINT (cb_0_7, cb_8_23);
3149 ASSERT_OVERLAP (cb_8_23, cb_16_23);
3150 ASSERT_OVERLAP (cb_8_23, cb_16_31);
3151 ASSERT_DISJOINT (cb_8_23, cb_24_31);
3154 /* Run all of the selftests within this file. */
3156 void
3157 analyzer_store_cc_tests ()
3159 test_bit_range_intersects_p ();
3160 test_bit_range_from_mask ();
3161 test_binding_key_overlap ();
3164 } // namespace selftest
3166 #endif /* CHECKING_P */
3168 } // namespace ana
3170 #endif /* #if ENABLE_ANALYZER */