1 /* Classes for modeling the state of memory.
2 Copyright (C) 2020-2024 Free Software Foundation, Inc.
3 Contributed by David Malcolm <dmalcolm@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
22 #define INCLUDE_MEMORY
24 #include "coretypes.h"
27 #include "basic-block.h"
29 #include "gimple-iterator.h"
30 #include "diagnostic-core.h"
35 #include "stringpool.h"
38 #include "fold-const.h"
39 #include "tree-pretty-print.h"
40 #include "diagnostic-color.h"
43 #include "analyzer/analyzer.h"
44 #include "analyzer/analyzer-logging.h"
45 #include "ordered-hash-map.h"
48 #include "analyzer/supergraph.h"
50 #include "analyzer/call-string.h"
51 #include "analyzer/program-point.h"
52 #include "analyzer/store.h"
53 #include "analyzer/region-model.h"
54 #include "analyzer/call-summary.h"
55 #include "analyzer/analyzer-selftests.h"
56 #include "stor-layout.h"
62 /* Dump SVALS to PP, sorting them to ensure determinism. */
65 dump_svalue_set (const hash_set
<const svalue
*> &svals
,
66 pretty_printer
*pp
, bool simple
)
/* Copy the set into a vector so the elements can be sorted
   (hash_set iteration order is not deterministic).  */
68 auto_vec
<const svalue
*> v
;
69 for (hash_set
<const svalue
*>::iterator iter
= svals
.begin ();
70 iter
!= svals
.end (); ++iter
)
/* Sort via svalue::cmp_ptr_ptr for stable output across runs.  */
74 v
.qsort (svalue::cmp_ptr_ptr
);
76 pp_character (pp
, '{');
/* Emit each svalue; SIMPLE controls the verbosity of each dump.  */
79 FOR_EACH_VEC_ELT (v
, i
, sval
)
83 sval
->dump_to_pp (pp
, simple
);
85 pp_character (pp
, '}');
88 /* class uncertainty_t. */
90 /* Dump this object to PP. */
93 uncertainty_t::dump_to_pp (pretty_printer
*pp
, bool simple
) const
/* Print both svalue sets tracked by this object, each via
   dump_svalue_set so the output is deterministic.  */
95 pp_string (pp
, "{m_maybe_bound_svals: ");
96 dump_svalue_set (m_maybe_bound_svals
, pp
, simple
);
98 pp_string (pp
, ", m_mutable_at_unknown_call_svals: ");
99 dump_svalue_set (m_mutable_at_unknown_call_svals
, pp
, simple
);
103 /* Dump this object to stderr. */
106 uncertainty_t::dump (bool simple
) const
/* Build a pretty_printer aimed at stderr, matching the color
   settings of the global diagnostic context, then delegate to
   dump_to_pp.  */
109 pp_format_decoder (&pp
) = default_tree_printer
;
110 pp_show_color (&pp
) = pp_show_color (global_dc
->printer
);
111 pp
.buffer
->stream
= stderr
;
112 dump_to_pp (&pp
, simple
);
117 /* class binding_key. */
/* Factory: make a binding_key for region R using MGR.
   Symbolic offsets or unknown sizes yield a symbolic binding;
   otherwise a concrete (bit-offset, bit-size) binding is used.  */
120 binding_key::make (store_manager
*mgr
, const region
*r
)
122 region_offset offset
= r
->get_offset (mgr
->get_svalue_manager ());
/* A symbolic offset can't be expressed as a concrete bit range.  */
123 if (offset
.symbolic_p ())
124 return mgr
->get_symbolic_binding (r
);
128 if (r
->get_bit_size (&bit_size
))
130 /* Must be non-empty. */
131 gcc_assert (bit_size
> 0);
132 return mgr
->get_concrete_binding (offset
.get_bit_offset (),
/* Fallback: size not known, so use a symbolic binding.  */
136 return mgr
->get_symbolic_binding (r
);
140 /* Dump this binding_key to stderr. */
143 binding_key::dump (bool simple
) const
/* Set up a stderr-backed pretty_printer (same pattern as the other
   ::dump methods in this file) and delegate to dump_to_pp.  */
146 pp_format_decoder (&pp
) = default_tree_printer
;
147 pp_show_color (&pp
) = pp_show_color (global_dc
->printer
);
148 pp
.buffer
->stream
= stderr
;
149 dump_to_pp (&pp
, simple
);
154 /* Get a description of this binding_key. */
157 binding_key::get_desc (bool simple
) const
/* Render into a local pretty_printer, then hand ownership of a
   duplicated copy of the formatted text to the returned label_text.  */
160 pp_format_decoder (&pp
) = default_tree_printer
;
161 dump_to_pp (&pp
, simple
);
162 return label_text::take (xstrdup (pp_formatted_text (&pp
)));
165 /* qsort callback. */
/* Adapter from qsort's (const void *, const void *) signature to
   binding_key::cmp: each argument points at a binding_key pointer.  */
168 binding_key::cmp_ptrs (const void *p1
, const void *p2
)
170 const binding_key
* const *pk1
= (const binding_key
* const *)p1
;
171 const binding_key
* const *pk2
= (const binding_key
* const *)p2
;
172 return cmp (*pk1
, *pk2
);
175 /* Comparator for binding_keys. */
178 binding_key::cmp (const binding_key
*k1
, const binding_key
*k2
)
/* Order concrete vs symbolic keys first; within each kind use a
   kind-specific comparison.  */
180 int concrete1
= k1
->concrete_p ();
181 int concrete2
= k2
->concrete_p ();
182 if (int concrete_cmp
= concrete1
- concrete2
)
/* Both concrete: compare bit ranges (start, then next offset)
   using wide-int comparisons.  */
186 const concrete_binding
*b1
= (const concrete_binding
*)k1
;
187 const concrete_binding
*b2
= (const concrete_binding
*)k2
;
188 if (int start_cmp
= wi::cmp (b1
->get_start_bit_offset (),
189 b2
->get_start_bit_offset (),
192 return wi::cmp (b1
->get_next_bit_offset (), b2
->get_next_bit_offset (),
/* Both symbolic: presumably compared via their regions
   (trailing comparison lines are not visible in this extract).  */
197 const symbolic_binding
*s1
= (const symbolic_binding
*)k1
;
198 const symbolic_binding
*s2
= (const symbolic_binding
*)k2
;
207 /* struct bit_range. */
/* Dump this bit_range to PP: prefer the more readable byte-range
   form when the range is byte-aligned, else print raw bit fields.  */
210 bit_range::dump_to_pp (pretty_printer
*pp
) const
212 byte_range
bytes (0, 0);
213 if (as_byte_range (&bytes
))
214 bytes
.dump_to_pp (pp
);
/* Not byte-aligned: print start/size/next bit offsets directly.  */
217 pp_string (pp
, "start: ");
218 pp_wide_int (pp
, m_start_bit_offset
, SIGNED
);
219 pp_string (pp
, ", size: ");
220 pp_wide_int (pp
, m_size_in_bits
, SIGNED
);
221 pp_string (pp
, ", next: ");
222 pp_wide_int (pp
, get_next_bit_offset (), SIGNED
);
226 /* Dump this object to stderr. */
229 bit_range::dump () const
/* Wire a local pretty_printer to stderr; the dump_to_pp call and
   flush are not visible in this extract.  */
232 pp
.buffer
->stream
= stderr
;
238 /* Generate a JSON value for this bit_range.
239 This is intended for debugging the analyzer rather
240 than serialization. */
243 bit_range::to_json () const
/* Build an object with "start_bit_offset" and "size_in_bits"
   entries; caller takes ownership of the new json::object.  */
245 json::object
*obj
= new json::object ();
246 obj
->set ("start_bit_offset",
247 bit_offset_to_json (m_start_bit_offset
));
248 obj
->set ("size_in_bits",
249 bit_offset_to_json (m_size_in_bits
));
253 /* If OTHER is a subset of this, return true and, if OUT is
254 non-null, write to *OUT the relative range of OTHER within this.
255 Otherwise return false. */
258 bit_range::contains_p (const bit_range
&other
, bit_range
*out
) const
/* OTHER is contained iff both its first and last bit lie within
   this range.  */
260 if (contains_p (other
.get_start_bit_offset ())
261 && contains_p (other
.get_last_bit_offset ()))
/* Express OTHER relative to this range's start.  */
265 out
->m_start_bit_offset
= other
.m_start_bit_offset
- m_start_bit_offset
;
266 out
->m_size_in_bits
= other
.m_size_in_bits
;
274 /* If OTHER intersects this, return true and write
275 the relative range of OTHER within THIS to *OUT_THIS,
276 and the relative range of THIS within OTHER to *OUT_OTHER.
277 Otherwise return false. */
280 bit_range::intersects_p (const bit_range
&other
,
282 bit_range
*out_other
) const
/* Standard half-open interval overlap test.  */
284 if (get_start_bit_offset () < other
.get_next_bit_offset ()
285 && other
.get_start_bit_offset () < get_next_bit_offset ())
/* Compute the absolute overlap, then rebase it once per range.  */
287 bit_offset_t overlap_start
288 = MAX (get_start_bit_offset (),
289 other
.get_start_bit_offset ());
290 bit_offset_t overlap_next
291 = MIN (get_next_bit_offset (),
292 other
.get_next_bit_offset ());
293 gcc_assert (overlap_next
> overlap_start
);
294 bit_range
abs_overlap_bits (overlap_start
, overlap_next
- overlap_start
);
/* operator- rebases the absolute overlap to a relative range.  */
295 *out_this
= abs_overlap_bits
- get_start_bit_offset ();
296 *out_other
= abs_overlap_bits
- other
.get_start_bit_offset ();
303 /* Return true if THIS and OTHER intersect and write the number
304 of bits both buffers overlap to *OUT_NUM_OVERLAP_BITS.
306 Otherwise return false. */
309 bit_range::intersects_p (const bit_range
&other
,
310 bit_size_t
*out_num_overlap_bits
) const
/* Same half-open overlap test as the two-out-param overload,
   but only the overlap width is reported.  */
312 if (get_start_bit_offset () < other
.get_next_bit_offset ()
313 && other
.get_start_bit_offset () < get_next_bit_offset ())
315 bit_offset_t overlap_start
= MAX (get_start_bit_offset (),
316 other
.get_start_bit_offset ());
317 bit_offset_t overlap_next
= MIN (get_next_bit_offset (),
318 other
.get_next_bit_offset ());
319 gcc_assert (overlap_next
> overlap_start
);
320 *out_num_overlap_bits
= overlap_next
- overlap_start
;
327 /* Return true if THIS exceeds OTHER and write the overhanging
328 bit range to OUT_OVERHANGING_BIT_RANGE. */
331 bit_range::exceeds_p (const bit_range
&other
,
332 bit_range
*out_overhanging_bit_range
) const
/* An empty range can't meaningfully exceed anything.  */
334 gcc_assert (!empty_p ());
336 if (other
.get_next_bit_offset () < get_next_bit_offset ())
338 /* THIS definitely exceeds OTHER. */
/* Overhang runs from just past OTHER (or from our own start,
   whichever is later) up to our end.  */
339 bit_offset_t start
= MAX (get_start_bit_offset (),
340 other
.get_next_bit_offset ());
341 bit_offset_t size
= get_next_bit_offset () - start
;
342 gcc_assert (size
> 0);
343 out_overhanging_bit_range
->m_start_bit_offset
= start
;
344 out_overhanging_bit_range
->m_size_in_bits
= size
;
351 /* Return true if THIS falls short of OFFSET and write the
352 bit range fallen short to OUT_FALL_SHORT_BITS. */
355 bit_range::falls_short_of_p (bit_offset_t offset
,
356 bit_range
*out_fall_short_bits
) const
358 gcc_assert (!empty_p ());
360 if (get_start_bit_offset () < offset
)
362 /* THIS falls short of OFFSET. */
/* The shortfall runs from our start to OFFSET, clamped to our
   own extent via MIN with get_next_bit_offset ().  */
363 bit_offset_t start
= get_start_bit_offset ();
364 bit_offset_t size
= MIN (offset
, get_next_bit_offset ()) - start
;
365 gcc_assert (size
> 0);
366 out_fall_short_bits
->m_start_bit_offset
= start
;
367 out_fall_short_bits
->m_size_in_bits
= size
;
/* Comparator for bit_ranges: order by start offset (signed compare),
   then by size (unsigned compare).  */
375 bit_range::cmp (const bit_range
&br1
, const bit_range
&br2
)
377 if (int start_cmp
= wi::cmps (br1
.m_start_bit_offset
,
378 br2
.m_start_bit_offset
))
381 return wi::cmpu (br1
.m_size_in_bits
, br2
.m_size_in_bits
);
384 /* Offset this range by OFFSET. */
/* Returns a new range shifted down by OFFSET; size is unchanged.  */
387 bit_range::operator- (bit_offset_t offset
) const
389 return bit_range (m_start_bit_offset
- offset
, m_size_in_bits
);
392 /* If MASK is a contiguous range of set bits, write them
393 to *OUT and return true.
394 Otherwise return false. */
397 bit_range::from_mask (unsigned HOST_WIDE_INT mask
, bit_range
*out
)
/* Scan bit-by-bit; iter_bit_mask tracks the bit at iter_bit_idx.  */
399 unsigned iter_bit_idx
= 0;
400 unsigned HOST_WIDE_INT iter_bit_mask
= 1;
402 /* Find the first contiguous run of set bits in MASK. */
404 /* Find first set bit in MASK. */
405 while (iter_bit_idx
< HOST_BITS_PER_WIDE_INT
)
407 if (mask
& iter_bit_mask
)
/* No set bits at all: loop ran off the end.  */
412 if (iter_bit_idx
== HOST_BITS_PER_WIDE_INT
)
416 unsigned first_set_iter_bit_idx
= iter_bit_idx
;
417 unsigned num_set_bits
= 1;
421 /* Find next unset bit in MASK. */
422 while (iter_bit_idx
< HOST_BITS_PER_WIDE_INT
)
424 if (!(mask
& iter_bit_mask
))
/* Run extends to the top bit: whole remainder is the range.  */
430 if (iter_bit_idx
== HOST_BITS_PER_WIDE_INT
)
432 *out
= bit_range (first_set_iter_bit_idx
, num_set_bits
);
436 /* We now have the first contiguous run of set bits in MASK.
437 Fail if any other bits are set. */
438 while (iter_bit_idx
< HOST_BITS_PER_WIDE_INT
)
440 if (mask
& iter_bit_mask
)
446 *out
= bit_range (first_set_iter_bit_idx
, num_set_bits
);
450 /* Attempt to convert this bit_range to a byte_range.
451 Return true if it is possible, writing the result to *OUT.
452 Otherwise return false. */
455 bit_range::as_byte_range (byte_range
*out
) const
/* Convertible only if both start and size are whole bytes.  */
457 if (m_start_bit_offset
% BITS_PER_UNIT
== 0
458 && m_size_in_bits
% BITS_PER_UNIT
== 0)
460 out
->m_start_byte_offset
= m_start_bit_offset
/ BITS_PER_UNIT
;
461 out
->m_size_in_bytes
= m_size_in_bits
/ BITS_PER_UNIT
;
467 /* Dump this object to PP. */
470 byte_range::dump_to_pp (pretty_printer
*pp
) const
/* Three display forms: "empty", a single "byte N", or a
   "bytes N-M" span using the last (inclusive) byte offset.  */
472 if (m_size_in_bytes
== 0)
474 pp_string (pp
, "empty");
476 else if (m_size_in_bytes
== 1)
478 pp_string (pp
, "byte ");
479 pp_wide_int (pp
, m_start_byte_offset
, SIGNED
);
483 pp_string (pp
, "bytes ");
484 pp_wide_int (pp
, m_start_byte_offset
, SIGNED
);
486 pp_wide_int (pp
, get_last_byte_offset (), SIGNED
);
490 /* Dump this object to stderr. */
493 byte_range::dump () const
/* Wire a local pretty_printer to stderr; the dump_to_pp call and
   flush are not visible in this extract.  */
496 pp
.buffer
->stream
= stderr
;
502 /* Generate a JSON value for this byte_range.
503 This is intended for debugging the analyzer rather
504 than serialization. */
507 byte_range::to_json () const
/* Mirrors bit_range::to_json, but in byte units; caller owns
   the returned json::object.  */
509 json::object
*obj
= new json::object ();
510 obj
->set ("start_byte_offset",
511 byte_offset_to_json (m_start_byte_offset
));
512 obj
->set ("size_in_bytes",
513 byte_offset_to_json (m_size_in_bytes
));
517 /* If OTHER is a subset of this, return true and write
518 to *OUT the relative range of OTHER within this.
519 Otherwise return false. */
522 byte_range::contains_p (const byte_range
&other
, byte_range
*out
) const
/* Byte-granularity analog of bit_range::contains_p: OTHER's first
   and last byte must both lie within this range.  */
524 if (contains_p (other
.get_start_byte_offset ())
525 && contains_p (other
.get_last_byte_offset ()))
527 out
->m_start_byte_offset
= other
.m_start_byte_offset
- m_start_byte_offset
;
528 out
->m_size_in_bytes
= other
.m_size_in_bytes
;
535 /* qsort comparator for byte ranges. */
538 byte_range::cmp (const byte_range
&br1
, const byte_range
&br2
)
540 /* Order first by offset. */
541 if (int start_cmp
= wi::cmps (br1
.m_start_byte_offset
,
542 br2
.m_start_byte_offset
))
545 /* ...then by size. */
/* Sizes are non-negative, hence the unsigned wide-int compare.  */
546 return wi::cmpu (br1
.m_size_in_bytes
, br2
.m_size_in_bytes
);
549 /* class concrete_binding : public binding_key. */
551 /* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
/* The "simple" flag is unused here: a bit range has one format.  */
554 concrete_binding::dump_to_pp (pretty_printer
*pp
, bool) const
556 m_bit_range
.dump_to_pp (pp
);
559 /* Return true if this binding overlaps with OTHER. */
562 concrete_binding::overlaps_p (const concrete_binding
&other
) const
/* Half-open interval overlap test on the bindings' bit offsets.  */
564 if (get_start_bit_offset () < other
.get_next_bit_offset ()
565 && get_next_bit_offset () > other
.get_start_bit_offset ())
570 /* If this is expressible as a concrete byte range, return true
571 and write it to *OUT. Otherwise return false. */
/* Thin wrapper: succeeds iff the bit range is byte-aligned.  */
574 concrete_binding::get_byte_range (byte_range
*out
) const
576 return m_bit_range
.as_byte_range (out
);
579 /* Comparator for use by vec<const concrete_binding *>::qsort. */
582 concrete_binding::cmp_ptr_ptr (const void *p1
, const void *p2
)
/* qsort hands us pointers to the vector elements, so one extra
   level of indirection before comparing the bit ranges.  */
584 const concrete_binding
*b1
= *(const concrete_binding
* const *)p1
;
585 const concrete_binding
*b2
= *(const concrete_binding
* const *)p2
;
587 return bit_range::cmp (b1
->m_bit_range
, b2
->m_bit_range
);
590 /* class symbolic_binding : public binding_key. */
/* Implementation of binding_key::dump_to_pp vfunc for
   symbolic_binding: print the underlying region.  */
593 symbolic_binding::dump_to_pp (pretty_printer
*pp
, bool simple
) const
595 //binding_key::dump_to_pp (pp, simple);
596 pp_string (pp
, "region: ");
597 m_region
->dump_to_pp (pp
, simple
);
600 /* Comparator for use by vec<const symbolic_binding *>::qsort. */
603 symbolic_binding::cmp_ptr_ptr (const void *p1
, const void *p2
)
/* Compare via region ids for a stable, deterministic order.  */
605 const symbolic_binding
*b1
= *(const symbolic_binding
* const *)p1
;
606 const symbolic_binding
*b2
= *(const symbolic_binding
* const *)p2
;
608 return region::cmp_ids (b1
->get_region (), b2
->get_region ());
611 /* The store is oblivious to the types of the svalues bound within
612 it: any type can get bound at any location.
613 Simplify any casts before binding.
615 For example, if we have:
616 struct big { int ia[1024]; };
618 memcpy (&dst, &src, sizeof (struct big));
619 this reaches us in gimple form as:
620 MEM <unsigned char[4096]> [(char * {ref-all})&dst]
621 = MEM <unsigned char[4096]> [(char * {ref-all})&src];
622 Using cast_region when handling the MEM_REF would give us:
623 INIT_VAL(CAST_REG(unsigned char[4096], src))
624 as rhs_sval, but we can fold that into a cast svalue:
625 CAST(unsigned char[4096], INIT_VAL(src))
626 We can discard that cast from the svalue when binding it in
627 the store for "dst", and simply store:
629 key: {kind: direct, start: 0, size: 32768, next: 32768}
630 value: ‘struct big’ {INIT_VAL(src)}. */
632 static const svalue
*
633 simplify_for_binding (const svalue
*sval
)
/* Strip a top-level cast when maybe_undo_cast finds one; the
   non-cast fallthrough is not visible in this extract.  */
635 if (const svalue
*cast_sval
= sval
->maybe_undo_cast ())
640 /* class binding_map. */
642 /* binding_map's copy ctor. */
/* Member-wise copy of the underlying map.  */
644 binding_map::binding_map (const binding_map
&other
)
645 : m_map (other
.m_map
)
649 /* binding_map's assignment operator. */
652 binding_map::operator=(const binding_map
&other
)
654 /* For now, assume we only ever copy to an empty cluster. */
655 gcc_assert (m_map
.elements () == 0);
/* Copy each key/value pair from OTHER individually.  */
656 for (map_t::iterator iter
= other
.m_map
.begin (); iter
!= other
.m_map
.end ();
659 const binding_key
*key
= (*iter
).first
;
660 const svalue
*sval
= (*iter
).second
;
661 m_map
.put (key
, sval
);
666 /* binding_map's equality operator. */
669 binding_map::operator== (const binding_map
&other
) const
/* Fast reject on differing element counts.  */
671 if (m_map
.elements () != other
.m_map
.elements ())
/* Every (key, sval) pair here must appear identically in OTHER;
   keys and svalues are interned, so pointer equality suffices.  */
674 for (map_t::iterator iter
= m_map
.begin (); iter
!= m_map
.end (); ++iter
)
676 const binding_key
*key
= (*iter
).first
;
677 const svalue
*sval
= (*iter
).second
;
678 const svalue
**other_slot
/* const_cast: hash_map::get is non-const, but no mutation occurs.  */
679 = const_cast <map_t
&> (other
.m_map
).get (key
);
680 if (other_slot
== NULL
)
682 if (sval
!= *other_slot
)
/* Equal maps must hash equally (checking builds only).  */
685 gcc_checking_assert (hash () == other
.hash ());
689 /* Generate a hash value for this binding_map. */
692 binding_map::hash () const
694 hashval_t result
= 0;
695 for (map_t::iterator iter
= m_map
.begin (); iter
!= m_map
.end (); ++iter
)
697 /* Use a new hasher for each key to avoid depending on the ordering
698 of keys when accumulating the result. */
699 inchash::hash hstate
;
700 hstate
.add_ptr ((*iter
).first
);
701 hstate
.add_ptr ((*iter
).second
);
/* XOR is commutative, so iteration order doesn't affect the hash.  */
702 result
^= hstate
.end ();
707 /* Dump a representation of this binding_map to PP.
708 SIMPLE controls how values and regions are to be printed.
709 If MULTILINE, then split the dump over multiple lines and
710 use whitespace for readability, otherwise put all on one line. */
713 binding_map::dump_to_pp (pretty_printer
*pp
, bool simple
,
714 bool multiline
) const
/* Collect and sort the keys for deterministic output.  */
716 auto_vec
<const binding_key
*> binding_keys
;
717 for (map_t::iterator iter
= m_map
.begin ();
718 iter
!= m_map
.end (); ++iter
)
720 const binding_key
*key
= (*iter
).first
;
721 binding_keys
.safe_push (key
);
723 binding_keys
.qsort (binding_key::cmp_ptrs
);
725 const binding_key
*key
;
727 FOR_EACH_VEC_ELT (binding_keys
, i
, key
)
729 const svalue
*value
= *const_cast <map_t
&> (m_map
).get (key
);
/* Multiline branch: one key/value pair per indented line pair.  */
732 pp_string (pp
, " key: {");
733 key
->dump_to_pp (pp
, simple
);
736 pp_string (pp
, " value: ");
/* Include the value's tree type when it has one.  */
737 if (tree t
= value
->get_type ())
738 dump_quoted_tree (pp
, t
);
739 pp_string (pp
, " {");
740 value
->dump_to_pp (pp
, simple
);
/* Single-line branch: comma-separated "binding key/value" pairs.  */
747 pp_string (pp
, ", ");
748 pp_string (pp
, "binding key: {");
749 key
->dump_to_pp (pp
, simple
);
750 pp_string (pp
, "}, value: {");
751 value
->dump_to_pp (pp
, simple
);
757 /* Dump a multiline representation of this binding_map to stderr. */
760 binding_map::dump (bool simple
) const
/* stderr-backed pretty_printer, then dump_to_pp with
   multiline == true.  */
763 pp_format_decoder (&pp
) = default_tree_printer
;
764 pp_show_color (&pp
) = pp_show_color (global_dc
->printer
);
765 pp
.buffer
->stream
= stderr
;
766 dump_to_pp (&pp
, simple
, true);
771 /* Return a new json::object of the form
772 {KEY_DESC : SVALUE_DESC,
773 ...for the various key/value pairs in this binding_map}. */
776 binding_map::to_json () const
778 json::object
*map_obj
= new json::object ();
/* Sort the keys so the JSON property order is deterministic.  */
780 auto_vec
<const binding_key
*> binding_keys
;
781 for (map_t::iterator iter
= m_map
.begin ();
782 iter
!= m_map
.end (); ++iter
)
784 const binding_key
*key
= (*iter
).first
;
785 binding_keys
.safe_push (key
);
787 binding_keys
.qsort (binding_key::cmp_ptrs
);
789 const binding_key
*key
;
791 FOR_EACH_VEC_ELT (binding_keys
, i
, key
)
793 const svalue
*value
= *const_cast <map_t
&> (m_map
).get (key
);
/* Use the key's textual description as the JSON property name.  */
794 label_text key_desc
= key
->get_desc ();
795 map_obj
->set (key_desc
.get (), value
->to_json ());
801 /* Comparator for imposing an order on binding_maps. */
804 binding_map::cmp (const binding_map
&map1
, const binding_map
&map2
)
/* Order first by number of bindings.  */
806 if (int count_cmp
= map1
.elements () - map2
.elements ())
/* Gather both maps' keys into sorted vectors so they can be
   compared pairwise in a canonical order.  */
809 auto_vec
<const binding_key
*> keys1 (map1
.elements ())
810 for (map_t::iterator iter
= map1
.begin ();
811 iter
!= map1
.end (); ++iter
)
812 keys1
.quick_push ((*iter
).first
);
813 keys1
.qsort (binding_key::cmp_ptrs
);
815 auto_vec
<const binding_key
*> keys2 (map2
.elements ())
816 for (map_t::iterator iter
= map2
.begin ();
817 iter
!= map2
.end (); ++iter
)
818 keys2
.quick_push ((*iter
).first
);
819 keys2
.qsort (binding_key::cmp_ptrs
);
821 for (size_t i
= 0; i
< keys1
.length (); i
++)
823 const binding_key
*k1
= keys1
[i
];
824 const binding_key
*k2
= keys2
[i
];
825 if (int key_cmp
= binding_key::cmp (k1
, k2
))
/* Keys are interned: cmp == 0 implies pointer equality.  */
827 gcc_assert (k1
== k2
);
/* Keys match at position i; compare the bound svalues.  */
828 if (int sval_cmp
= svalue::cmp_ptr (map1
.get (k1
), map2
.get (k2
)))
835 /* Get the child region of PARENT_REG based upon INDEX within a
/* Dispatch on the tree code of INDEX: integer indices give an
   element region; FIELD_DECLs give a field region.  */
838 static const region
*
839 get_subregion_within_ctor (const region
*parent_reg
, tree index
,
840 region_model_manager
*mgr
)
842 switch (TREE_CODE (index
))
/* Array-style index: wrap the constant in an svalue and use it
   to address an element of PARENT_REG.  */
848 const svalue
*index_sval
849 = mgr
->get_or_create_constant_svalue (index
);
850 return mgr
->get_element_region (parent_reg
,
851 TREE_TYPE (parent_reg
->get_type ()),
/* Presumably the FIELD_DECL case — TODO confirm against the
   missing case labels in this extract.  */
856 return mgr
->get_field_region (parent_reg
, index
);
860 /* Get the svalue for VAL, a non-CONSTRUCTOR value within a CONSTRUCTOR. */
862 static const svalue
*
863 get_svalue_for_ctor_val (tree val
, region_model_manager
*mgr
)
865 /* Reuse the get_rvalue logic from region_model. */
/* A throwaway model is sufficient: only svalue creation is needed.  */
866 region_model
m (mgr
);
867 return m
.get_rvalue (path_var (val
, 0), NULL
);
870 /* Bind values from CONSTRUCTOR to this map, relative to
871 PARENT_REG's relationship to its base region.
872 Return true if successful, false if there was a problem (e.g. due
873 to hitting a complexity limit). */
876 binding_map::apply_ctor_to_region (const region
*parent_reg
, tree ctor
,
877 region_model_manager
*mgr
)
879 gcc_assert (parent_reg
);
880 gcc_assert (TREE_CODE (ctor
) == CONSTRUCTOR
);
885 tree parent_type
= parent_reg
->get_type ();
/* For RECORD_TYPEs, walk the field chain in parallel with the
   constructor elements, for use when an element has no index.  */
887 if (TREE_CODE (parent_type
) == RECORD_TYPE
)
888 field
= TYPE_FIELDS (parent_type
);
891 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor
), ix
, index
, val
)
895 /* If index is NULL, then iterate through the fields for
896 a RECORD_TYPE, or use an INTEGER_CST otherwise.
897 Compare with similar logic in output_constructor. */
901 field
= DECL_CHAIN (field
);
/* Non-record with no explicit index: synthesize one from the
   element's position IX.  */
904 index
= build_int_cst (integer_type_node
, ix
);
906 else if (TREE_CODE (index
) == RANGE_EXPR
)
/* RANGE_EXPR: one value bound across [min_index, max_index].  */
908 tree min_index
= TREE_OPERAND (index
, 0);
909 tree max_index
= TREE_OPERAND (index
, 1);
/* Degenerate single-element range: treat as a plain pair.  */
910 if (min_index
== max_index
)
912 if (!apply_ctor_pair_to_child_region (parent_reg
, mgr
,
918 if (!apply_ctor_val_to_range (parent_reg
, mgr
,
919 min_index
, max_index
, val
))
/* Ordinary (index, value) pair.  */
924 if (!apply_ctor_pair_to_child_region (parent_reg
, mgr
, index
, val
))
930 /* Bind the value VAL into the range of elements within PARENT_REF
931 from MIN_INDEX to MAX_INDEX (including endpoints).
932 For use in handling RANGE_EXPR within a CONSTRUCTOR.
933 Return true if successful, false if there was a problem (e.g. due
934 to hitting a complexity limit). */
937 binding_map::apply_ctor_val_to_range (const region
*parent_reg
,
938 region_model_manager
*mgr
,
939 tree min_index
, tree max_index
,
942 gcc_assert (TREE_CODE (min_index
) == INTEGER_CST
);
943 gcc_assert (TREE_CODE (max_index
) == INTEGER_CST
);
945 /* Generate a binding key for the range. */
/* Resolve both endpoints to regions so the full span of the range
   can be expressed as a single concrete binding.  */
946 const region
*min_element
947 = get_subregion_within_ctor (parent_reg
, min_index
, mgr
);
948 const region
*max_element
949 = get_subregion_within_ctor (parent_reg
, max_index
, mgr
);
950 region_offset min_offset
= min_element
->get_offset (mgr
);
/* Bail out (early return not visible here) on symbolic offsets:
   no concrete range key can be built.  */
951 if (min_offset
.symbolic_p ())
953 bit_offset_t start_bit_offset
= min_offset
.get_bit_offset ();
954 store_manager
*smgr
= mgr
->get_store_manager ();
955 if (max_element
->empty_p ())
957 const binding_key
*max_element_key
= binding_key::make (smgr
, max_element
);
958 if (max_element_key
->symbolic_p ())
/* Range spans from the start of min_element to the end of
   max_element.  */
960 const concrete_binding
*max_element_ckey
961 = max_element_key
->dyn_cast_concrete_binding ();
962 bit_size_t range_size_in_bits
963 = max_element_ckey
->get_next_bit_offset () - start_bit_offset
;
964 const concrete_binding
*range_key
965 = smgr
->get_concrete_binding (start_bit_offset
, range_size_in_bits
);
966 if (range_key
->symbolic_p ())
/* Nested constructors within a range are presumably rejected or
   handled separately — the branch body is not visible here.  */
970 if (TREE_CODE (val
) == CONSTRUCTOR
)
972 const svalue
*sval
= get_svalue_for_ctor_val (val
, mgr
);
974 /* Bind the value to the range. */
975 put (range_key
, sval
);
979 /* Bind the value VAL into INDEX within PARENT_REF.
980 For use in handling a pair of entries within a CONSTRUCTOR.
981 Return true if successful, false if there was a problem (e.g. due
982 to hitting a complexity limit). */
985 binding_map::apply_ctor_pair_to_child_region (const region
*parent_reg
,
986 region_model_manager
*mgr
,
987 tree index
, tree val
)
989 const region
*child_reg
990 = get_subregion_within_ctor (parent_reg
, index
, mgr
);
/* Nested constructor: recurse into the child region.  */
991 if (TREE_CODE (val
) == CONSTRUCTOR
)
992 return apply_ctor_to_region (child_reg
, val
, mgr
);
995 const svalue
*sval
= get_svalue_for_ctor_val (val
, mgr
);
996 if (child_reg
->empty_p ())
999 = binding_key::make (mgr
->get_store_manager (), child_reg
);
1000 /* Handle the case where we have an unknown size for child_reg
1001 (e.g. due to it being a trailing field with incomplete array
1003 if (!k
->concrete_p ())
1005 /* Assume that sval has a well-defined size for this case. */
1006 tree sval_type
= sval
->get_type ();
1007 gcc_assert (sval_type
);
1008 HOST_WIDE_INT sval_byte_size
= int_size_in_bytes (sval_type
);
1009 gcc_assert (sval_byte_size
!= -1);
1010 bit_size_t sval_bit_size
= sval_byte_size
* BITS_PER_UNIT
;
1011 /* Get offset of child relative to base region. */
1012 region_offset child_base_offset
= child_reg
->get_offset (mgr
);
/* Symbolic child offset: can't build a concrete key (the early
   return is not visible in this extract).  */
1013 if (child_base_offset
.symbolic_p ())
1015 /* Convert to an offset relative to the parent region. */
1016 region_offset parent_base_offset
= parent_reg
->get_offset (mgr
);
1017 gcc_assert (!parent_base_offset
.symbolic_p ());
1018 bit_offset_t child_parent_offset
1019 = (child_base_offset
.get_bit_offset ()
1020 - parent_base_offset
.get_bit_offset ());
1021 /* Create a concrete key for the child within the parent. */
1022 k
= mgr
->get_store_manager ()->get_concrete_binding
1023 (child_parent_offset
, sval_bit_size
);
1025 gcc_assert (k
->concrete_p ());
1031 /* Populate OUT with all bindings within this map that overlap KEY. */
1034 binding_map::get_overlapping_bindings (const binding_key
*key
,
1035 auto_vec
<const binding_key
*> *out
)
1037 for (auto iter
: *this)
1039 const binding_key
*iter_key
= iter
.first
;
/* Concrete-vs-concrete is the only pair where a precise overlap
   test exists; any combination involving a symbolic key is
   conservatively assumed to overlap.  */
1040 if (const concrete_binding
*ckey
1041 = key
->dyn_cast_concrete_binding ())
1043 if (const concrete_binding
*iter_ckey
1044 = iter_key
->dyn_cast_concrete_binding ())
1046 if (ckey
->overlaps_p (*iter_ckey
))
1047 out
->safe_push (iter_key
);
1051 /* Assume overlap. */
1052 out
->safe_push (iter_key
);
1057 /* Assume overlap. */
1058 out
->safe_push (iter_key
);
1063 /* Remove, truncate, and/or split any bindings within this map that
1066 For example, if we have:
1068 +------------------------------------+
1070 +------------------------------------+
1072 which is to be overwritten with:
1074 .......+----------------------+.......
1075 .......| new binding |.......
1076 .......+----------------------+.......
1078 this function "cuts a hole" out of the old binding:
1080 +------+......................+------+
1081 |prefix| hole for new binding |suffix|
1082 +------+......................+------+
1084 into which the new binding can be added without
1085 overlapping the prefix or suffix.
1087 The prefix and suffix (if added) will be bound to the pertinent
1088 parts of the value of the old binding.
1095 void test_5 (struct s5 *p)
1100 then after the "f = *p;" we have:
1101 cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
1102 and at the "f.arr[3] = 42;" we remove the bindings overlapping
1103 "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
1107 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1109 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1110 punching a hole into which the new value can be written at byte 3:
1113 value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1115 value: 'char' {(char)42}
1117 value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
1119 If UNCERTAINTY is non-NULL, use it to record any svalues that
1120 were removed, as being maybe-bound.
1122 If MAYBE_LIVE_VALUES is non-NULL, then use it to record any svalues that
1123 were removed as being maybe-live.
1125 If ALWAYS_OVERLAP, then assume that DROP_KEY can overlap anything
1126 in the map, due to one or both of the underlying clusters being
1127 symbolic (but not the same symbolic region). Hence even if DROP_KEY is a
1128 concrete binding it could actually be referring to the same memory as
1129 distinct concrete bindings in the map. Remove all bindings, but
1130 register any svalues with *UNCERTAINTY. */
1133 binding_map::remove_overlapping_bindings (store_manager
*mgr
,
1134 const binding_key
*drop_key
,
1135 uncertainty_t
*uncertainty
,
1136 svalue_set
*maybe_live_values
,
1137 bool always_overlap
)
1139 /* Get the bindings of interest within this map. */
1140 auto_vec
<const binding_key
*> bindings
;
/* With ALWAYS_OVERLAP every binding is affected; otherwise only
   those that get_overlapping_bindings finds.  */
1142 for (auto iter
: *this)
1143 bindings
.safe_push (iter
.first
); /* Add all bindings. */
1145 /* Just add overlapping bindings. */
1146 get_overlapping_bindings (drop_key
, &bindings
);
1149 const binding_key
*iter_binding
;
1150 FOR_EACH_VEC_ELT (bindings
, i
, iter_binding
)
1152 /* Record any svalues that were removed to *UNCERTAINTY as being
1153 maybe-bound, provided at least some part of the binding is symbolic.
1155 Specifically, if at least one of the bindings is symbolic, or we
1156 have ALWAYS_OVERLAP for the case where we have possibly aliasing
1157 regions, then we don't know that the svalue has been overwritten,
1158 and should record that to *UNCERTAINTY.
1160 However, if we have concrete keys accessing within the same symbolic
1161 region, then we *know* that the symbolic region has been overwritten,
1162 so we don't record it to *UNCERTAINTY, as this could be a genuine
1164 const svalue
*old_sval
= get (iter_binding
);
1166 && (drop_key
->symbolic_p ()
1167 || iter_binding
->symbolic_p ()
1169 uncertainty
->on_maybe_bound_sval (old_sval
);
1171 /* Record any svalues that were removed to *MAYBE_LIVE_VALUES as being
1173 if (maybe_live_values
)
1174 maybe_live_values
->add (old_sval
);
1176 /* Begin by removing the old binding. */
1177 m_map
.remove (iter_binding
);
1179 /* Don't attempt to handle prefixes/suffixes for the
1180 "always_overlap" case; everything's being removed. */
1184 /* Now potentially add the prefix and suffix. */
/* Splitting is only possible when both keys are concrete, so the
   exact overlap geometry is known.  */
1185 if (const concrete_binding
*drop_ckey
1186 = drop_key
->dyn_cast_concrete_binding ())
1187 if (const concrete_binding
*iter_ckey
1188 = iter_binding
->dyn_cast_concrete_binding ())
1190 gcc_assert (drop_ckey
->overlaps_p (*iter_ckey
));
1192 const bit_range
&drop_bits
= drop_ckey
->get_bit_range ();
1193 const bit_range
&iter_bits
= iter_ckey
->get_bit_range ();
1195 if (iter_bits
.get_start_bit_offset ()
1196 < drop_bits
.get_start_bit_offset ())
1198 /* We have a truncated prefix. */
/* Old binding starts before the hole: rebind the leading part.  */
1199 bit_range
prefix_bits (iter_bits
.get_start_bit_offset (),
1200 (drop_bits
.get_start_bit_offset ()
1201 - iter_bits
.get_start_bit_offset ()))
1202 const concrete_binding
*prefix_key
1203 = mgr
->get_concrete_binding (prefix_bits
);
/* rel_prefix is the prefix expressed relative to the old value.  */
1204 bit_range
rel_prefix (0, prefix_bits
.m_size_in_bits
);
1205 const svalue
*prefix_sval
1206 = old_sval
->extract_bit_range (NULL_TREE
,
1208 mgr
->get_svalue_manager ());
1209 m_map
.put (prefix_key
, prefix_sval
);
1212 if (iter_bits
.get_next_bit_offset ()
1213 > drop_bits
.get_next_bit_offset ())
1215 /* We have a truncated suffix. */
/* Old binding extends past the hole: rebind the trailing part.  */
1216 bit_range
suffix_bits (drop_bits
.get_next_bit_offset (),
1217 (iter_bits
.get_next_bit_offset ()
1218 - drop_bits
.get_next_bit_offset ()))
1219 const concrete_binding
*suffix_key
1220 = mgr
->get_concrete_binding (suffix_bits
);
/* rel_suffix is the suffix expressed relative to the old value.  */
1221 bit_range
rel_suffix (drop_bits
.get_next_bit_offset ()
1222 - iter_bits
.get_start_bit_offset (),
1223 suffix_bits
.m_size_in_bits
);
1224 const svalue
*suffix_sval
1225 = old_sval
->extract_bit_range (NULL_TREE
,
1227 mgr
->get_svalue_manager ());
1228 m_map
.put (suffix_key
, suffix_sval
);
1234 /* class binding_cluster. */
/* Construct an empty cluster for BASE_REGION: no bindings yet,
   and the escaped/touched flags start clear.  */
1236 binding_cluster::binding_cluster (const region
*base_region
)
1237 : m_base_region (base_region
), m_map (),
1238 m_escaped (false), m_touched (false)
1242 /* binding_cluster's copy ctor. */
/* Member-wise copy of region, map, and flags.  */
1244 binding_cluster::binding_cluster (const binding_cluster
&other
)
1245 : m_base_region (other
.m_base_region
), m_map (other
.m_map
),
1246 m_escaped (other
.m_escaped
), m_touched (other
.m_touched
)
1250 /* binding_cluster's assignment operator. */
1253 binding_cluster::operator= (const binding_cluster
&other
)
/* Assignment is only valid between clusters for the same base
   region.  */
1255 gcc_assert (m_base_region
== other
.m_base_region
);
1256 m_map
= other
.m_map
;
1257 m_escaped
= other
.m_escaped
;
1258 m_touched
= other
.m_touched
;
1262 /* binding_cluster's equality operator. */
1265 binding_cluster::operator== (const binding_cluster
&other
) const
/* Compare all four members; any mismatch means inequality.  */
1267 if (m_map
!= other
.m_map
)
1270 if (m_base_region
!= other
.m_base_region
)
1273 if (m_escaped
!= other
.m_escaped
)
1276 if (m_touched
!= other
.m_touched
)
/* Equal clusters must hash equally (checking builds only).  */
1279 gcc_checking_assert (hash () == other
.hash ());
1284 /* Generate a hash value for this binding_cluster. */
1287 binding_cluster::hash () const
1289 return m_map
.hash ();
1292 /* Return true if this binding_cluster is symbolic
1293 i.e. its base region is symbolic. */
1296 binding_cluster::symbolic_p () const
1298 return m_base_region
->get_kind () == RK_SYMBOLIC
;
1301 /* Dump a representation of this binding_cluster to PP.
1302 SIMPLE controls how values and regions are to be printed.
1303 If MULTILINE, then split the dump over multiple lines and
1304 use whitespace for readability, otherwise put all on one line. */
1307 binding_cluster::dump_to_pp (pretty_printer
*pp
, bool simple
,
1308 bool multiline
) const
1314 pp_string (pp
, " ESCAPED");
1318 pp_string (pp
, "(ESCAPED)");
1324 pp_string (pp
, " TOUCHED");
1328 pp_string (pp
, "(TOUCHED)");
1331 m_map
.dump_to_pp (pp
, simple
, multiline
);
1334 /* Dump a multiline representation of this binding_cluster to stderr. */
1337 binding_cluster::dump (bool simple
) const
1340 pp_format_decoder (&pp
) = default_tree_printer
;
1341 pp_show_color (&pp
) = pp_show_color (global_dc
->printer
);
1342 pp
.buffer
->stream
= stderr
;
1343 pp_string (&pp
, " cluster for: ");
1344 m_base_region
->dump_to_pp (&pp
, simple
);
1345 pp_string (&pp
, ": ");
1347 dump_to_pp (&pp
, simple
, true);
1352 /* Assert that this object is valid. */
1355 binding_cluster::validate () const
1357 int num_symbolic
= 0;
1358 int num_concrete
= 0;
1359 for (auto iter
: m_map
)
1361 if (iter
.first
->symbolic_p ())
1366 /* We shouldn't have more than one symbolic key per cluster
1367 (or one would have clobbered the other). */
1368 gcc_assert (num_symbolic
< 2);
1369 /* We can't have both concrete and symbolic keys. */
1370 gcc_assert (num_concrete
== 0 || num_symbolic
== 0);
1373 /* Return a new json::object of the form
1374 {"escaped": true/false,
1375 "touched": true/false,
1376 "map" : object for the binding_map. */
1379 binding_cluster::to_json () const
1381 json::object
*cluster_obj
= new json::object ();
1383 cluster_obj
->set ("escaped", new json::literal (m_escaped
));
1384 cluster_obj
->set ("touched", new json::literal (m_touched
));
1385 cluster_obj
->set ("map", m_map
.to_json ());
1390 /* Add a binding of SVAL of kind KIND to REG, unpacking SVAL if it is a
1394 binding_cluster::bind (store_manager
*mgr
,
1395 const region
*reg
, const svalue
*sval
)
1397 if (const compound_svalue
*compound_sval
1398 = sval
->dyn_cast_compound_svalue ())
1400 bind_compound_sval (mgr
, reg
, compound_sval
);
1404 if (reg
->empty_p ())
1406 const binding_key
*binding
= binding_key::make (mgr
, reg
);
1407 bind_key (binding
, sval
);
1410 /* Bind SVAL to KEY.
1411 Unpacking of compound_svalues should already have been done by the
1412 time this is called. */
1415 binding_cluster::bind_key (const binding_key
*key
, const svalue
*sval
)
1417 gcc_assert (sval
->get_kind () != SK_COMPOUND
);
1419 m_map
.put (key
, sval
);
1420 if (key
->symbolic_p ())
1424 /* Subroutine of binding_cluster::bind.
1425 Unpack compound_svals when binding them, so that we bind them
1429 binding_cluster::bind_compound_sval (store_manager
*mgr
,
1431 const compound_svalue
*compound_sval
)
1433 region_offset reg_offset
1434 = reg
->get_offset (mgr
->get_svalue_manager ());
1435 if (reg_offset
.symbolic_p ())
1438 clobber_region (mgr
, reg
);
1442 for (map_t::iterator iter
= compound_sval
->begin ();
1443 iter
!= compound_sval
->end (); ++iter
)
1445 const binding_key
*iter_key
= (*iter
).first
;
1446 const svalue
*iter_sval
= (*iter
).second
;
1448 if (const concrete_binding
*concrete_key
1449 = iter_key
->dyn_cast_concrete_binding ())
1451 bit_offset_t effective_start
1452 = (concrete_key
->get_start_bit_offset ()
1453 + reg_offset
.get_bit_offset ());
1454 const concrete_binding
*effective_concrete_key
1455 = mgr
->get_concrete_binding (effective_start
,
1456 concrete_key
->get_size_in_bits ());
1457 bind_key (effective_concrete_key
, iter_sval
);
1464 /* Remove all bindings overlapping REG within this cluster. */
1467 binding_cluster::clobber_region (store_manager
*mgr
, const region
*reg
)
1469 remove_overlapping_bindings (mgr
, reg
, NULL
, NULL
);
1472 /* Remove any bindings for REG within this cluster. */
1475 binding_cluster::purge_region (store_manager
*mgr
, const region
*reg
)
1477 gcc_assert (reg
->get_kind () == RK_DECL
);
1478 if (reg
->empty_p ())
1480 const binding_key
*binding
1481 = binding_key::make (mgr
, const_cast<region
*> (reg
));
1482 m_map
.remove (binding
);
1485 /* Clobber REG and fill it with repeated copies of SVAL. */
1488 binding_cluster::fill_region (store_manager
*mgr
,
1492 clobber_region (mgr
, reg
);
1494 region_model_manager
*sval_mgr
= mgr
->get_svalue_manager ();
1495 const svalue
*byte_size_sval
= reg
->get_byte_size_sval (sval_mgr
);
1496 const svalue
*fill_sval
1497 = sval_mgr
->get_or_create_repeated_svalue (reg
->get_type (),
1498 byte_size_sval
, sval
);
1499 bind (mgr
, reg
, fill_sval
);
1502 /* Clobber REG within this cluster and fill it with zeroes. */
1505 binding_cluster::zero_fill_region (store_manager
*mgr
, const region
*reg
)
1507 region_model_manager
*sval_mgr
= mgr
->get_svalue_manager ();
1508 const svalue
*zero_sval
= sval_mgr
->get_or_create_int_cst (char_type_node
, 0);
1509 fill_region (mgr
, reg
, zero_sval
);
1512 /* Mark REG_TO_BIND within this cluster as being unknown.
1514 Remove any bindings overlapping REG_FOR_OVERLAP.
1515 If UNCERTAINTY is non-NULL, use it to record any svalues that
1516 had bindings to them removed, as being maybe-bound.
1517 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1518 had bindings to them removed, as being maybe-live.
1520 REG_TO_BIND and REG_FOR_OVERLAP are the same for
1521 store::mark_region_as_unknown, but are different in
1522 store::set_value's alias handling, for handling the case where
1523 we have a write to a symbolic REG_FOR_OVERLAP. */
1526 binding_cluster::mark_region_as_unknown (store_manager
*mgr
,
1527 const region
*reg_to_bind
,
1528 const region
*reg_for_overlap
,
1529 uncertainty_t
*uncertainty
,
1530 svalue_set
*maybe_live_values
)
1532 if (reg_to_bind
->empty_p ())
1535 remove_overlapping_bindings (mgr
, reg_for_overlap
, uncertainty
,
1538 /* Add a default binding to "unknown". */
1539 region_model_manager
*sval_mgr
= mgr
->get_svalue_manager ();
1541 = sval_mgr
->get_or_create_unknown_svalue (reg_to_bind
->get_type ());
1542 bind (mgr
, reg_to_bind
, sval
);
1545 /* Purge state involving SVAL. */
1548 binding_cluster::purge_state_involving (const svalue
*sval
,
1549 region_model_manager
*sval_mgr
)
1551 auto_vec
<const binding_key
*> to_remove
;
1552 auto_vec
<std::pair
<const binding_key
*, tree
> > to_make_unknown
;
1553 for (auto iter
: m_map
)
1555 const binding_key
*iter_key
= iter
.first
;
1556 if (const symbolic_binding
*symbolic_key
1557 = iter_key
->dyn_cast_symbolic_binding ())
1559 const region
*reg
= symbolic_key
->get_region ();
1560 if (reg
->involves_p (sval
))
1561 to_remove
.safe_push (iter_key
);
1563 const svalue
*iter_sval
= iter
.second
;
1564 if (iter_sval
->involves_p (sval
))
1565 to_make_unknown
.safe_push (std::make_pair(iter_key
,
1566 iter_sval
->get_type ()));
1568 for (auto iter
: to_remove
)
1570 m_map
.remove (iter
);
1573 for (auto iter
: to_make_unknown
)
1575 const svalue
*new_sval
1576 = sval_mgr
->get_or_create_unknown_svalue (iter
.second
);
1577 m_map
.put (iter
.first
, new_sval
);
1581 /* Get any SVAL bound to REG within this cluster via kind KIND,
1582 without checking parent regions of REG. */
1585 binding_cluster::get_binding (store_manager
*mgr
,
1586 const region
*reg
) const
1588 if (reg
->empty_p ())
1590 const binding_key
*reg_binding
= binding_key::make (mgr
, reg
);
1591 const svalue
*sval
= m_map
.get (reg_binding
);
1594 /* If we have a struct with a single field, then the binding of
1595 the field will equal that of the struct, and looking up e.g.
1596 PARENT_REG.field within:
1597 cluster for PARENT_REG: INIT_VAL(OTHER_REG)
1598 will erroneously return INIT_VAL(OTHER_REG), rather than
1599 SUB_VALUE(INIT_VAL(OTHER_REG), FIELD) == INIT_VAL(OTHER_REG.FIELD).
1600 Fix this issue by iterating upwards whilst the bindings are equal,
1601 expressing the lookups as subvalues.
1602 We have to gather a list of subregion accesses, then walk it
1603 in reverse to get the subvalues. */
1604 auto_vec
<const region
*> regions
;
1605 while (const region
*parent_reg
= reg
->get_parent_region ())
1607 const binding_key
*parent_reg_binding
1608 = binding_key::make (mgr
, parent_reg
);
1609 if (parent_reg_binding
== reg_binding
1610 && sval
->get_type ()
1612 && sval
->get_type () != reg
->get_type ())
1614 regions
.safe_push (reg
);
1620 if (sval
->get_type ()
1622 && sval
->get_type () == reg
->get_type ())
1625 const region
*iter_reg
;
1626 FOR_EACH_VEC_ELT_REVERSE (regions
, i
, iter_reg
)
1628 region_model_manager
*rmm_mgr
= mgr
->get_svalue_manager ();
1629 sval
= rmm_mgr
->get_or_create_sub_svalue (iter_reg
->get_type (),
1637 /* Get any SVAL bound to REG within this cluster,
1638 either directly for REG, or recursively checking for bindings within
1639 parent regions and extracting subvalues if need be. */
1642 binding_cluster::get_binding_recursive (store_manager
*mgr
,
1643 const region
*reg
) const
1645 if (const svalue
*sval
= get_binding (mgr
, reg
))
1647 if (reg
!= m_base_region
)
1648 if (const region
*parent_reg
= reg
->get_parent_region ())
1649 if (const svalue
*parent_sval
1650 = get_binding_recursive (mgr
, parent_reg
))
1652 /* Extract child svalue from parent svalue. */
1653 region_model_manager
*rmm_mgr
= mgr
->get_svalue_manager ();
1654 return rmm_mgr
->get_or_create_sub_svalue (reg
->get_type (),
1660 /* Get any value bound for REG within this cluster. */
1663 binding_cluster::get_any_binding (store_manager
*mgr
,
1664 const region
*reg
) const
1666 /* Look for a direct binding. */
1667 if (const svalue
*direct_sval
1668 = get_binding_recursive (mgr
, reg
))
1671 /* If we had a write to a cluster of unknown size, we might
1672 have a self-binding of the whole base region with an svalue,
1673 where the base region is symbolic.
1674 Handle such cases by returning sub_svalue instances. */
1675 if (const svalue
*cluster_sval
= maybe_get_simple_value (mgr
))
1677 /* Extract child svalue from parent svalue. */
1678 region_model_manager
*rmm_mgr
= mgr
->get_svalue_manager ();
1679 return rmm_mgr
->get_or_create_sub_svalue (reg
->get_type (),
1683 /* If this cluster has been touched by a symbolic write, then the content
1684 of any subregion not currently specifically bound is "UNKNOWN". */
1687 region_model_manager
*rmm_mgr
= mgr
->get_svalue_manager ();
1688 return rmm_mgr
->get_or_create_unknown_svalue (reg
->get_type ());
1691 /* Alternatively, if this is a symbolic read and the cluster has any bindings,
1692 then we don't know if we're reading those values or not, so the result
1693 is also "UNKNOWN". */
1694 if (reg
->get_offset (mgr
->get_svalue_manager ()).symbolic_p ()
1695 && m_map
.elements () > 0)
1697 region_model_manager
*rmm_mgr
= mgr
->get_svalue_manager ();
1698 return rmm_mgr
->get_or_create_unknown_svalue (reg
->get_type ());
1701 if (const svalue
*compound_sval
= maybe_get_compound_binding (mgr
, reg
))
1702 return compound_sval
;
1704 /* Otherwise, the initial value, or uninitialized. */
1708 /* Attempt to get a compound_svalue for the bindings within the cluster
1709 affecting REG (which could be the base region itself).
1711 Create a compound_svalue with the subset of bindings the affect REG,
1712 offsetting them so that the offsets are relative to the start of REG
1715 For example, REG could be one element within an array of structs.
1717 Return the resulting compound_svalue, or NULL if there's a problem. */
1720 binding_cluster::maybe_get_compound_binding (store_manager
*mgr
,
1721 const region
*reg
) const
1723 region_offset cluster_offset
1724 = m_base_region
->get_offset (mgr
->get_svalue_manager ());
1725 if (cluster_offset
.symbolic_p ())
1727 region_offset reg_offset
= reg
->get_offset (mgr
->get_svalue_manager ());
1728 if (reg_offset
.symbolic_p ())
1731 if (reg
->empty_p ())
1734 region_model_manager
*sval_mgr
= mgr
->get_svalue_manager ();
1736 /* We will a build the result map in two parts:
1737 (a) result_map, holding the concrete keys from this cluster,
1739 (b) default_map, holding the initial values for the region
1740 (e.g. uninitialized, initializer values, or zero), unless this
1741 cluster has been touched.
1743 We will populate (a), and as we do, clobber (b), trimming and
1744 splitting its bindings as necessary.
1745 Finally, we will merge (b) into (a), giving a concrete map
1746 that merges both the initial values and the bound values from
1747 the binding_cluster.
1748 Doing it this way reduces N for the O(N^2) intersection-finding,
1749 perhaps we should have a spatial-organized data structure for
1750 concrete keys, though. */
1752 binding_map result_map
;
1753 binding_map default_map
;
1755 /* Set up default values in default_map. */
1756 const svalue
*default_sval
;
1758 default_sval
= sval_mgr
->get_or_create_unknown_svalue (reg
->get_type ());
1760 default_sval
= sval_mgr
->get_or_create_initial_value (reg
);
1761 const binding_key
*default_key
= binding_key::make (mgr
, reg
);
1763 /* Express the bit-range of the default key for REG relative to REG,
1764 rather than to the base region. */
1765 const concrete_binding
*concrete_default_key
1766 = default_key
->dyn_cast_concrete_binding ();
1767 if (!concrete_default_key
)
1769 const concrete_binding
*default_key_relative_to_reg
1770 = mgr
->get_concrete_binding (0, concrete_default_key
->get_size_in_bits ());
1771 default_map
.put (default_key_relative_to_reg
, default_sval
);
1773 for (map_t::iterator iter
= m_map
.begin (); iter
!= m_map
.end (); ++iter
)
1775 const binding_key
*key
= (*iter
).first
;
1776 const svalue
*sval
= (*iter
).second
;
1778 if (const concrete_binding
*concrete_key
1779 = key
->dyn_cast_concrete_binding ())
1781 const bit_range
&bound_range
= concrete_key
->get_bit_range ();
1783 bit_size_t reg_bit_size
;
1784 if (!reg
->get_bit_size (®_bit_size
))
1787 bit_range
reg_range (reg_offset
.get_bit_offset (),
1790 /* Skip bindings that are outside the bit range of REG. */
1791 if (!bound_range
.intersects_p (reg_range
))
1794 /* We shouldn't have an exact match; that should have been
1796 gcc_assert (!(reg_range
== bound_range
));
1798 bit_range
subrange (0, 0);
1799 if (reg_range
.contains_p (bound_range
, &subrange
))
1801 /* We have a bound range fully within REG.
1802 Add it to map, offsetting accordingly. */
1804 /* Get offset of KEY relative to REG, rather than to
1806 const concrete_binding
*offset_concrete_key
1807 = mgr
->get_concrete_binding (subrange
);
1808 result_map
.put (offset_concrete_key
, sval
);
1810 /* Clobber default_map, removing/trimming/spliting where
1811 it overlaps with offset_concrete_key. */
1812 default_map
.remove_overlapping_bindings (mgr
,
1813 offset_concrete_key
,
1816 else if (bound_range
.contains_p (reg_range
, &subrange
))
1818 /* REG is fully within the bound range, but
1819 is not equal to it; we're extracting a subvalue. */
1820 return sval
->extract_bit_range (reg
->get_type (),
1822 mgr
->get_svalue_manager ());
1826 /* REG and the bound range partially overlap. */
1827 bit_range
reg_subrange (0, 0);
1828 bit_range
bound_subrange (0, 0);
1829 reg_range
.intersects_p (bound_range
,
1830 ®_subrange
, &bound_subrange
);
1832 /* Get the bits from the bound value for the bits at the
1833 intersection (relative to the bound value). */
1834 const svalue
*overlap_sval
1835 = sval
->extract_bit_range (NULL_TREE
,
1837 mgr
->get_svalue_manager ());
1839 /* Get key for overlap, relative to the REG. */
1840 const concrete_binding
*overlap_concrete_key
1841 = mgr
->get_concrete_binding (reg_subrange
);
1842 result_map
.put (overlap_concrete_key
, overlap_sval
);
1844 /* Clobber default_map, removing/trimming/spliting where
1845 it overlaps with overlap_concrete_key. */
1846 default_map
.remove_overlapping_bindings (mgr
,
1847 overlap_concrete_key
,
1852 /* Can't handle symbolic bindings. */
1856 if (result_map
.elements () == 0)
1859 /* Merge any bindings from default_map into result_map. */
1860 for (auto iter
: default_map
)
1862 const binding_key
*key
= iter
.first
;
1863 const svalue
*sval
= iter
.second
;
1864 result_map
.put (key
, sval
);
1867 return sval_mgr
->get_or_create_compound_svalue (reg
->get_type (), result_map
);
1870 /* Remove, truncate, and/or split any bindings within this map that
1873 If REG's base region or this cluster is symbolic and they're different
1874 base regions, then remove everything in this cluster's map, on the
1875 grounds that REG could be referring to the same memory as anything
1878 If UNCERTAINTY is non-NULL, use it to record any svalues that
1879 were removed, as being maybe-bound.
1881 If MAYBE_LIVE_VALUES is non-NULL, use it to record any svalues that
1882 were removed, as being maybe-live. */
1885 binding_cluster::remove_overlapping_bindings (store_manager
*mgr
,
1887 uncertainty_t
*uncertainty
,
1888 svalue_set
*maybe_live_values
)
1890 if (reg
->empty_p ())
1892 const binding_key
*reg_binding
= binding_key::make (mgr
, reg
);
1894 const region
*cluster_base_reg
= get_base_region ();
1895 const region
*other_base_reg
= reg
->get_base_region ();
1896 /* If at least one of the base regions involved is symbolic, and they're
1897 not the same base region, then consider everything in the map as
1898 potentially overlapping with reg_binding (even if it's a concrete
1899 binding and things in the map are concrete - they could be referring
1900 to the same memory when the symbolic base regions are taken into
1902 bool always_overlap
= (cluster_base_reg
!= other_base_reg
1903 && (cluster_base_reg
->get_kind () == RK_SYMBOLIC
1904 || other_base_reg
->get_kind () == RK_SYMBOLIC
));
1905 m_map
.remove_overlapping_bindings (mgr
, reg_binding
, uncertainty
,
1910 /* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
1912 Return true if they can be merged, false otherwise. */
1915 binding_cluster::can_merge_p (const binding_cluster
*cluster_a
,
1916 const binding_cluster
*cluster_b
,
1917 binding_cluster
*out_cluster
,
1920 model_merger
*merger
)
1922 gcc_assert (out_cluster
);
1924 /* Merge flags ("ESCAPED" and "TOUCHED") by setting the merged flag to
1925 true if either of the inputs is true. */
1926 if ((cluster_a
&& cluster_a
->m_escaped
)
1927 || (cluster_b
&& cluster_b
->m_escaped
))
1928 out_cluster
->m_escaped
= true;
1929 if ((cluster_a
&& cluster_a
->m_touched
)
1930 || (cluster_b
&& cluster_b
->m_touched
))
1931 out_cluster
->m_touched
= true;
1933 /* At least one of CLUSTER_A and CLUSTER_B are non-NULL, but either
1934 could be NULL. Handle these cases. */
1935 if (cluster_a
== NULL
)
1937 gcc_assert (cluster_b
!= NULL
);
1938 gcc_assert (cluster_b
->m_base_region
== out_cluster
->m_base_region
);
1939 out_cluster
->make_unknown_relative_to (cluster_b
, out_store
, mgr
);
1942 if (cluster_b
== NULL
)
1944 gcc_assert (cluster_a
!= NULL
);
1945 gcc_assert (cluster_a
->m_base_region
== out_cluster
->m_base_region
);
1946 out_cluster
->make_unknown_relative_to (cluster_a
, out_store
, mgr
);
1950 /* The "both inputs are non-NULL" case. */
1951 gcc_assert (cluster_a
!= NULL
&& cluster_b
!= NULL
);
1952 gcc_assert (cluster_a
->m_base_region
== out_cluster
->m_base_region
);
1953 gcc_assert (cluster_b
->m_base_region
== out_cluster
->m_base_region
);
1955 hash_set
<const binding_key
*> keys
;
1956 for (map_t::iterator iter_a
= cluster_a
->m_map
.begin ();
1957 iter_a
!= cluster_a
->m_map
.end (); ++iter_a
)
1959 const binding_key
*key_a
= (*iter_a
).first
;
1962 for (map_t::iterator iter_b
= cluster_b
->m_map
.begin ();
1963 iter_b
!= cluster_b
->m_map
.end (); ++iter_b
)
1965 const binding_key
*key_b
= (*iter_b
).first
;
1968 int num_symbolic_keys
= 0;
1969 int num_concrete_keys
= 0;
1970 for (hash_set
<const binding_key
*>::iterator iter
= keys
.begin ();
1971 iter
!= keys
.end (); ++iter
)
1973 region_model_manager
*sval_mgr
= mgr
->get_svalue_manager ();
1974 const binding_key
*key
= *iter
;
1975 const svalue
*sval_a
= cluster_a
->get_any_value (key
);
1976 const svalue
*sval_b
= cluster_b
->get_any_value (key
);
1978 if (key
->symbolic_p ())
1979 num_symbolic_keys
++;
1981 num_concrete_keys
++;
1983 if (sval_a
== sval_b
)
1985 gcc_assert (sval_a
);
1986 out_cluster
->m_map
.put (key
, sval_a
);
1989 else if (sval_a
&& sval_b
)
1991 if (const svalue
*merged_sval
1992 = sval_a
->can_merge_p (sval_b
, sval_mgr
, merger
))
1994 out_cluster
->m_map
.put (key
, merged_sval
);
1997 /* Merger of the svalues failed. Reject merger of the cluster. */
2001 /* If we get here, then one cluster binds this key and the other
2002 doesn't; merge them as "UNKNOWN". */
2003 gcc_assert (sval_a
|| sval_b
);
2005 const svalue
*bound_sval
= sval_a
? sval_a
: sval_b
;
2006 tree type
= bound_sval
->get_type ();
2007 const svalue
*unknown_sval
2008 = mgr
->get_svalue_manager ()->get_or_create_unknown_svalue (type
);
2010 /* ...but reject the merger if this sval shouldn't be mergeable
2011 (e.g. reject merging svalues that have non-purgable sm-state,
2012 to avoid falsely reporting memory leaks by merging them
2013 with something else). */
2014 if (!bound_sval
->can_merge_p (unknown_sval
, sval_mgr
, merger
))
2017 out_cluster
->m_map
.put (key
, unknown_sval
);
2020 /* We can only have at most one symbolic key per cluster,
2021 and if we do, we can't have any concrete keys.
2022 If this happens, mark the cluster as touched, with no keys. */
2023 if (num_symbolic_keys
>= 2
2024 || (num_concrete_keys
> 0 && num_symbolic_keys
> 0))
2026 out_cluster
->m_touched
= true;
2027 out_cluster
->m_map
.empty ();
2030 /* We don't handle other kinds of overlaps yet. */
2035 /* Update this cluster to reflect an attempt to merge OTHER where there
2036 is no other cluster to merge with, and so we're notionally merging the
2037 bound values in OTHER with the initial value of the relevant regions.
2039 Any bound keys in OTHER should be bound to unknown in this. */
2042 binding_cluster::make_unknown_relative_to (const binding_cluster
*other
,
2046 for (map_t::iterator iter
= other
->m_map
.begin ();
2047 iter
!= other
->m_map
.end (); ++iter
)
2049 const binding_key
*iter_key
= (*iter
).first
;
2050 const svalue
*iter_sval
= (*iter
).second
;
2051 const svalue
*unknown_sval
2052 = mgr
->get_svalue_manager ()->get_or_create_unknown_svalue
2053 (iter_sval
->get_type ());
2054 m_map
.put (iter_key
, unknown_sval
);
2056 /* For any pointers in OTHER, the merger means that the
2057 concrete pointer becomes an unknown value, which could
2058 show up as a false report of a leak when considering what
2059 pointers are live before vs after.
2060 Avoid this by marking the base regions they point to as having
2062 if (const region_svalue
*region_sval
2063 = iter_sval
->dyn_cast_region_svalue ())
2065 const region
*base_reg
2066 = region_sval
->get_pointee ()->get_base_region ();
2067 if (base_reg
->tracked_p ()
2068 && !base_reg
->symbolic_for_unknown_ptr_p ())
2070 binding_cluster
*c
= out_store
->get_or_create_cluster (base_reg
);
2071 c
->mark_as_escaped ();
2077 /* Mark this cluster as having escaped. */
2080 binding_cluster::mark_as_escaped ()
2085 /* If this cluster has escaped (by this call, or by an earlier one, or
2086 by being an external param), then unbind all values and mark it
2087 as "touched", so that it has a conjured value, rather than an
2089 Use P to purge state involving conjured_svalues. */
2092 binding_cluster::on_unknown_fncall (const gcall
*call
,
2094 const conjured_purge
&p
)
2100 if (!m_base_region
->empty_p ())
2102 /* Bind it to a new "conjured" value using CALL. */
2104 = mgr
->get_svalue_manager ()->get_or_create_conjured_svalue
2105 (m_base_region
->get_type (), call
, m_base_region
, p
);
2106 bind (mgr
, m_base_region
, sval
);
2113 /* Mark this cluster as having been clobbered by STMT.
2114 Use P to purge state involving conjured_svalues. */
2117 binding_cluster::on_asm (const gasm
*stmt
,
2119 const conjured_purge
&p
)
2123 /* Bind it to a new "conjured" value using CALL. */
2125 = mgr
->get_svalue_manager ()->get_or_create_conjured_svalue
2126 (m_base_region
->get_type (), stmt
, m_base_region
, p
);
2127 bind (mgr
, m_base_region
, sval
);
2132 /* Return true if this cluster has escaped. */
2135 binding_cluster::escaped_p () const
2137 /* Consider the "errno" region to always have escaped. */
2138 if (m_base_region
->get_kind () == RK_ERRNO
)
2143 /* Return true if this binding_cluster has no information
2144 i.e. if there are no bindings, and it hasn't been marked as having
2145 escaped, or touched symbolically. */
2148 binding_cluster::redundant_p () const
2150 return (m_map
.elements () == 0
2155 /* Add PV to OUT_PVS, casting it to TYPE if it is not already of that type. */
2158 append_pathvar_with_type (path_var pv
,
2160 auto_vec
<path_var
> *out_pvs
)
2162 gcc_assert (pv
.m_tree
);
2164 if (TREE_TYPE (pv
.m_tree
) != type
)
2165 pv
.m_tree
= build1 (NOP_EXPR
, type
, pv
.m_tree
);
2167 out_pvs
->safe_push (pv
);
2170 /* Find representative path_vars for SVAL within this binding of BASE_REG,
2171 appending the results to OUT_PVS. */
2174 binding_cluster::get_representative_path_vars (const region_model
*model
,
2175 svalue_set
*visited
,
2176 const region
*base_reg
,
2178 auto_vec
<path_var
> *out_pvs
)
2181 sval
= simplify_for_binding (sval
);
2183 for (map_t::iterator iter
= m_map
.begin (); iter
!= m_map
.end (); ++iter
)
2185 const binding_key
*key
= (*iter
).first
;
2186 const svalue
*bound_sval
= (*iter
).second
;
2187 if (bound_sval
== sval
)
2189 if (const concrete_binding
*ckey
2190 = key
->dyn_cast_concrete_binding ())
2192 auto_vec
<const region
*> subregions
;
2193 base_reg
->get_subregions_for_binding
2194 (model
->get_manager (),
2195 ckey
->get_start_bit_offset (),
2196 ckey
->get_size_in_bits (),
2200 const region
*subregion
;
2201 FOR_EACH_VEC_ELT (subregions
, i
, subregion
)
2204 = model
->get_representative_path_var (subregion
,
2206 append_pathvar_with_type (pv
, sval
->get_type (), out_pvs
);
2211 const symbolic_binding
*skey
= (const symbolic_binding
*)key
;
2213 = model
->get_representative_path_var (skey
->get_region (),
2215 append_pathvar_with_type (pv
, sval
->get_type (), out_pvs
);
2221 /* Get any svalue bound to KEY, or NULL. */
2224 binding_cluster::get_any_value (const binding_key
*key
) const
2226 return m_map
.get (key
);
2229 /* If this cluster has a single direct binding for the whole of the region,
2231 For use in simplifying dumps. */
2234 binding_cluster::maybe_get_simple_value (store_manager
*mgr
) const
2236 /* Fail gracefully if MGR is NULL to make it easier to dump store
2237 instances in the debugger. */
2241 if (m_map
.elements () != 1)
2244 if (m_base_region
->empty_p ())
2247 const binding_key
*key
= binding_key::make (mgr
, m_base_region
);
2248 return get_any_value (key
);
2251 /* class store_manager. */
2254 store_manager::get_logger () const
2256 return m_mgr
->get_logger ();
2259 /* binding consolidation. */
2261 const concrete_binding
*
2262 store_manager::get_concrete_binding (bit_offset_t start_bit_offset
,
2263 bit_offset_t size_in_bits
)
2265 concrete_binding
b (start_bit_offset
, size_in_bits
);
2266 if (concrete_binding
*existing
= m_concrete_binding_key_mgr
.get (b
))
2269 concrete_binding
*to_save
= new concrete_binding (b
);
2270 m_concrete_binding_key_mgr
.put (b
, to_save
);
2274 const symbolic_binding
*
2275 store_manager::get_symbolic_binding (const region
*reg
)
2277 symbolic_binding
b (reg
);
2278 if (symbolic_binding
*existing
= m_symbolic_binding_key_mgr
.get (b
))
2281 symbolic_binding
*to_save
= new symbolic_binding (b
);
2282 m_symbolic_binding_key_mgr
.put (b
, to_save
);
2288 /* store's default ctor. */
2291 : m_called_unknown_fn (false)
2295 /* store's copy ctor. */
2297 store::store (const store
&other
)
2298 : m_cluster_map (other
.m_cluster_map
.elements ()),
2299 m_called_unknown_fn (other
.m_called_unknown_fn
)
2301 for (cluster_map_t::iterator iter
= other
.m_cluster_map
.begin ();
2302 iter
!= other
.m_cluster_map
.end ();
2305 const region
*reg
= (*iter
).first
;
2307 binding_cluster
*c
= (*iter
).second
;
2309 m_cluster_map
.put (reg
, new binding_cluster (*c
));
2317 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2318 iter
!= m_cluster_map
.end ();
2320 delete (*iter
).second
;
2323 /* store's assignment operator. */
2326 store::operator= (const store
&other
)
2328 /* Delete existing cluster map. */
2329 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2330 iter
!= m_cluster_map
.end ();
2332 delete (*iter
).second
;
2333 m_cluster_map
.empty ();
2335 m_called_unknown_fn
= other
.m_called_unknown_fn
;
2337 for (cluster_map_t::iterator iter
= other
.m_cluster_map
.begin ();
2338 iter
!= other
.m_cluster_map
.end ();
2341 const region
*reg
= (*iter
).first
;
2343 binding_cluster
*c
= (*iter
).second
;
2345 m_cluster_map
.put (reg
, new binding_cluster (*c
));
2350 /* store's equality operator. */
2353 store::operator== (const store
&other
) const
2355 if (m_called_unknown_fn
!= other
.m_called_unknown_fn
)
2358 if (m_cluster_map
.elements () != other
.m_cluster_map
.elements ())
2361 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2362 iter
!= m_cluster_map
.end ();
2365 const region
*reg
= (*iter
).first
;
2366 binding_cluster
*c
= (*iter
).second
;
2367 binding_cluster
**other_slot
2368 = const_cast <cluster_map_t
&> (other
.m_cluster_map
).get (reg
);
2369 if (other_slot
== NULL
)
2371 if (*c
!= **other_slot
)
2375 gcc_checking_assert (hash () == other
.hash ());
2380 /* Get a hash value for this store. */
2383 store::hash () const
2385 hashval_t result
= 0;
2386 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2387 iter
!= m_cluster_map
.end ();
2389 result
^= (*iter
).second
->hash ();
2393 /* Populate OUT with a sorted list of parent regions for the regions in IN,
2394 removing duplicate parents. */
2397 get_sorted_parent_regions (auto_vec
<const region
*> *out
,
2398 auto_vec
<const region
*> &in
)
2400 /* Get the set of parent regions. */
2401 hash_set
<const region
*> parent_regions
;
2402 const region
*iter_reg
;
2404 FOR_EACH_VEC_ELT (in
, i
, iter_reg
)
2406 const region
*parent_reg
= iter_reg
->get_parent_region ();
2407 gcc_assert (parent_reg
);
2408 parent_regions
.add (parent_reg
);
2412 for (hash_set
<const region
*>::iterator iter
= parent_regions
.begin();
2413 iter
!= parent_regions
.end(); ++iter
)
2414 out
->safe_push (*iter
);
2417 out
->qsort (region::cmp_ptr_ptr
);
2420 /* Dump a representation of this store to PP, using SIMPLE to control how
2421 svalues and regions are printed.
2422 MGR is used for simplifying dumps if non-NULL, but can also be NULL
2423 (to make it easier to use from the debugger). */
2426 store::dump_to_pp (pretty_printer
*pp
, bool simple
, bool multiline
,
2427 store_manager
*mgr
) const
2429 /* Sort into some deterministic order. */
2430 auto_vec
<const region
*> base_regions
;
2431 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2432 iter
!= m_cluster_map
.end (); ++iter
)
2434 const region
*base_reg
= (*iter
).first
;
2435 base_regions
.safe_push (base_reg
);
2437 base_regions
.qsort (region::cmp_ptr_ptr
);
2439 /* Gather clusters, organize by parent region, so that we can group
2440 together locals, globals, etc. */
2441 auto_vec
<const region
*> parent_regions
;
2442 get_sorted_parent_regions (&parent_regions
, base_regions
);
2444 const region
*parent_reg
;
2446 FOR_EACH_VEC_ELT (parent_regions
, i
, parent_reg
)
2448 gcc_assert (parent_reg
);
2449 pp_string (pp
, "clusters within ");
2450 parent_reg
->dump_to_pp (pp
, simple
);
2454 pp_string (pp
, " {");
2456 const region
*base_reg
;
2458 FOR_EACH_VEC_ELT (base_regions
, j
, base_reg
)
2460 /* This is O(N * M), but N ought to be small. */
2461 if (base_reg
->get_parent_region () != parent_reg
)
2463 binding_cluster
*cluster
2464 = *const_cast<cluster_map_t
&> (m_cluster_map
).get (base_reg
);
2468 pp_string (pp
, ", ");
2470 if (const svalue
*sval
= cluster
->maybe_get_simple_value (mgr
))
2472 /* Special-case to simplify dumps for the common case where
2473 we just have one value directly bound to the whole of a
2477 pp_string (pp
, " cluster for: ");
2478 base_reg
->dump_to_pp (pp
, simple
);
2479 pp_string (pp
, ": ");
2480 sval
->dump_to_pp (pp
, simple
);
2481 if (cluster
->escaped_p ())
2482 pp_string (pp
, " (ESCAPED)");
2483 if (cluster
->touched_p ())
2484 pp_string (pp
, " (TOUCHED)");
2489 pp_string (pp
, "region: {");
2490 base_reg
->dump_to_pp (pp
, simple
);
2491 pp_string (pp
, ", value: ");
2492 sval
->dump_to_pp (pp
, simple
);
2493 if (cluster
->escaped_p ())
2494 pp_string (pp
, " (ESCAPED)");
2495 if (cluster
->touched_p ())
2496 pp_string (pp
, " (TOUCHED)");
2497 pp_string (pp
, "}");
2502 pp_string (pp
, " cluster for: ");
2503 base_reg
->dump_to_pp (pp
, simple
);
2505 cluster
->dump_to_pp (pp
, simple
, multiline
);
2509 pp_string (pp
, "base region: {");
2510 base_reg
->dump_to_pp (pp
, simple
);
2511 pp_string (pp
, "} has cluster: {");
2512 cluster
->dump_to_pp (pp
, simple
, multiline
);
2513 pp_string (pp
, "}");
2517 pp_string (pp
, "}");
2519 pp_printf (pp
, "m_called_unknown_fn: %s",
2520 m_called_unknown_fn
? "TRUE" : "FALSE");
2525 /* Dump a multiline representation of this store to stderr. */
2528 store::dump (bool simple
) const
2531 pp_format_decoder (&pp
) = default_tree_printer
;
2532 pp_show_color (&pp
) = pp_show_color (global_dc
->printer
);
2533 pp
.buffer
->stream
= stderr
;
2534 dump_to_pp (&pp
, simple
, true, NULL
);
2539 /* Assert that this object is valid. */
2542 store::validate () const
2544 for (auto iter
: m_cluster_map
)
2545 iter
.second
->validate ();
2548 /* Return a new json::object of the form
2549 {PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
2550 ... for each cluster within parent region},
2551 ...for each parent region,
2552 "called_unknown_fn": true/false}. */
2555 store::to_json () const
2557 json::object
*store_obj
= new json::object ();
2559 /* Sort into some deterministic order. */
2560 auto_vec
<const region
*> base_regions
;
2561 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2562 iter
!= m_cluster_map
.end (); ++iter
)
2564 const region
*base_reg
= (*iter
).first
;
2565 base_regions
.safe_push (base_reg
);
2567 base_regions
.qsort (region::cmp_ptr_ptr
);
2569 /* Gather clusters, organize by parent region, so that we can group
2570 together locals, globals, etc. */
2571 auto_vec
<const region
*> parent_regions
;
2572 get_sorted_parent_regions (&parent_regions
, base_regions
);
2574 const region
*parent_reg
;
2576 FOR_EACH_VEC_ELT (parent_regions
, i
, parent_reg
)
2578 gcc_assert (parent_reg
);
2580 json::object
*clusters_in_parent_reg_obj
= new json::object ();
2582 const region
*base_reg
;
2584 FOR_EACH_VEC_ELT (base_regions
, j
, base_reg
)
2586 /* This is O(N * M), but N ought to be small. */
2587 if (base_reg
->get_parent_region () != parent_reg
)
2589 binding_cluster
*cluster
2590 = *const_cast<cluster_map_t
&> (m_cluster_map
).get (base_reg
);
2591 label_text base_reg_desc
= base_reg
->get_desc ();
2592 clusters_in_parent_reg_obj
->set (base_reg_desc
.get (),
2593 cluster
->to_json ());
2595 label_text parent_reg_desc
= parent_reg
->get_desc ();
2596 store_obj
->set (parent_reg_desc
.get (), clusters_in_parent_reg_obj
);
2599 store_obj
->set ("called_unknown_fn", new json::literal (m_called_unknown_fn
));
2604 /* Get any svalue bound to REG, or NULL. */
2607 store::get_any_binding (store_manager
*mgr
, const region
*reg
) const
2609 const region
*base_reg
= reg
->get_base_region ();
2610 binding_cluster
**cluster_slot
2611 = const_cast <cluster_map_t
&> (m_cluster_map
).get (base_reg
);
2614 return (*cluster_slot
)->get_any_binding (mgr
, reg
);
2617 /* Set the value of LHS_REG to RHS_SVAL. */
2620 store::set_value (store_manager
*mgr
, const region
*lhs_reg
,
2621 const svalue
*rhs_sval
,
2622 uncertainty_t
*uncertainty
)
2624 logger
*logger
= mgr
->get_logger ();
2627 remove_overlapping_bindings (mgr
, lhs_reg
, uncertainty
);
2629 if (lhs_reg
->get_type ())
2630 rhs_sval
= simplify_for_binding (rhs_sval
);
2631 /* ...but if we have no type for the region, retain any cast. */
2633 const region
*lhs_base_reg
= lhs_reg
->get_base_region ();
2634 binding_cluster
*lhs_cluster
;
2635 if (lhs_base_reg
->symbolic_for_unknown_ptr_p ())
2637 /* Reject attempting to bind values into a symbolic region
2638 for an unknown ptr; merely invalidate values below. */
2641 /* The LHS of the write is *UNKNOWN. If the RHS is a pointer,
2642 then treat the region being pointed to as having escaped. */
2643 if (const region_svalue
*ptr_sval
= rhs_sval
->dyn_cast_region_svalue ())
2645 const region
*ptr_dst
= ptr_sval
->get_pointee ();
2646 const region
*ptr_base_reg
= ptr_dst
->get_base_region ();
2647 mark_as_escaped (ptr_base_reg
);
2650 uncertainty
->on_maybe_bound_sval (rhs_sval
);
2652 else if (lhs_base_reg
->tracked_p ())
2654 lhs_cluster
= get_or_create_cluster (lhs_base_reg
);
2655 lhs_cluster
->bind (mgr
, lhs_reg
, rhs_sval
);
2659 /* Reject attempting to bind values into an untracked region;
2660 merely invalidate values below. */
2664 /* Bindings to a cluster can affect other clusters if a symbolic
2665 base region is involved.
2666 Writes to concrete clusters can't affect other concrete clusters,
2667 but can affect symbolic clusters.
2668 Writes to symbolic clusters can affect both concrete and symbolic
2670 Invalidate our knowledge of other clusters that might have been
2671 affected by the write.
2672 Gather the set of all svalues that might still be live even if
2673 the store doesn't refer to them. */
2674 svalue_set maybe_live_values
;
2675 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
2676 iter
!= m_cluster_map
.end (); ++iter
)
2678 const region
*iter_base_reg
= (*iter
).first
;
2679 binding_cluster
*iter_cluster
= (*iter
).second
;
2680 if (iter_base_reg
!= lhs_base_reg
2681 && (lhs_cluster
== NULL
2682 || lhs_cluster
->symbolic_p ()
2683 || iter_cluster
->symbolic_p ()))
2685 tristate t_alias
= eval_alias (lhs_base_reg
, iter_base_reg
);
2686 switch (t_alias
.get_value ())
2691 case tristate::TS_UNKNOWN
:
2694 pretty_printer
*pp
= logger
->get_printer ();
2695 logger
->start_log_line ();
2696 logger
->log_partial ("possible aliasing of ");
2697 iter_base_reg
->dump_to_pp (pp
, true);
2698 logger
->log_partial (" when writing SVAL: ");
2699 rhs_sval
->dump_to_pp (pp
, true);
2700 logger
->log_partial (" to LHS_REG: ");
2701 lhs_reg
->dump_to_pp (pp
, true);
2702 logger
->end_log_line ();
2704 /* Mark all of iter_cluster's iter_base_reg as unknown,
2705 using LHS_REG when considering overlaps, to handle
2706 symbolic vs concrete issues. */
2707 iter_cluster
->mark_region_as_unknown
2709 iter_base_reg
, /* reg_to_bind */
2710 lhs_reg
, /* reg_for_overlap */
2712 &maybe_live_values
);
2715 case tristate::TS_TRUE
:
2719 case tristate::TS_FALSE
:
2720 /* If they can't be aliases, then don't invalidate this
2726 /* Given the set of svalues that might still be live, process them
2727 (e.g. marking regions as escaped).
2728 We do this after the iteration to avoid potentially changing
2729 m_cluster_map whilst iterating over it. */
2730 on_maybe_live_values (maybe_live_values
);
2733 /* Determine if BASE_REG_A could be an alias of BASE_REG_B. */
2736 store::eval_alias (const region
*base_reg_a
,
2737 const region
*base_reg_b
) const
2739 /* SSA names can't alias. */
2740 tree decl_a
= base_reg_a
->maybe_get_decl ();
2741 if (decl_a
&& TREE_CODE (decl_a
) == SSA_NAME
)
2742 return tristate::TS_FALSE
;
2743 tree decl_b
= base_reg_b
->maybe_get_decl ();
2744 if (decl_b
&& TREE_CODE (decl_b
) == SSA_NAME
)
2745 return tristate::TS_FALSE
;
2747 /* Try both ways, for symmetry. */
2748 tristate ts_ab
= eval_alias_1 (base_reg_a
, base_reg_b
);
2749 if (ts_ab
.is_false ())
2750 return tristate::TS_FALSE
;
2751 tristate ts_ba
= eval_alias_1 (base_reg_b
, base_reg_a
);
2752 if (ts_ba
.is_false ())
2753 return tristate::TS_FALSE
;
2754 return tristate::TS_UNKNOWN
;
2757 /* Half of store::eval_alias; called twice for symmetry. */
2760 store::eval_alias_1 (const region
*base_reg_a
,
2761 const region
*base_reg_b
) const
2763 /* If they're in different memory spaces, they can't alias. */
2765 enum memory_space memspace_a
= base_reg_a
->get_memory_space ();
2766 if (memspace_a
!= MEMSPACE_UNKNOWN
)
2768 enum memory_space memspace_b
= base_reg_b
->get_memory_space ();
2769 if (memspace_b
!= MEMSPACE_UNKNOWN
2770 && memspace_a
!= memspace_b
)
2771 return tristate::TS_FALSE
;
2775 if (const symbolic_region
*sym_reg_a
2776 = base_reg_a
->dyn_cast_symbolic_region ())
2778 const svalue
*sval_a
= sym_reg_a
->get_pointer ();
2779 if (tree decl_b
= base_reg_b
->maybe_get_decl ())
2781 if (!may_be_aliased (decl_b
))
2782 return tristate::TS_FALSE
;
2783 if (sval_a
->get_kind () == SK_INITIAL
)
2784 if (!is_global_var (decl_b
))
2786 /* The initial value of a pointer can't point to a local. */
2787 return tristate::TS_FALSE
;
2790 if (sval_a
->get_kind () == SK_INITIAL
2791 && base_reg_b
->get_kind () == RK_HEAP_ALLOCATED
)
2793 /* The initial value of a pointer can't point to a
2794 region that was allocated on the heap after the beginning of the
2796 return tristate::TS_FALSE
;
2798 if (const widening_svalue
*widening_sval_a
2799 = sval_a
->dyn_cast_widening_svalue ())
2801 const svalue
*base
= widening_sval_a
->get_base_svalue ();
2802 if (const region_svalue
*region_sval
2803 = base
->dyn_cast_region_svalue ())
2805 const region
*pointee
= region_sval
->get_pointee ();
2806 /* If we have sval_a is WIDENING(®ION, OP), and
2807 B can't alias REGION, then B can't alias A either.
2808 For example, A might arise from
2809 for (ptr = ®ION; ...; ptr++)
2810 where sval_a is ptr in the 2nd iteration of the loop.
2811 We want to ensure that "*ptr" can only clobber things
2812 within REGION's base region. */
2813 tristate ts
= eval_alias (pointee
->get_base_region (),
2816 return tristate::TS_FALSE
;
2820 return tristate::TS_UNKNOWN
;
2823 /* Record all of the values in MAYBE_LIVE_VALUES as being possibly live. */
2826 store::on_maybe_live_values (const svalue_set
&maybe_live_values
)
2828 for (auto sval
: maybe_live_values
)
2830 if (const region_svalue
*ptr_sval
= sval
->dyn_cast_region_svalue ())
2832 const region
*base_reg
= ptr_sval
->get_pointee ()->get_base_region ();
2833 mark_as_escaped (base_reg
);
2838 /* Remove all bindings overlapping REG within this store. */
2841 store::clobber_region (store_manager
*mgr
, const region
*reg
)
2843 const region
*base_reg
= reg
->get_base_region ();
2844 binding_cluster
**slot
= m_cluster_map
.get (base_reg
);
2847 binding_cluster
*cluster
= *slot
;
2848 cluster
->clobber_region (mgr
, reg
);
2849 if (cluster
->redundant_p ())
2852 m_cluster_map
.remove (base_reg
);
2856 /* Remove any bindings for REG within this store. */
2859 store::purge_region (store_manager
*mgr
, const region
*reg
)
2861 const region
*base_reg
= reg
->get_base_region ();
2862 binding_cluster
**slot
= m_cluster_map
.get (base_reg
);
2865 binding_cluster
*cluster
= *slot
;
2866 cluster
->purge_region (mgr
, reg
);
2867 if (cluster
->redundant_p ())
2870 m_cluster_map
.remove (base_reg
);
2874 /* Fill REG with SVAL. */
2877 store::fill_region (store_manager
*mgr
, const region
*reg
, const svalue
*sval
)
2879 /* Filling an empty region is a no-op. */
2880 if (reg
->empty_p ())
2883 const region
*base_reg
= reg
->get_base_region ();
2884 if (base_reg
->symbolic_for_unknown_ptr_p ()
2885 || !base_reg
->tracked_p ())
2887 binding_cluster
*cluster
= get_or_create_cluster (base_reg
);
2888 cluster
->fill_region (mgr
, reg
, sval
);
2891 /* Zero-fill REG. */
2894 store::zero_fill_region (store_manager
*mgr
, const region
*reg
)
2896 region_model_manager
*sval_mgr
= mgr
->get_svalue_manager ();
2897 const svalue
*zero_sval
= sval_mgr
->get_or_create_int_cst (char_type_node
, 0);
2898 fill_region (mgr
, reg
, zero_sval
);
2901 /* Mark REG as having unknown content. */
2904 store::mark_region_as_unknown (store_manager
*mgr
, const region
*reg
,
2905 uncertainty_t
*uncertainty
,
2906 svalue_set
*maybe_live_values
)
2908 const region
*base_reg
= reg
->get_base_region ();
2909 if (base_reg
->symbolic_for_unknown_ptr_p ()
2910 || !base_reg
->tracked_p ())
2912 binding_cluster
*cluster
= get_or_create_cluster (base_reg
);
2913 cluster
->mark_region_as_unknown (mgr
, reg
, reg
, uncertainty
,
2917 /* Purge state involving SVAL. */
2920 store::purge_state_involving (const svalue
*sval
,
2921 region_model_manager
*sval_mgr
)
2923 auto_vec
<const region
*> base_regs_to_purge
;
2924 for (auto iter
: m_cluster_map
)
2926 const region
*base_reg
= iter
.first
;
2927 if (base_reg
->involves_p (sval
))
2928 base_regs_to_purge
.safe_push (base_reg
);
2931 binding_cluster
*cluster
= iter
.second
;
2932 cluster
->purge_state_involving (sval
, sval_mgr
);
2936 for (auto iter
: base_regs_to_purge
)
2937 purge_cluster (iter
);
2940 /* Get the cluster for BASE_REG, or NULL (const version). */
2942 const binding_cluster
*
2943 store::get_cluster (const region
*base_reg
) const
2945 gcc_assert (base_reg
);
2946 gcc_assert (base_reg
->get_base_region () == base_reg
);
2947 if (binding_cluster
**slot
2948 = const_cast <cluster_map_t
&> (m_cluster_map
).get (base_reg
))
2954 /* Get the cluster for BASE_REG, or NULL (non-const version). */
2957 store::get_cluster (const region
*base_reg
)
2959 gcc_assert (base_reg
);
2960 gcc_assert (base_reg
->get_base_region () == base_reg
);
2961 if (binding_cluster
**slot
= m_cluster_map
.get (base_reg
))
2967 /* Get the cluster for BASE_REG, creating it if doesn't already exist. */
2970 store::get_or_create_cluster (const region
*base_reg
)
2972 gcc_assert (base_reg
);
2973 gcc_assert (base_reg
->get_base_region () == base_reg
);
2975 /* We shouldn't create clusters for dereferencing an UNKNOWN ptr. */
2976 gcc_assert (!base_reg
->symbolic_for_unknown_ptr_p ());
2978 /* We shouldn't create clusters for base regions that aren't trackable. */
2979 gcc_assert (base_reg
->tracked_p ());
2981 if (binding_cluster
**slot
= m_cluster_map
.get (base_reg
))
2984 binding_cluster
*cluster
= new binding_cluster (base_reg
);
2985 m_cluster_map
.put (base_reg
, cluster
);
2990 /* Remove any cluster for BASE_REG, for use by
2991 region_model::unbind_region_and_descendents
2992 when popping stack frames and handling deleted heap regions. */
2995 store::purge_cluster (const region
*base_reg
)
2997 gcc_assert (base_reg
->get_base_region () == base_reg
);
2998 binding_cluster
**slot
= m_cluster_map
.get (base_reg
);
3001 binding_cluster
*cluster
= *slot
;
3003 m_cluster_map
.remove (base_reg
);
3006 /* Attempt to merge STORE_A and STORE_B into OUT_STORE.
3007 Return true if successful, or false if the stores can't be merged. */
3010 store::can_merge_p (const store
*store_a
, const store
*store_b
,
3011 store
*out_store
, store_manager
*mgr
,
3012 model_merger
*merger
)
3014 if (store_a
->m_called_unknown_fn
|| store_b
->m_called_unknown_fn
)
3015 out_store
->m_called_unknown_fn
= true;
3017 /* Get the union of all base regions for STORE_A and STORE_B. */
3018 hash_set
<const region
*> base_regions
;
3019 for (cluster_map_t::iterator iter_a
= store_a
->m_cluster_map
.begin ();
3020 iter_a
!= store_a
->m_cluster_map
.end (); ++iter_a
)
3022 const region
*base_reg_a
= (*iter_a
).first
;
3023 base_regions
.add (base_reg_a
);
3025 for (cluster_map_t::iterator iter_b
= store_b
->m_cluster_map
.begin ();
3026 iter_b
!= store_b
->m_cluster_map
.end (); ++iter_b
)
3028 const region
*base_reg_b
= (*iter_b
).first
;
3029 base_regions
.add (base_reg_b
);
3032 /* Sort the base regions before considering them. This ought not to
3033 affect the results, but can affect which types UNKNOWN_REGIONs are
3034 created for in a run; sorting them thus avoids minor differences
3036 auto_vec
<const region
*> vec_base_regions (base_regions
.elements ());
3037 for (hash_set
<const region
*>::iterator iter
= base_regions
.begin ();
3038 iter
!= base_regions
.end (); ++iter
)
3039 vec_base_regions
.quick_push (*iter
);
3040 vec_base_regions
.qsort (region::cmp_ptr_ptr
);
3042 const region
*base_reg
;
3043 FOR_EACH_VEC_ELT (vec_base_regions
, i
, base_reg
)
3045 const binding_cluster
*cluster_a
= store_a
->get_cluster (base_reg
);
3046 const binding_cluster
*cluster_b
= store_b
->get_cluster (base_reg
);
3047 /* At least one of cluster_a and cluster_b must be non-NULL. */
3048 binding_cluster
*out_cluster
3049 = out_store
->get_or_create_cluster (base_reg
);
3050 if (!binding_cluster::can_merge_p (cluster_a
, cluster_b
,
3051 out_cluster
, out_store
, mgr
, merger
))
3057 /* Mark the cluster for BASE_REG as having escaped.
3058 For use when handling an unrecognized function call, and
3059 for params to "top-level" calls.
3060 Further unknown function calls could touch it, even if the cluster
3061 isn't reachable from args of those calls. */
3064 store::mark_as_escaped (const region
*base_reg
)
3066 gcc_assert (base_reg
);
3067 gcc_assert (base_reg
->get_base_region () == base_reg
);
3069 if (base_reg
->symbolic_for_unknown_ptr_p ()
3070 || !base_reg
->tracked_p ())
3073 binding_cluster
*cluster
= get_or_create_cluster (base_reg
);
3074 cluster
->mark_as_escaped ();
3077 /* Handle an unknown fncall by updating any clusters that have escaped
3078 (either in this fncall, or in a prior one). */
3081 store::on_unknown_fncall (const gcall
*call
, store_manager
*mgr
,
3082 const conjured_purge
&p
)
3084 m_called_unknown_fn
= true;
3086 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
3087 iter
!= m_cluster_map
.end (); ++iter
)
3088 (*iter
).second
->on_unknown_fncall (call
, mgr
, p
);
3091 /* Return true if a non-const pointer to BASE_REG (or something within it)
3092 has escaped to code outside of the TU being analyzed. */
3095 store::escaped_p (const region
*base_reg
) const
3097 gcc_assert (base_reg
);
3098 gcc_assert (base_reg
->get_base_region () == base_reg
);
3100 /* "errno" can always be modified by external code. */
3101 if (base_reg
->get_kind () == RK_ERRNO
)
3104 if (binding_cluster
**cluster_slot
3105 = const_cast <cluster_map_t
&>(m_cluster_map
).get (base_reg
))
3106 return (*cluster_slot
)->escaped_p ();
3110 /* Populate OUT_PVS with a list of path_vars for describing SVAL based on
3111 this store, using VISITED to ensure the traversal terminates. */
3114 store::get_representative_path_vars (const region_model
*model
,
3115 svalue_set
*visited
,
3117 auto_vec
<path_var
> *out_pvs
) const
3121 /* Find all bindings that reference SVAL. */
3122 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
3123 iter
!= m_cluster_map
.end (); ++iter
)
3125 const region
*base_reg
= (*iter
).first
;
3126 binding_cluster
*cluster
= (*iter
).second
;
3127 cluster
->get_representative_path_vars (model
, visited
, base_reg
, sval
,
3131 if (const initial_svalue
*init_sval
= sval
->dyn_cast_initial_svalue ())
3133 const region
*reg
= init_sval
->get_region ();
3134 if (path_var pv
= model
->get_representative_path_var (reg
,
3136 out_pvs
->safe_push (pv
);
3140 /* Remove all bindings overlapping REG within this store, removing
3141 any clusters that become redundant.
3143 If UNCERTAINTY is non-NULL, use it to record any svalues that
3144 were removed, as being maybe-bound. */
3147 store::remove_overlapping_bindings (store_manager
*mgr
, const region
*reg
,
3148 uncertainty_t
*uncertainty
)
3150 const region
*base_reg
= reg
->get_base_region ();
3151 if (binding_cluster
**cluster_slot
= m_cluster_map
.get (base_reg
))
3153 binding_cluster
*cluster
= *cluster_slot
;
3154 if (reg
== base_reg
&& !escaped_p (base_reg
))
3156 /* Remove whole cluster. */
3157 m_cluster_map
.remove (base_reg
);
3161 /* Pass NULL for the maybe_live_values here, as we don't want to
3162 record the old svalues as being maybe-bound. */
3163 cluster
->remove_overlapping_bindings (mgr
, reg
, uncertainty
, NULL
);
3167 /* Subclass of visitor that accumulates a hash_set of the regions that
3170 struct region_finder
: public visitor
3172 void visit_region (const region
*reg
) final override
3177 hash_set
<const region
*> m_regs
;
3180 /* Canonicalize this store, to maximize the chance of equality between
3184 store::canonicalize (store_manager
*mgr
)
3187 cluster for: HEAP_ALLOCATED_REGION(543)
3190 where the heap region is empty and unreferenced, then purge that
3191 cluster, to avoid unbounded state chains involving these. */
3193 /* Find regions that are referenced by bound values in the store. */
3195 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
3196 iter
!= m_cluster_map
.end (); ++iter
)
3198 binding_cluster
*cluster
= (*iter
).second
;
3199 for (binding_cluster::iterator_t bind_iter
= cluster
->m_map
.begin ();
3200 bind_iter
!= cluster
->m_map
.end (); ++bind_iter
)
3201 (*bind_iter
).second
->accept (&s
);
3204 /* Locate heap-allocated regions that have empty bindings that weren't
3206 hash_set
<const region
*> purgeable_regions
;
3207 for (cluster_map_t::iterator iter
= m_cluster_map
.begin ();
3208 iter
!= m_cluster_map
.end (); ++iter
)
3210 const region
*base_reg
= (*iter
).first
;
3211 binding_cluster
*cluster
= (*iter
).second
;
3212 if (base_reg
->get_kind () == RK_HEAP_ALLOCATED
)
3214 /* Don't purge a heap-allocated region that's been marked as
3215 escaping, since this could be recording that a ptr to it
3216 was written to an unknown symbolic region along this
3217 path, and so we don't know whether it's referenced or
3218 not, and hence should report it as leaking
3219 (PR analyzer/106473). */
3220 if (cluster
->escaped_p ())
3223 if (cluster
->empty_p ())
3224 if (!s
.m_regs
.contains (base_reg
))
3225 purgeable_regions
.add (base_reg
);
3227 /* Also cover the UNKNOWN case. */
3228 if (const svalue
*sval
= cluster
->maybe_get_simple_value (mgr
))
3229 if (sval
->get_kind () == SK_UNKNOWN
)
3230 if (!s
.m_regs
.contains (base_reg
))
3231 purgeable_regions
.add (base_reg
);
3236 for (hash_set
<const region
*>::iterator iter
= purgeable_regions
.begin ();
3237 iter
!= purgeable_regions
.end (); ++iter
)
3239 const region
*base_reg
= *iter
;
3240 purge_cluster (base_reg
);
3244 /* Subroutine for use by exploded_path::feasible_p.
3246 We need to deal with state differences between:
3247 (a) when the exploded_graph is being initially constructed and
3248 (b) when replaying the state changes along a specific path in
3249 in exploded_path::feasible_p.
3251 In (a), state merging happens, so when exploring a loop
3252 for (i = 0; i < 1024; i++)
3253 on successive iterations we have i == 0, then i == WIDENING.
3255 In (b), no state merging happens, so naively replaying the path
3256 that goes twice through the loop then exits it
3257 would lead to i == 0, then i == 1, and then a (i >= 1024) eedge
3258 that exits the loop, which would be found to be infeasible as i == 1,
3259 and the path would be rejected.
3261 We need to fix up state during replay. This subroutine is
3262 called whenever we enter a supernode that we've already
3263 visited along this exploded_path, passing in OTHER_STORE
3264 from the destination enode's state.
3266 Find bindings to widening values in OTHER_STORE.
3267 For all that are found, update the binding in this store to UNKNOWN. */
3270 store::loop_replay_fixup (const store
*other_store
,
3271 region_model_manager
*mgr
)
3273 gcc_assert (other_store
);
3274 for (cluster_map_t::iterator iter
= other_store
->m_cluster_map
.begin ();
3275 iter
!= other_store
->m_cluster_map
.end (); ++iter
)
3277 const region
*base_reg
= (*iter
).first
;
3278 binding_cluster
*cluster
= (*iter
).second
;
3279 for (binding_cluster::iterator_t bind_iter
= cluster
->m_map
.begin ();
3280 bind_iter
!= cluster
->m_map
.end (); ++bind_iter
)
3282 const binding_key
*key
= (*bind_iter
).first
;
3283 const svalue
*sval
= (*bind_iter
).second
;
3284 if (sval
->get_kind () == SK_WIDENING
)
3286 binding_cluster
*this_cluster
3287 = get_or_create_cluster (base_reg
);
3288 const svalue
*unknown
3289 = mgr
->get_or_create_unknown_svalue (sval
->get_type ());
3290 this_cluster
->bind_key (key
, unknown
);
3296 /* Use R to replay the bindings from SUMMARY into this object. */
3299 store::replay_call_summary (call_summary_replay
&r
,
3300 const store
&summary
)
3302 if (summary
.m_called_unknown_fn
)
3304 /* A call to an external function occurred in the summary.
3305 Hence we need to invalidate our knownledge of globals,
3306 escaped regions, etc. */
3307 on_unknown_fncall (r
.get_call_stmt (),
3308 r
.get_store_manager (),
3309 conjured_purge (r
.get_caller_model (),
3313 auto_vec
<const region
*> keys (summary
.m_cluster_map
.elements ());
3314 for (auto kv
: summary
.m_cluster_map
)
3315 keys
.quick_push (kv
.first
);
3316 keys
.qsort (region::cmp_ptr_ptr
);
3317 for (auto base_reg
: keys
)
3318 replay_call_summary_cluster (r
, summary
, base_reg
);
3321 /* Use R and SUMMARY to replay the bindings in SUMMARY_CLUSTER
3322 into this object. */
3325 store::replay_call_summary_cluster (call_summary_replay
&r
,
3326 const store
&summary
,
3327 const region
*summary_base_reg
)
3329 const call_details
&cd
= r
.get_call_details ();
3330 region_model_manager
*reg_mgr
= r
.get_manager ();
3331 store_manager
*mgr
= reg_mgr
->get_store_manager ();
3332 const binding_cluster
*summary_cluster
3333 = summary
.get_cluster (summary_base_reg
);
3335 /* Handle "ESCAPED" and "TOUCHED" flags. */
3336 if (summary_cluster
->escaped_p () || summary_cluster
->touched_p ())
3337 if (const region
*caller_reg
3338 = r
.convert_region_from_summary (summary_base_reg
))
3340 const region
*caller_base_reg
= caller_reg
->get_base_region ();
3341 if (caller_base_reg
->tracked_p ()
3342 && !caller_base_reg
->symbolic_for_unknown_ptr_p ())
3344 binding_cluster
*caller_cluster
3345 = get_or_create_cluster (caller_base_reg
);
3346 if (summary_cluster
->escaped_p ())
3347 caller_cluster
->mark_as_escaped ();
3348 if (summary_cluster
->touched_p ())
3349 caller_cluster
->m_touched
= true;
3353 switch (summary_base_reg
->get_kind ())
3355 /* Top-level regions. */
3361 case RK_THREAD_LOCAL
:
3363 /* Child regions. */
3370 /* Other regions. */
3373 /* These should never be the base region of a binding cluster. */
3380 /* These can be marked as escaping. */
3385 const symbolic_region
*summary_symbolic_reg
3386 = as_a
<const symbolic_region
*> (summary_base_reg
);
3387 const svalue
*summary_ptr_sval
= summary_symbolic_reg
->get_pointer ();
3388 const svalue
*caller_ptr_sval
3389 = r
.convert_svalue_from_summary (summary_ptr_sval
);
3390 if (!caller_ptr_sval
)
3392 const region
*caller_dest_reg
3393 = cd
.get_model ()->deref_rvalue (caller_ptr_sval
,
3396 const svalue
*summary_sval
3397 = summary
.get_any_binding (mgr
, summary_base_reg
);
3400 const svalue
*caller_sval
3401 = r
.convert_svalue_from_summary (summary_sval
);
3404 reg_mgr
->get_or_create_unknown_svalue (summary_sval
->get_type ());
3405 set_value (mgr
, caller_dest_reg
,
3406 caller_sval
, NULL
/* uncertainty_t * */);
3410 case RK_HEAP_ALLOCATED
:
3415 const region
*caller_dest_reg
3416 = r
.convert_region_from_summary (summary_base_reg
);
3417 if (!caller_dest_reg
)
3419 const svalue
*summary_sval
3420 = summary
.get_any_binding (mgr
, summary_base_reg
);
3422 summary_sval
= reg_mgr
->get_or_create_compound_svalue
3423 (summary_base_reg
->get_type (),
3424 summary_cluster
->get_map ());
3425 const svalue
*caller_sval
3426 = r
.convert_svalue_from_summary (summary_sval
);
3429 reg_mgr
->get_or_create_unknown_svalue (summary_sval
->get_type ());
3430 set_value (mgr
, caller_dest_reg
,
3431 caller_sval
, NULL
/* uncertainty_t * */);
3436 /* Ignore bindings of alloca regions in the summary. */
3443 namespace selftest
{
3445 /* Verify that bit_range::intersects_p works as expected. */
3448 test_bit_range_intersects_p ()
3450 bit_range
b0 (0, 1);
3451 bit_range
b1 (1, 1);
3452 bit_range
b2 (2, 1);
3453 bit_range
b3 (3, 1);
3454 bit_range
b4 (4, 1);
3455 bit_range
b5 (5, 1);
3456 bit_range
b6 (6, 1);
3457 bit_range
b7 (7, 1);
3458 bit_range
b1_to_6 (1, 6);
3459 bit_range
b0_to_7 (0, 8);
3460 bit_range
b3_to_5 (3, 3);
3461 bit_range
b6_to_7 (6, 2);
3463 /* self-intersection is true. */
3464 ASSERT_TRUE (b0
.intersects_p (b0
));
3465 ASSERT_TRUE (b7
.intersects_p (b7
));
3466 ASSERT_TRUE (b1_to_6
.intersects_p (b1_to_6
));
3467 ASSERT_TRUE (b0_to_7
.intersects_p (b0_to_7
));
3469 ASSERT_FALSE (b0
.intersects_p (b1
));
3470 ASSERT_FALSE (b1
.intersects_p (b0
));
3471 ASSERT_FALSE (b0
.intersects_p (b7
));
3472 ASSERT_FALSE (b7
.intersects_p (b0
));
3474 ASSERT_TRUE (b0_to_7
.intersects_p (b0
));
3475 ASSERT_TRUE (b0_to_7
.intersects_p (b7
));
3476 ASSERT_TRUE (b0
.intersects_p (b0_to_7
));
3477 ASSERT_TRUE (b7
.intersects_p (b0_to_7
));
3479 ASSERT_FALSE (b0
.intersects_p (b1_to_6
));
3480 ASSERT_FALSE (b1_to_6
.intersects_p (b0
));
3481 ASSERT_TRUE (b1
.intersects_p (b1_to_6
));
3482 ASSERT_TRUE (b1_to_6
.intersects_p (b1
));
3483 ASSERT_TRUE (b1_to_6
.intersects_p (b6
));
3484 ASSERT_FALSE (b1_to_6
.intersects_p (b7
));
3486 ASSERT_TRUE (b1_to_6
.intersects_p (b0_to_7
));
3487 ASSERT_TRUE (b0_to_7
.intersects_p (b1_to_6
));
3489 ASSERT_FALSE (b3_to_5
.intersects_p (b6_to_7
));
3490 ASSERT_FALSE (b6_to_7
.intersects_p (b3_to_5
));
3494 ASSERT_TRUE (b1_to_6
.intersects_p (b0_to_7
, &r1
, &r2
));
3495 ASSERT_EQ (r1
.get_start_bit_offset (), 0);
3496 ASSERT_EQ (r1
.m_size_in_bits
, 6);
3497 ASSERT_EQ (r2
.get_start_bit_offset (), 1);
3498 ASSERT_EQ (r2
.m_size_in_bits
, 6);
3500 ASSERT_TRUE (b0_to_7
.intersects_p (b1_to_6
, &r1
, &r2
));
3501 ASSERT_EQ (r1
.get_start_bit_offset (), 1);
3502 ASSERT_EQ (r1
.m_size_in_bits
, 6);
3503 ASSERT_EQ (r2
.get_start_bit_offset (), 0);
3504 ASSERT_EQ (r2
.m_size_in_bits
, 6);
3507 /* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
3510 assert_bit_range_from_mask_eq (const location
&loc
,
3511 unsigned HOST_WIDE_INT mask
,
3512 const bit_range
&expected
)
3514 bit_range
actual (0, 0);
3515 bool ok
= bit_range::from_mask (mask
, &actual
);
3516 ASSERT_TRUE_AT (loc
, ok
);
3517 ASSERT_EQ_AT (loc
, actual
, expected
);
3520 /* Assert that bit_range::from_mask (MASK) returns true, and writes
3521 out EXPECTED_BIT_RANGE. */
3523 #define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
3524 SELFTEST_BEGIN_STMT \
3525 assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
3526 EXPECTED_BIT_RANGE); \
3529 /* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
3532 assert_no_bit_range_from_mask_eq (const location
&loc
,
3533 unsigned HOST_WIDE_INT mask
)
3535 bit_range
actual (0, 0);
3536 bool ok
= bit_range::from_mask (mask
, &actual
);
3537 ASSERT_FALSE_AT (loc
, ok
);
3540 /* Assert that bit_range::from_mask (MASK) returns false. */
3542 #define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
3543 SELFTEST_BEGIN_STMT \
3544 assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
3547 /* Verify that bit_range::from_mask works as expected. */
3550 test_bit_range_from_mask ()
3552 /* Should fail on zero. */
3553 ASSERT_NO_BIT_RANGE_FROM_MASK (0);
3555 /* Verify 1-bit masks. */
3556 ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
3557 ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
3558 ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
3559 ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
3560 ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
3561 ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
3562 ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
3563 ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
3565 /* Verify N-bit masks starting at bit 0. */
3566 ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
3567 ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
3568 ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
3569 ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
3570 ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
3571 ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
3572 ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
3573 ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
3575 /* Various other tests. */
3576 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
3577 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
3578 ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
3580 /* Multiple ranges of set bits should fail. */
3581 ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
3582 ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
3585 /* Implementation detail of ASSERT_OVERLAP. */
3588 assert_overlap (const location
&loc
,
3589 const concrete_binding
*b1
,
3590 const concrete_binding
*b2
)
3592 ASSERT_TRUE_AT (loc
, b1
->overlaps_p (*b2
));
3593 ASSERT_TRUE_AT (loc
, b2
->overlaps_p (*b1
));
3596 /* Implementation detail of ASSERT_DISJOINT. */
3599 assert_disjoint (const location
&loc
,
3600 const concrete_binding
*b1
,
3601 const concrete_binding
*b2
)
3603 ASSERT_FALSE_AT (loc
, b1
->overlaps_p (*b2
));
3604 ASSERT_FALSE_AT (loc
, b2
->overlaps_p (*b1
));
/* Assert that B1 and B2 overlap, checking both ways.  */

#define ASSERT_OVERLAP(B1, B2) \
  SELFTEST_BEGIN_STMT \
  assert_overlap (SELFTEST_LOCATION, B1, B2); \
  SELFTEST_END_STMT
/* Assert that B1 and B2 do not overlap, checking both ways.  */

#define ASSERT_DISJOINT(B1, B2) \
  SELFTEST_BEGIN_STMT \
  assert_disjoint (SELFTEST_LOCATION, B1, B2); \
  SELFTEST_END_STMT
3621 /* Verify that concrete_binding::overlaps_p works as expected. */
3624 test_binding_key_overlap ()
3626 store_manager
mgr (NULL
);
3628 /* Various 8-bit bindings. */
3629 const concrete_binding
*cb_0_7
= mgr
.get_concrete_binding (0, 8);
3630 const concrete_binding
*cb_8_15
= mgr
.get_concrete_binding (8, 8);
3631 const concrete_binding
*cb_16_23
= mgr
.get_concrete_binding (16, 8);
3632 const concrete_binding
*cb_24_31
= mgr
.get_concrete_binding (24, 8);
3634 /* 16-bit bindings. */
3635 const concrete_binding
*cb_0_15
= mgr
.get_concrete_binding (0, 16);
3636 const concrete_binding
*cb_8_23
= mgr
.get_concrete_binding (8, 16);
3637 const concrete_binding
*cb_16_31
= mgr
.get_concrete_binding (16, 16);
3639 /* 32-bit binding. */
3640 const concrete_binding
*cb_0_31
= mgr
.get_concrete_binding (0, 32);
3642 /* Everything should self-overlap. */
3643 ASSERT_OVERLAP (cb_0_7
, cb_0_7
);
3644 ASSERT_OVERLAP (cb_8_15
, cb_8_15
);
3645 ASSERT_OVERLAP (cb_16_23
, cb_16_23
);
3646 ASSERT_OVERLAP (cb_24_31
, cb_24_31
);
3647 ASSERT_OVERLAP (cb_0_15
, cb_0_15
);
3648 ASSERT_OVERLAP (cb_8_23
, cb_8_23
);
3649 ASSERT_OVERLAP (cb_16_31
, cb_16_31
);
3650 ASSERT_OVERLAP (cb_0_31
, cb_0_31
);
3652 /* Verify the 8-bit bindings that don't overlap each other. */
3653 ASSERT_DISJOINT (cb_0_7
, cb_8_15
);
3654 ASSERT_DISJOINT (cb_8_15
, cb_16_23
);
3656 /* Check for overlap of differently-sized bindings. */
3657 ASSERT_OVERLAP (cb_0_7
, cb_0_31
);
3658 /* ...and with differing start points. */
3659 ASSERT_OVERLAP (cb_8_15
, cb_0_31
);
3660 ASSERT_DISJOINT (cb_8_15
, cb_16_31
);
3661 ASSERT_OVERLAP (cb_16_23
, cb_0_31
);
3662 ASSERT_OVERLAP (cb_16_31
, cb_0_31
);
3664 ASSERT_DISJOINT (cb_0_7
, cb_8_23
);
3665 ASSERT_OVERLAP (cb_8_23
, cb_16_23
);
3666 ASSERT_OVERLAP (cb_8_23
, cb_16_31
);
3667 ASSERT_DISJOINT (cb_8_23
, cb_24_31
);
3670 /* Run all of the selftests within this file. */
3673 analyzer_store_cc_tests ()
3675 test_bit_range_intersects_p ();
3676 test_bit_range_from_mask ();
3677 test_binding_key_overlap ();
3680 } // namespace selftest
3682 #endif /* CHECKING_P */
3686 #endif /* #if ENABLE_ANALYZER */