/* gcc/analyzer/kf.cc
   (extracted from official-gcc.git,
   blob 3e319a076bbda2a705ac7d170338c2221b6db7bf;
   commit subject: "Add testcase from PR108962").  */
/* Handling for the known behavior of various specific functions.
   Copyright (C) 2020-2023 Free Software Foundation, Inc.
   Contributed by David Malcolm <dmalcolm@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "function.h"
#include "basic-block.h"
#include "gimple.h"
#include "diagnostic-core.h"
#include "diagnostic-metadata.h"
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "diagnostic.h"
#include "analyzer/region-model.h"
#include "analyzer/call-details.h"
#include "analyzer/call-info.h"
#include "make-unique.h"
39 #if ENABLE_ANALYZER
41 namespace ana {
43 /* Implementations of specific functions. */
45 /* Handler for "alloca". */
47 class kf_alloca : public known_function
49 public:
50 bool matches_call_types_p (const call_details &cd) const final override
52 return cd.num_args () == 1;
54 void impl_call_pre (const call_details &cd) const final override;
57 void
58 kf_alloca::impl_call_pre (const call_details &cd) const
60 const svalue *size_sval = cd.get_arg_svalue (0);
62 region_model *model = cd.get_model ();
63 region_model_manager *mgr = cd.get_manager ();
65 const region *new_reg
66 = model->create_region_for_alloca (size_sval, cd.get_ctxt ());
67 const svalue *ptr_sval
68 = mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
69 cd.maybe_set_lhs (ptr_sval);
72 /* Handler for:
73 void __atomic_exchange (type *ptr, type *val, type *ret, int memorder). */
75 class kf_atomic_exchange : public internal_known_function
77 public:
78 /* This is effectively:
79 *RET = *PTR;
80 *PTR = *VAL;
82 void impl_call_pre (const call_details &cd) const final override
84 const svalue *ptr_ptr_sval = cd.get_arg_svalue (0);
85 tree ptr_ptr_tree = cd.get_arg_tree (0);
86 const svalue *val_ptr_sval = cd.get_arg_svalue (1);
87 tree val_ptr_tree = cd.get_arg_tree (1);
88 const svalue *ret_ptr_sval = cd.get_arg_svalue (2);
89 tree ret_ptr_tree = cd.get_arg_tree (2);
90 /* Ignore the memorder param. */
92 region_model *model = cd.get_model ();
93 region_model_context *ctxt = cd.get_ctxt ();
95 const region *val_region
96 = model->deref_rvalue (val_ptr_sval, val_ptr_tree, ctxt);
97 const svalue *star_val_sval = model->get_store_value (val_region, ctxt);
98 const region *ptr_region
99 = model->deref_rvalue (ptr_ptr_sval, ptr_ptr_tree, ctxt);
100 const svalue *star_ptr_sval = model->get_store_value (ptr_region, ctxt);
101 const region *ret_region
102 = model->deref_rvalue (ret_ptr_sval, ret_ptr_tree, ctxt);
103 model->set_value (ptr_region, star_val_sval, ctxt);
104 model->set_value (ret_region, star_ptr_sval, ctxt);
108 /* Handler for:
109 __atomic_exchange_n (type *ptr, type val, int memorder). */
111 class kf_atomic_exchange_n : public internal_known_function
113 public:
114 /* This is effectively:
115 RET = *PTR;
116 *PTR = VAL;
117 return RET;
119 void impl_call_pre (const call_details &cd) const final override
121 const svalue *ptr_sval = cd.get_arg_svalue (0);
122 tree ptr_tree = cd.get_arg_tree (0);
123 const svalue *set_sval = cd.get_arg_svalue (1);
124 /* Ignore the memorder param. */
126 region_model *model = cd.get_model ();
127 region_model_context *ctxt = cd.get_ctxt ();
129 const region *dst_region = model->deref_rvalue (ptr_sval, ptr_tree, ctxt);
130 const svalue *ret_sval = model->get_store_value (dst_region, ctxt);
131 model->set_value (dst_region, set_sval, ctxt);
132 cd.maybe_set_lhs (ret_sval);
136 /* Handler for:
137 type __atomic_fetch_add (type *ptr, type val, int memorder);
138 type __atomic_fetch_sub (type *ptr, type val, int memorder);
139 type __atomic_fetch_and (type *ptr, type val, int memorder);
140 type __atomic_fetch_xor (type *ptr, type val, int memorder);
141 type __atomic_fetch_or (type *ptr, type val, int memorder);
144 class kf_atomic_fetch_op : public internal_known_function
146 public:
147 kf_atomic_fetch_op (enum tree_code op): m_op (op) {}
149 /* This is effectively:
150 RET = *PTR;
151 *PTR = RET OP VAL;
152 return RET;
154 void impl_call_pre (const call_details &cd) const final override
156 const svalue *ptr_sval = cd.get_arg_svalue (0);
157 tree ptr_tree = cd.get_arg_tree (0);
158 const svalue *val_sval = cd.get_arg_svalue (1);
159 /* Ignore the memorder param. */
161 region_model *model = cd.get_model ();
162 region_model_manager *mgr = cd.get_manager ();
163 region_model_context *ctxt = cd.get_ctxt ();
165 const region *star_ptr_region
166 = model->deref_rvalue (ptr_sval, ptr_tree, ctxt);
167 const svalue *old_sval = model->get_store_value (star_ptr_region, ctxt);
168 const svalue *new_sval = mgr->get_or_create_binop (old_sval->get_type (),
169 m_op,
170 old_sval, val_sval);
171 model->set_value (star_ptr_region, new_sval, ctxt);
172 cd.maybe_set_lhs (old_sval);
175 private:
176 enum tree_code m_op;
179 /* Handler for:
180 type __atomic_add_fetch (type *ptr, type val, int memorder);
181 type __atomic_sub_fetch (type *ptr, type val, int memorder);
182 type __atomic_and_fetch (type *ptr, type val, int memorder);
183 type __atomic_xor_fetch (type *ptr, type val, int memorder);
184 type __atomic_or_fetch (type *ptr, type val, int memorder);
187 class kf_atomic_op_fetch : public internal_known_function
189 public:
190 kf_atomic_op_fetch (enum tree_code op): m_op (op) {}
192 /* This is effectively:
193 *PTR = RET OP VAL;
194 return *PTR;
196 void impl_call_pre (const call_details &cd) const final override
198 const svalue *ptr_sval = cd.get_arg_svalue (0);
199 tree ptr_tree = cd.get_arg_tree (0);
200 const svalue *val_sval = cd.get_arg_svalue (1);
201 /* Ignore the memorder param. */
203 region_model *model = cd.get_model ();
204 region_model_manager *mgr = cd.get_manager ();
205 region_model_context *ctxt = cd.get_ctxt ();
207 const region *star_ptr_region
208 = model->deref_rvalue (ptr_sval, ptr_tree, ctxt);
209 const svalue *old_sval = model->get_store_value (star_ptr_region, ctxt);
210 const svalue *new_sval = mgr->get_or_create_binop (old_sval->get_type (),
211 m_op,
212 old_sval, val_sval);
213 model->set_value (star_ptr_region, new_sval, ctxt);
214 cd.maybe_set_lhs (new_sval);
217 private:
218 enum tree_code m_op;
221 /* Handler for:
222 void __atomic_load (type *ptr, type *ret, int memorder). */
224 class kf_atomic_load : public internal_known_function
226 public:
227 /* This is effectively:
228 *RET = *PTR;
230 void impl_call_pre (const call_details &cd) const final override
232 const svalue *ptr_ptr_sval = cd.get_arg_svalue (0);
233 tree ptr_ptr_tree = cd.get_arg_tree (0);
234 const svalue *ret_ptr_sval = cd.get_arg_svalue (1);
235 tree ret_ptr_tree = cd.get_arg_tree (1);
236 /* Ignore the memorder param. */
238 region_model *model = cd.get_model ();
239 region_model_context *ctxt = cd.get_ctxt ();
241 const region *ptr_region
242 = model->deref_rvalue (ptr_ptr_sval, ptr_ptr_tree, ctxt);
243 const svalue *star_ptr_sval = model->get_store_value (ptr_region, ctxt);
244 const region *ret_region
245 = model->deref_rvalue (ret_ptr_sval, ret_ptr_tree, ctxt);
246 model->set_value (ret_region, star_ptr_sval, ctxt);
250 /* Handler for:
251 type __atomic_load_n (type *ptr, int memorder) */
253 class kf_atomic_load_n : public internal_known_function
255 public:
256 /* This is effectively:
257 RET = *PTR;
258 return RET;
260 void impl_call_pre (const call_details &cd) const final override
262 const svalue *ptr_ptr_sval = cd.get_arg_svalue (0);
263 tree ptr_ptr_tree = cd.get_arg_tree (0);
264 /* Ignore the memorder param. */
266 region_model *model = cd.get_model ();
267 region_model_context *ctxt = cd.get_ctxt ();
269 const region *ptr_region
270 = model->deref_rvalue (ptr_ptr_sval, ptr_ptr_tree, ctxt);
271 const svalue *star_ptr_sval = model->get_store_value (ptr_region, ctxt);
272 cd.maybe_set_lhs (star_ptr_sval);
276 /* Handler for:
277 void __atomic_store_n (type *ptr, type val, int memorder) */
279 class kf_atomic_store_n : public internal_known_function
281 public:
282 /* This is effectively:
283 *PTR = VAL;
285 void impl_call_pre (const call_details &cd) const final override
287 const svalue *ptr_sval = cd.get_arg_svalue (0);
288 tree ptr_tree = cd.get_arg_tree (0);
289 const svalue *new_sval = cd.get_arg_svalue (1);
290 /* Ignore the memorder param. */
292 region_model *model = cd.get_model ();
293 region_model_context *ctxt = cd.get_ctxt ();
295 const region *star_ptr_region
296 = model->deref_rvalue (ptr_sval, ptr_tree, ctxt);
297 model->set_value (star_ptr_region, new_sval, ctxt);
301 /* Handler for "__builtin_expect" etc. */
303 class kf_expect : public internal_known_function
305 public:
306 void impl_call_pre (const call_details &cd) const final override
308 /* __builtin_expect's return value is its initial argument. */
309 const svalue *sval = cd.get_arg_svalue (0);
310 cd.maybe_set_lhs (sval);
314 /* Handler for "calloc". */
316 class kf_calloc : public known_function
318 public:
319 bool matches_call_types_p (const call_details &cd) const final override
321 return (cd.num_args () == 2
322 && cd.arg_is_size_p (0)
323 && cd.arg_is_size_p (1));
325 void impl_call_pre (const call_details &cd) const final override;
328 void
329 kf_calloc::impl_call_pre (const call_details &cd) const
331 region_model *model = cd.get_model ();
332 region_model_manager *mgr = cd.get_manager ();
333 const svalue *nmemb_sval = cd.get_arg_svalue (0);
334 const svalue *size_sval = cd.get_arg_svalue (1);
335 /* TODO: check for overflow here? */
336 const svalue *prod_sval
337 = mgr->get_or_create_binop (size_type_node, MULT_EXPR,
338 nmemb_sval, size_sval);
339 const region *new_reg
340 = model->get_or_create_region_for_heap_alloc (prod_sval, cd.get_ctxt ());
341 const region *sized_reg
342 = mgr->get_sized_region (new_reg, NULL_TREE, prod_sval);
343 model->zero_fill_region (sized_reg);
344 if (cd.get_lhs_type ())
346 const svalue *ptr_sval
347 = mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
348 cd.maybe_set_lhs (ptr_sval);
352 /* Handler for glibc's "__errno_location". */
354 class kf_errno_location : public known_function
356 public:
357 bool matches_call_types_p (const call_details &cd) const final override
359 return cd.num_args () == 0;
362 void impl_call_pre (const call_details &cd) const final override
364 if (cd.get_lhs_region ())
366 region_model_manager *mgr = cd.get_manager ();
367 const region *errno_reg = mgr->get_errno_region ();
368 const svalue *errno_ptr = mgr->get_ptr_svalue (cd.get_lhs_type (),
369 errno_reg);
370 cd.maybe_set_lhs (errno_ptr);
375 /* Handler for "error" and "error_at_line" from GNU's non-standard <error.h>.
376 MIN_ARGS identifies the minimum number of expected arguments
377 to be consistent with such a call (3 and 5 respectively). */
379 class kf_error : public known_function
381 public:
382 kf_error (unsigned min_args) : m_min_args (min_args) {}
384 bool matches_call_types_p (const call_details &cd) const final override
386 return (cd.num_args () >= m_min_args
387 && cd.get_arg_type (0) == integer_type_node);
390 void impl_call_pre (const call_details &cd) const final override;
392 private:
393 unsigned m_min_args;
396 void
397 kf_error::impl_call_pre (const call_details &cd) const
399 /* The process exits if status != 0, so it only continues
400 for the case where status == 0.
401 Add that constraint, or terminate this analysis path. */
402 tree status = cd.get_arg_tree (0);
403 region_model_context *ctxt = cd.get_ctxt ();
404 region_model *model = cd.get_model ();
405 if (!model->add_constraint (status, EQ_EXPR, integer_zero_node, ctxt))
406 if (ctxt)
407 ctxt->terminate_path ();
410 /* Handler for "free", after sm-handling.
412 If the ptr points to an underlying heap region, delete the region,
413 poisoning pointers to it and regions within it.
415 We delay this until after sm-state has been updated so that the
416 sm-handling can transition all of the various casts of the pointer
417 to a "freed" state *before* we delete the related region here.
419 This has to be done here so that the sm-handling can use the fact
420 that they point to the same region to establish that they are equal
421 (in region_model::eval_condition), and thus transition
422 all pointers to the region to the "freed" state together, regardless
423 of casts. */
425 class kf_free : public known_function
427 public:
428 bool matches_call_types_p (const call_details &cd) const final override
430 return (cd.num_args () == 0 && cd.arg_is_pointer_p (0));
432 void impl_call_post (const call_details &cd) const final override;
435 void
436 kf_free::impl_call_post (const call_details &cd) const
438 const svalue *ptr_sval = cd.get_arg_svalue (0);
439 if (const region *freed_reg = ptr_sval->maybe_get_region ())
441 /* If the ptr points to an underlying heap region, delete it,
442 poisoning pointers. */
443 region_model *model = cd.get_model ();
444 model->unbind_region_and_descendents (freed_reg, POISON_KIND_FREED);
445 model->unset_dynamic_extents (freed_reg);
449 /* Handle the on_call_pre part of "malloc". */
451 class kf_malloc : public known_function
453 public:
454 bool matches_call_types_p (const call_details &cd) const final override
456 return (cd.num_args () == 1
457 && cd.arg_is_size_p (0));
459 void impl_call_pre (const call_details &cd) const final override;
462 void
463 kf_malloc::impl_call_pre (const call_details &cd) const
465 region_model *model = cd.get_model ();
466 region_model_manager *mgr = cd.get_manager ();
467 const svalue *size_sval = cd.get_arg_svalue (0);
468 const region *new_reg
469 = model->get_or_create_region_for_heap_alloc (size_sval, cd.get_ctxt ());
470 if (cd.get_lhs_type ())
472 const svalue *ptr_sval
473 = mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
474 cd.maybe_set_lhs (ptr_sval);
478 /* Handler for "memcpy" and "__builtin_memcpy",
479 "memmove", and "__builtin_memmove". */
480 /* TODO: complain about overlapping src and dest for the memcpy
481 variants. */
483 class kf_memcpy_memmove : public known_function
485 public:
486 bool matches_call_types_p (const call_details &cd) const final override
488 return (cd.num_args () == 3
489 && cd.arg_is_pointer_p (0)
490 && cd.arg_is_pointer_p (1)
491 && cd.arg_is_size_p (2));
493 void impl_call_pre (const call_details &cd) const final override;
496 void
497 kf_memcpy_memmove::impl_call_pre (const call_details &cd) const
499 const svalue *dest_ptr_sval = cd.get_arg_svalue (0);
500 const svalue *src_ptr_sval = cd.get_arg_svalue (1);
501 const svalue *num_bytes_sval = cd.get_arg_svalue (2);
503 region_model *model = cd.get_model ();
504 region_model_manager *mgr = cd.get_manager ();
506 const region *dest_reg
507 = model->deref_rvalue (dest_ptr_sval, cd.get_arg_tree (0), cd.get_ctxt ());
508 const region *src_reg
509 = model->deref_rvalue (src_ptr_sval, cd.get_arg_tree (1), cd.get_ctxt ());
511 cd.maybe_set_lhs (dest_ptr_sval);
513 const region *sized_src_reg
514 = mgr->get_sized_region (src_reg, NULL_TREE, num_bytes_sval);
515 const region *sized_dest_reg
516 = mgr->get_sized_region (dest_reg, NULL_TREE, num_bytes_sval);
517 const svalue *src_contents_sval
518 = model->get_store_value (sized_src_reg, cd.get_ctxt ());
519 model->check_for_poison (src_contents_sval, cd.get_arg_tree (1),
520 sized_src_reg, cd.get_ctxt ());
521 model->set_value (sized_dest_reg, src_contents_sval, cd.get_ctxt ());
524 /* Handler for "memset" and "__builtin_memset". */
526 class kf_memset : public known_function
528 public:
529 bool matches_call_types_p (const call_details &cd) const final override
531 return (cd.num_args () == 3 && cd.arg_is_pointer_p (0));
534 void impl_call_pre (const call_details &cd) const final override;
537 void
538 kf_memset::impl_call_pre (const call_details &cd) const
540 const svalue *dest_sval = cd.get_arg_svalue (0);
541 const svalue *fill_value_sval = cd.get_arg_svalue (1);
542 const svalue *num_bytes_sval = cd.get_arg_svalue (2);
544 region_model *model = cd.get_model ();
545 region_model_manager *mgr = cd.get_manager ();
547 const region *dest_reg
548 = model->deref_rvalue (dest_sval, cd.get_arg_tree (0), cd.get_ctxt ());
550 const svalue *fill_value_u8
551 = mgr->get_or_create_cast (unsigned_char_type_node, fill_value_sval);
553 const region *sized_dest_reg = mgr->get_sized_region (dest_reg,
554 NULL_TREE,
555 num_bytes_sval);
556 model->check_region_for_write (sized_dest_reg,
557 nullptr,
558 cd.get_ctxt ());
559 model->fill_region (sized_dest_reg, fill_value_u8);
562 /* A subclass of pending_diagnostic for complaining about 'putenv'
563 called on an auto var. */
565 class putenv_of_auto_var
566 : public pending_diagnostic_subclass<putenv_of_auto_var>
568 public:
569 putenv_of_auto_var (tree fndecl, const region *reg)
570 : m_fndecl (fndecl), m_reg (reg),
571 m_var_decl (reg->get_base_region ()->maybe_get_decl ())
575 const char *get_kind () const final override
577 return "putenv_of_auto_var";
580 bool operator== (const putenv_of_auto_var &other) const
582 return (m_fndecl == other.m_fndecl
583 && m_reg == other.m_reg
584 && same_tree_p (m_var_decl, other.m_var_decl));
587 int get_controlling_option () const final override
589 return OPT_Wanalyzer_putenv_of_auto_var;
592 bool emit (rich_location *rich_loc, logger *) final override
594 auto_diagnostic_group d;
595 diagnostic_metadata m;
597 /* SEI CERT C Coding Standard: "POS34-C. Do not call putenv() with a
598 pointer to an automatic variable as the argument". */
599 diagnostic_metadata::precanned_rule
600 rule ("POS34-C", "https://wiki.sei.cmu.edu/confluence/x/6NYxBQ");
601 m.add_rule (rule);
603 bool warned;
604 if (m_var_decl)
605 warned = warning_meta (rich_loc, m, get_controlling_option (),
606 "%qE on a pointer to automatic variable %qE",
607 m_fndecl, m_var_decl);
608 else
609 warned = warning_meta (rich_loc, m, get_controlling_option (),
610 "%qE on a pointer to an on-stack buffer",
611 m_fndecl);
612 if (warned)
614 if (m_var_decl)
615 inform (DECL_SOURCE_LOCATION (m_var_decl),
616 "%qE declared on stack here", m_var_decl);
617 inform (rich_loc->get_loc (), "perhaps use %qs rather than %qE",
618 "setenv", m_fndecl);
621 return warned;
624 label_text describe_final_event (const evdesc::final_event &ev) final override
626 if (m_var_decl)
627 return ev.formatted_print ("%qE on a pointer to automatic variable %qE",
628 m_fndecl, m_var_decl);
629 else
630 return ev.formatted_print ("%qE on a pointer to an on-stack buffer",
631 m_fndecl);
634 void mark_interesting_stuff (interesting_t *interest) final override
636 if (!m_var_decl)
637 interest->add_region_creation (m_reg->get_base_region ());
640 private:
641 tree m_fndecl; // non-NULL
642 const region *m_reg; // non-NULL
643 tree m_var_decl; // could be NULL
646 /* Handler for calls to "putenv".
648 In theory we could try to model the state of the environment variables
649 for the process; for now we merely complain about putenv of regions
650 on the stack. */
652 class kf_putenv : public known_function
654 public:
655 bool matches_call_types_p (const call_details &cd) const final override
657 return (cd.num_args () == 1 && cd.arg_is_pointer_p (0));
660 void impl_call_pre (const call_details &cd) const final override
662 tree fndecl = cd.get_fndecl_for_call ();
663 gcc_assert (fndecl);
664 region_model_context *ctxt = cd.get_ctxt ();
665 region_model *model = cd.get_model ();
666 const svalue *ptr_sval = cd.get_arg_svalue (0);
667 const region *reg
668 = model->deref_rvalue (ptr_sval, cd.get_arg_tree (0), ctxt);
669 model->get_store ()->mark_as_escaped (reg);
670 enum memory_space mem_space = reg->get_memory_space ();
671 switch (mem_space)
673 default:
674 gcc_unreachable ();
675 case MEMSPACE_UNKNOWN:
676 case MEMSPACE_CODE:
677 case MEMSPACE_GLOBALS:
678 case MEMSPACE_HEAP:
679 case MEMSPACE_READONLY_DATA:
680 break;
681 case MEMSPACE_STACK:
682 if (ctxt)
683 ctxt->warn (make_unique<putenv_of_auto_var> (fndecl, reg));
684 break;
689 /* Handler for "realloc":
691 void *realloc(void *ptr, size_t size);
693 realloc(3) is awkward, since it has various different outcomes
694 that are best modelled as separate exploded nodes/edges.
696 We first check for sm-state, in
697 malloc_state_machine::on_realloc_call, so that we
698 can complain about issues such as realloc of a non-heap
699 pointer, and terminate the path for such cases (and issue
700 the complaints at the call's exploded node).
702 Assuming that these checks pass, we split the path here into
703 three special cases (and terminate the "standard" path):
704 (A) failure, returning NULL
705 (B) success, growing the buffer in-place without moving it
706 (C) success, allocating a new buffer, copying the content
707 of the old buffer to it, and freeing the old buffer.
709 Each of these has a custom_edge_info subclass, which updates
710 the region_model and sm-state of the destination state. */
712 class kf_realloc : public known_function
714 public:
715 bool matches_call_types_p (const call_details &cd) const final override
717 return (cd.num_args () == 2
718 && cd.arg_is_pointer_p (0)
719 && cd.arg_is_size_p (1));
721 void impl_call_post (const call_details &cd) const final override;
724 void
725 kf_realloc::impl_call_post (const call_details &cd) const
727 /* Three custom subclasses of custom_edge_info, for handling the various
728 outcomes of "realloc". */
730 /* Concrete custom_edge_info: a realloc call that fails, returning NULL. */
731 class failure : public failed_call_info
733 public:
734 failure (const call_details &cd)
735 : failed_call_info (cd)
739 bool update_model (region_model *model,
740 const exploded_edge *,
741 region_model_context *ctxt) const final override
743 /* Return NULL; everything else is unchanged. */
744 const call_details cd (get_call_details (model, ctxt));
745 region_model_manager *mgr = cd.get_manager ();
746 if (cd.get_lhs_type ())
748 const svalue *zero
749 = mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
750 model->set_value (cd.get_lhs_region (),
751 zero,
752 cd.get_ctxt ());
754 return true;
758 /* Concrete custom_edge_info: a realloc call that succeeds, growing
759 the existing buffer without moving it. */
760 class success_no_move : public call_info
762 public:
763 success_no_move (const call_details &cd)
764 : call_info (cd)
768 label_text get_desc (bool can_colorize) const final override
770 return make_label_text (can_colorize,
771 "when %qE succeeds, without moving buffer",
772 get_fndecl ());
775 bool update_model (region_model *model,
776 const exploded_edge *,
777 region_model_context *ctxt) const final override
779 /* Update size of buffer and return the ptr unchanged. */
780 const call_details cd (get_call_details (model, ctxt));
781 region_model_manager *mgr = cd.get_manager ();
782 const svalue *ptr_sval = cd.get_arg_svalue (0);
783 const svalue *size_sval = cd.get_arg_svalue (1);
785 /* We can only grow in place with a non-NULL pointer. */
787 const svalue *null_ptr
788 = mgr->get_or_create_int_cst (ptr_sval->get_type (), 0);
789 if (!model->add_constraint (ptr_sval, NE_EXPR, null_ptr,
790 cd.get_ctxt ()))
791 return false;
794 if (const region *buffer_reg = model->deref_rvalue (ptr_sval, NULL_TREE,
795 ctxt))
796 if (compat_types_p (size_sval->get_type (), size_type_node))
797 model->set_dynamic_extents (buffer_reg, size_sval, ctxt);
798 if (cd.get_lhs_region ())
800 model->set_value (cd.get_lhs_region (), ptr_sval, cd.get_ctxt ());
801 const svalue *zero
802 = mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
803 return model->add_constraint (ptr_sval, NE_EXPR, zero, ctxt);
805 else
806 return true;
810 /* Concrete custom_edge_info: a realloc call that succeeds, freeing
811 the existing buffer and moving the content to a freshly allocated
812 buffer. */
813 class success_with_move : public call_info
815 public:
816 success_with_move (const call_details &cd)
817 : call_info (cd)
821 label_text get_desc (bool can_colorize) const final override
823 return make_label_text (can_colorize,
824 "when %qE succeeds, moving buffer",
825 get_fndecl ());
827 bool update_model (region_model *model,
828 const exploded_edge *,
829 region_model_context *ctxt) const final override
831 const call_details cd (get_call_details (model, ctxt));
832 region_model_manager *mgr = cd.get_manager ();
833 const svalue *old_ptr_sval = cd.get_arg_svalue (0);
834 const svalue *new_size_sval = cd.get_arg_svalue (1);
836 /* Create the new region. */
837 const region *new_reg
838 = model->get_or_create_region_for_heap_alloc (new_size_sval, ctxt);
839 const svalue *new_ptr_sval
840 = mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
841 if (!model->add_constraint (new_ptr_sval, NE_EXPR, old_ptr_sval,
842 cd.get_ctxt ()))
843 return false;
845 if (cd.get_lhs_type ())
846 cd.maybe_set_lhs (new_ptr_sval);
848 if (const region *freed_reg = model->deref_rvalue (old_ptr_sval,
849 NULL_TREE, ctxt))
851 /* Copy the data. */
852 const svalue *old_size_sval = model->get_dynamic_extents (freed_reg);
853 if (old_size_sval)
855 const svalue *copied_size_sval
856 = get_copied_size (model, old_size_sval, new_size_sval);
857 const region *copied_old_reg
858 = mgr->get_sized_region (freed_reg, NULL, copied_size_sval);
859 const svalue *buffer_content_sval
860 = model->get_store_value (copied_old_reg, cd.get_ctxt ());
861 const region *copied_new_reg
862 = mgr->get_sized_region (new_reg, NULL, copied_size_sval);
863 model->set_value (copied_new_reg, buffer_content_sval,
864 cd.get_ctxt ());
866 else
868 /* We don't know how big the old region was;
869 mark the new region as having been touched to avoid uninit
870 issues. */
871 model->mark_region_as_unknown (new_reg, cd.get_uncertainty ());
874 /* Free the old region, so that pointers to the old buffer become
875 invalid. */
877 /* If the ptr points to an underlying heap region, delete it,
878 poisoning pointers. */
879 model->unbind_region_and_descendents (freed_reg, POISON_KIND_FREED);
880 model->unset_dynamic_extents (freed_reg);
883 /* Update the sm-state: mark the old_ptr_sval as "freed",
884 and the new_ptr_sval as "nonnull". */
885 model->on_realloc_with_move (cd, old_ptr_sval, new_ptr_sval);
887 if (cd.get_lhs_type ())
889 const svalue *zero
890 = mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
891 return model->add_constraint (new_ptr_sval, NE_EXPR, zero,
892 cd.get_ctxt ());
894 else
895 return true;
898 private:
899 /* Return the lesser of OLD_SIZE_SVAL and NEW_SIZE_SVAL.
900 If unknown, OLD_SIZE_SVAL is returned. */
901 const svalue *get_copied_size (region_model *model,
902 const svalue *old_size_sval,
903 const svalue *new_size_sval) const
905 tristate res
906 = model->eval_condition (old_size_sval, GT_EXPR, new_size_sval);
907 switch (res.get_value ())
909 case tristate::TS_TRUE:
910 return new_size_sval;
911 case tristate::TS_FALSE:
912 case tristate::TS_UNKNOWN:
913 return old_size_sval;
914 default:
915 gcc_unreachable ();
920 /* Body of kf_realloc::impl_call_post. */
922 if (cd.get_ctxt ())
924 cd.get_ctxt ()->bifurcate (make_unique<failure> (cd));
925 cd.get_ctxt ()->bifurcate (make_unique<success_no_move> (cd));
926 cd.get_ctxt ()->bifurcate (make_unique<success_with_move> (cd));
927 cd.get_ctxt ()->terminate_path ();
931 /* Handler for "strchr" and "__builtin_strchr". */
933 class kf_strchr : public known_function
935 public:
936 bool matches_call_types_p (const call_details &cd) const final override
938 return (cd.num_args () == 2 && cd.arg_is_pointer_p (0));
940 void impl_call_post (const call_details &cd) const final override;
943 void
944 kf_strchr::impl_call_post (const call_details &cd) const
946 class strchr_call_info : public call_info
948 public:
949 strchr_call_info (const call_details &cd, bool found)
950 : call_info (cd), m_found (found)
954 label_text get_desc (bool can_colorize) const final override
956 if (m_found)
957 return make_label_text (can_colorize,
958 "when %qE returns non-NULL",
959 get_fndecl ());
960 else
961 return make_label_text (can_colorize,
962 "when %qE returns NULL",
963 get_fndecl ());
966 bool update_model (region_model *model,
967 const exploded_edge *,
968 region_model_context *ctxt) const final override
970 const call_details cd (get_call_details (model, ctxt));
971 if (tree lhs_type = cd.get_lhs_type ())
973 region_model_manager *mgr = model->get_manager ();
974 const svalue *result;
975 if (m_found)
977 const svalue *str_sval = cd.get_arg_svalue (0);
978 const region *str_reg
979 = model->deref_rvalue (str_sval, cd.get_arg_tree (0),
980 cd.get_ctxt ());
981 /* We want str_sval + OFFSET for some unknown OFFSET.
982 Use a conjured_svalue to represent the offset,
983 using the str_reg as the id of the conjured_svalue. */
984 const svalue *offset
985 = mgr->get_or_create_conjured_svalue (size_type_node,
986 cd.get_call_stmt (),
987 str_reg,
988 conjured_purge (model,
989 ctxt));
990 result = mgr->get_or_create_binop (lhs_type, POINTER_PLUS_EXPR,
991 str_sval, offset);
993 else
994 result = mgr->get_or_create_int_cst (lhs_type, 0);
995 cd.maybe_set_lhs (result);
997 return true;
999 private:
1000 bool m_found;
1003 /* Body of kf_strchr::impl_call_post. */
1004 if (cd.get_ctxt ())
1006 cd.get_ctxt ()->bifurcate (make_unique<strchr_call_info> (cd, false));
1007 cd.get_ctxt ()->bifurcate (make_unique<strchr_call_info> (cd, true));
1008 cd.get_ctxt ()->terminate_path ();
1012 /* Handler for "sprintf".
1013 int sprintf(char *str, const char *format, ...);
1016 class kf_sprintf : public known_function
1018 public:
1019 bool matches_call_types_p (const call_details &cd) const final override
1021 return (cd.num_args () >= 2
1022 && cd.arg_is_pointer_p (0)
1023 && cd.arg_is_pointer_p (1));
1026 void impl_call_pre (const call_details &cd) const final override
1028 /* For now, merely assume that the destination buffer gets set to a
1029 new svalue. */
1030 region_model *model = cd.get_model ();
1031 region_model_context *ctxt = cd.get_ctxt ();
1032 const svalue *dst_ptr = cd.get_arg_svalue (0);
1033 const region *dst_reg
1034 = model->deref_rvalue (dst_ptr, cd.get_arg_tree (0), ctxt);
1035 const svalue *content = cd.get_or_create_conjured_svalue (dst_reg);
1036 model->set_value (dst_reg, content, ctxt);
1040 /* Handler for "__builtin_stack_restore". */
1042 class kf_stack_restore : public known_function
1044 public:
1045 bool matches_call_types_p (const call_details &) const final override
1047 return true;
1050 /* Currently a no-op. */
1053 /* Handler for "__builtin_stack_save". */
1055 class kf_stack_save : public known_function
1057 public:
1058 bool matches_call_types_p (const call_details &) const final override
1060 return true;
1063 /* Currently a no-op. */
1066 /* Handler for "strcpy" and "__builtin_strcpy_chk". */
1068 class kf_strcpy : public known_function
1070 public:
1071 kf_strcpy (unsigned int num_args) : m_num_args (num_args) {}
1072 bool matches_call_types_p (const call_details &cd) const final override
1074 return (cd.num_args () == m_num_args
1075 && cd.arg_is_pointer_p (0)
1076 && cd.arg_is_pointer_p (1));
1079 void impl_call_pre (const call_details &cd) const final override;
1081 private:
1082 unsigned int m_num_args;
1085 void
1086 kf_strcpy::impl_call_pre (const call_details &cd) const
1088 region_model *model = cd.get_model ();
1089 region_model_manager *mgr = cd.get_manager ();
1091 const svalue *dest_sval = cd.get_arg_svalue (0);
1092 const region *dest_reg = model->deref_rvalue (dest_sval, cd.get_arg_tree (0),
1093 cd.get_ctxt ());
1094 const svalue *src_sval = cd.get_arg_svalue (1);
1095 const region *src_reg = model->deref_rvalue (src_sval, cd.get_arg_tree (1),
1096 cd.get_ctxt ());
1097 const svalue *src_contents_sval = model->get_store_value (src_reg,
1098 cd.get_ctxt ());
1100 cd.maybe_set_lhs (dest_sval);
1102 /* Try to get the string size if SRC_REG is a string_region. */
1103 const svalue *copied_bytes_sval = model->get_string_size (src_reg);
1104 /* Otherwise, check if the contents of SRC_REG is a string. */
1105 if (copied_bytes_sval->get_kind () == SK_UNKNOWN)
1106 copied_bytes_sval = model->get_string_size (src_contents_sval);
1108 const region *sized_dest_reg
1109 = mgr->get_sized_region (dest_reg, NULL_TREE, copied_bytes_sval);
1110 model->set_value (sized_dest_reg, src_contents_sval, cd.get_ctxt ());
1113 /* Handler for "strdup" and "__builtin_strdup". */
1115 class kf_strdup : public known_function
1117 public:
1118 bool matches_call_types_p (const call_details &cd) const final override
1120 return (cd.num_args () == 1 && cd.arg_is_pointer_p (0));
1122 void impl_call_pre (const call_details &cd) const final override
1124 region_model *model = cd.get_model ();
1125 region_model_manager *mgr = cd.get_manager ();
1126 /* Ideally we'd get the size here, and simulate copying the bytes. */
1127 const region *new_reg
1128 = model->get_or_create_region_for_heap_alloc (NULL, cd.get_ctxt ());
1129 model->mark_region_as_unknown (new_reg, NULL);
1130 if (cd.get_lhs_type ())
1132 const svalue *ptr_sval
1133 = mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
1134 cd.maybe_set_lhs (ptr_sval);
1139 /* Handle the on_call_pre part of "strlen". */
1141 class kf_strlen : public known_function
1143 public:
1144 bool matches_call_types_p (const call_details &cd) const final override
1146 return (cd.num_args () == 1 && cd.arg_is_pointer_p (0));
1148 void impl_call_pre (const call_details &cd) const final override;
1151 void
1152 kf_strlen::impl_call_pre (const call_details &cd) const
1154 region_model_context *ctxt = cd.get_ctxt ();
1155 region_model *model = cd.get_model ();
1156 region_model_manager *mgr = cd.get_manager ();
1158 const svalue *arg_sval = cd.get_arg_svalue (0);
1159 const region *buf_reg
1160 = model->deref_rvalue (arg_sval, cd.get_arg_tree (0), ctxt);
1161 if (const string_region *str_reg
1162 = buf_reg->dyn_cast_string_region ())
1164 tree str_cst = str_reg->get_string_cst ();
1165 /* TREE_STRING_LENGTH is sizeof, not strlen. */
1166 int sizeof_cst = TREE_STRING_LENGTH (str_cst);
1167 int strlen_cst = sizeof_cst - 1;
1168 if (cd.get_lhs_type ())
1170 tree t_cst = build_int_cst (cd.get_lhs_type (), strlen_cst);
1171 const svalue *result_sval
1172 = mgr->get_or_create_constant_svalue (t_cst);
1173 cd.maybe_set_lhs (result_sval);
1174 return;
1177 /* Otherwise a conjured value. */
1180 /* Handler for "strndup" and "__builtin_strndup". */
1182 class kf_strndup : public known_function
1184 public:
1185 bool matches_call_types_p (const call_details &cd) const final override
1187 return (cd.num_args () == 2 && cd.arg_is_pointer_p (0));
1189 void impl_call_pre (const call_details &cd) const final override
1191 region_model *model = cd.get_model ();
1192 region_model_manager *mgr = cd.get_manager ();
1193 /* Ideally we'd get the size here, and simulate copying the bytes. */
1194 const region *new_reg
1195 = model->get_or_create_region_for_heap_alloc (NULL, cd.get_ctxt ());
1196 model->mark_region_as_unknown (new_reg, NULL);
1197 if (cd.get_lhs_type ())
1199 const svalue *ptr_sval
1200 = mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
1201 cd.maybe_set_lhs (ptr_sval);
1206 class kf_ubsan_bounds : public internal_known_function
1208 /* Empty. */
1211 /* Handle calls to functions referenced by
1212 __attribute__((malloc(FOO))). */
1214 void
1215 region_model::impl_deallocation_call (const call_details &cd)
1217 kf_free kf;
1218 kf.impl_call_post (cd);
1221 static void
1222 register_atomic_builtins (known_function_manager &kfm)
1224 kfm.add (BUILT_IN_ATOMIC_EXCHANGE, make_unique<kf_atomic_exchange> ());
1225 kfm.add (BUILT_IN_ATOMIC_EXCHANGE_N, make_unique<kf_atomic_exchange_n> ());
1226 kfm.add (BUILT_IN_ATOMIC_EXCHANGE_1, make_unique<kf_atomic_exchange_n> ());
1227 kfm.add (BUILT_IN_ATOMIC_EXCHANGE_2, make_unique<kf_atomic_exchange_n> ());
1228 kfm.add (BUILT_IN_ATOMIC_EXCHANGE_4, make_unique<kf_atomic_exchange_n> ());
1229 kfm.add (BUILT_IN_ATOMIC_EXCHANGE_8, make_unique<kf_atomic_exchange_n> ());
1230 kfm.add (BUILT_IN_ATOMIC_EXCHANGE_16, make_unique<kf_atomic_exchange_n> ());
1231 kfm.add (BUILT_IN_ATOMIC_LOAD, make_unique<kf_atomic_load> ());
1232 kfm.add (BUILT_IN_ATOMIC_LOAD_N, make_unique<kf_atomic_load_n> ());
1233 kfm.add (BUILT_IN_ATOMIC_LOAD_1, make_unique<kf_atomic_load_n> ());
1234 kfm.add (BUILT_IN_ATOMIC_LOAD_2, make_unique<kf_atomic_load_n> ());
1235 kfm.add (BUILT_IN_ATOMIC_LOAD_4, make_unique<kf_atomic_load_n> ());
1236 kfm.add (BUILT_IN_ATOMIC_LOAD_8, make_unique<kf_atomic_load_n> ());
1237 kfm.add (BUILT_IN_ATOMIC_LOAD_16, make_unique<kf_atomic_load_n> ());
1238 kfm.add (BUILT_IN_ATOMIC_STORE_N, make_unique<kf_atomic_store_n> ());
1239 kfm.add (BUILT_IN_ATOMIC_STORE_1, make_unique<kf_atomic_store_n> ());
1240 kfm.add (BUILT_IN_ATOMIC_STORE_2, make_unique<kf_atomic_store_n> ());
1241 kfm.add (BUILT_IN_ATOMIC_STORE_4, make_unique<kf_atomic_store_n> ());
1242 kfm.add (BUILT_IN_ATOMIC_STORE_8, make_unique<kf_atomic_store_n> ());
1243 kfm.add (BUILT_IN_ATOMIC_STORE_16, make_unique<kf_atomic_store_n> ());
1244 kfm.add (BUILT_IN_ATOMIC_ADD_FETCH_1,
1245 make_unique<kf_atomic_op_fetch> (PLUS_EXPR));
1246 kfm.add (BUILT_IN_ATOMIC_ADD_FETCH_2,
1247 make_unique<kf_atomic_op_fetch> (PLUS_EXPR));
1248 kfm.add (BUILT_IN_ATOMIC_ADD_FETCH_4,
1249 make_unique<kf_atomic_op_fetch> (PLUS_EXPR));
1250 kfm.add (BUILT_IN_ATOMIC_ADD_FETCH_8,
1251 make_unique<kf_atomic_op_fetch> (PLUS_EXPR));
1252 kfm.add (BUILT_IN_ATOMIC_ADD_FETCH_16,
1253 make_unique<kf_atomic_op_fetch> (PLUS_EXPR));
1254 kfm.add (BUILT_IN_ATOMIC_SUB_FETCH_1,
1255 make_unique<kf_atomic_op_fetch> (MINUS_EXPR));
1256 kfm.add (BUILT_IN_ATOMIC_SUB_FETCH_2,
1257 make_unique<kf_atomic_op_fetch> (MINUS_EXPR));
1258 kfm.add (BUILT_IN_ATOMIC_SUB_FETCH_4,
1259 make_unique<kf_atomic_op_fetch> (MINUS_EXPR));
1260 kfm.add (BUILT_IN_ATOMIC_SUB_FETCH_8,
1261 make_unique<kf_atomic_op_fetch> (MINUS_EXPR));
1262 kfm.add (BUILT_IN_ATOMIC_SUB_FETCH_16,
1263 make_unique<kf_atomic_op_fetch> (MINUS_EXPR));
1264 kfm.add (BUILT_IN_ATOMIC_AND_FETCH_1,
1265 make_unique<kf_atomic_op_fetch> (BIT_AND_EXPR));
1266 kfm.add (BUILT_IN_ATOMIC_AND_FETCH_2,
1267 make_unique<kf_atomic_op_fetch> (BIT_AND_EXPR));
1268 kfm.add (BUILT_IN_ATOMIC_AND_FETCH_4,
1269 make_unique<kf_atomic_op_fetch> (BIT_AND_EXPR));
1270 kfm.add (BUILT_IN_ATOMIC_AND_FETCH_8,
1271 make_unique<kf_atomic_op_fetch> (BIT_AND_EXPR));
1272 kfm.add (BUILT_IN_ATOMIC_AND_FETCH_16,
1273 make_unique<kf_atomic_op_fetch> (BIT_AND_EXPR));
1274 kfm.add (BUILT_IN_ATOMIC_XOR_FETCH_1,
1275 make_unique<kf_atomic_op_fetch> (BIT_XOR_EXPR));
1276 kfm.add (BUILT_IN_ATOMIC_XOR_FETCH_2,
1277 make_unique<kf_atomic_op_fetch> (BIT_XOR_EXPR));
1278 kfm.add (BUILT_IN_ATOMIC_XOR_FETCH_4,
1279 make_unique<kf_atomic_op_fetch> (BIT_XOR_EXPR));
1280 kfm.add (BUILT_IN_ATOMIC_XOR_FETCH_8,
1281 make_unique<kf_atomic_op_fetch> (BIT_XOR_EXPR));
1282 kfm.add (BUILT_IN_ATOMIC_XOR_FETCH_16,
1283 make_unique<kf_atomic_op_fetch> (BIT_XOR_EXPR));
1284 kfm.add (BUILT_IN_ATOMIC_OR_FETCH_1,
1285 make_unique<kf_atomic_op_fetch> (BIT_IOR_EXPR));
1286 kfm.add (BUILT_IN_ATOMIC_OR_FETCH_2,
1287 make_unique<kf_atomic_op_fetch> (BIT_IOR_EXPR));
1288 kfm.add (BUILT_IN_ATOMIC_OR_FETCH_4,
1289 make_unique<kf_atomic_op_fetch> (BIT_IOR_EXPR));
1290 kfm.add (BUILT_IN_ATOMIC_OR_FETCH_8,
1291 make_unique<kf_atomic_op_fetch> (BIT_IOR_EXPR));
1292 kfm.add (BUILT_IN_ATOMIC_OR_FETCH_16,
1293 make_unique<kf_atomic_op_fetch> (BIT_IOR_EXPR));
1294 kfm.add (BUILT_IN_ATOMIC_FETCH_ADD_1,
1295 make_unique<kf_atomic_fetch_op> (PLUS_EXPR));
1296 kfm.add (BUILT_IN_ATOMIC_FETCH_ADD_2,
1297 make_unique<kf_atomic_fetch_op> (PLUS_EXPR));
1298 kfm.add (BUILT_IN_ATOMIC_FETCH_ADD_4,
1299 make_unique<kf_atomic_fetch_op> (PLUS_EXPR));
1300 kfm.add (BUILT_IN_ATOMIC_FETCH_ADD_8,
1301 make_unique<kf_atomic_fetch_op> (PLUS_EXPR));
1302 kfm.add (BUILT_IN_ATOMIC_FETCH_ADD_16,
1303 make_unique<kf_atomic_fetch_op> (PLUS_EXPR));
1304 kfm.add (BUILT_IN_ATOMIC_FETCH_SUB_1,
1305 make_unique<kf_atomic_fetch_op> (MINUS_EXPR));
1306 kfm.add (BUILT_IN_ATOMIC_FETCH_SUB_2,
1307 make_unique<kf_atomic_fetch_op> (MINUS_EXPR));
1308 kfm.add (BUILT_IN_ATOMIC_FETCH_SUB_4,
1309 make_unique<kf_atomic_fetch_op> (MINUS_EXPR));
1310 kfm.add (BUILT_IN_ATOMIC_FETCH_SUB_8,
1311 make_unique<kf_atomic_fetch_op> (MINUS_EXPR));
1312 kfm.add (BUILT_IN_ATOMIC_FETCH_SUB_16,
1313 make_unique<kf_atomic_fetch_op> (MINUS_EXPR));
1314 kfm.add (BUILT_IN_ATOMIC_FETCH_AND_1,
1315 make_unique<kf_atomic_fetch_op> (BIT_AND_EXPR));
1316 kfm.add (BUILT_IN_ATOMIC_FETCH_AND_2,
1317 make_unique<kf_atomic_fetch_op> (BIT_AND_EXPR));
1318 kfm.add (BUILT_IN_ATOMIC_FETCH_AND_4,
1319 make_unique<kf_atomic_fetch_op> (BIT_AND_EXPR));
1320 kfm.add (BUILT_IN_ATOMIC_FETCH_AND_8,
1321 make_unique<kf_atomic_fetch_op> (BIT_AND_EXPR));
1322 kfm.add (BUILT_IN_ATOMIC_FETCH_AND_16,
1323 make_unique<kf_atomic_fetch_op> (BIT_AND_EXPR));
1324 kfm.add (BUILT_IN_ATOMIC_FETCH_XOR_1,
1325 make_unique<kf_atomic_fetch_op> (BIT_XOR_EXPR));
1326 kfm.add (BUILT_IN_ATOMIC_FETCH_XOR_2,
1327 make_unique<kf_atomic_fetch_op> (BIT_XOR_EXPR));
1328 kfm.add (BUILT_IN_ATOMIC_FETCH_XOR_4,
1329 make_unique<kf_atomic_fetch_op> (BIT_XOR_EXPR));
1330 kfm.add (BUILT_IN_ATOMIC_FETCH_XOR_8,
1331 make_unique<kf_atomic_fetch_op> (BIT_XOR_EXPR));
1332 kfm.add (BUILT_IN_ATOMIC_FETCH_XOR_16,
1333 make_unique<kf_atomic_fetch_op> (BIT_XOR_EXPR));
1334 kfm.add (BUILT_IN_ATOMIC_FETCH_OR_1,
1335 make_unique<kf_atomic_fetch_op> (BIT_IOR_EXPR));
1336 kfm.add (BUILT_IN_ATOMIC_FETCH_OR_2,
1337 make_unique<kf_atomic_fetch_op> (BIT_IOR_EXPR));
1338 kfm.add (BUILT_IN_ATOMIC_FETCH_OR_4,
1339 make_unique<kf_atomic_fetch_op> (BIT_IOR_EXPR));
1340 kfm.add (BUILT_IN_ATOMIC_FETCH_OR_8,
1341 make_unique<kf_atomic_fetch_op> (BIT_IOR_EXPR));
1342 kfm.add (BUILT_IN_ATOMIC_FETCH_OR_16,
1343 make_unique<kf_atomic_fetch_op> (BIT_IOR_EXPR));
1346 /* Populate KFM with instances of known functions supported by the core of the
1347 analyzer (as opposed to plugins). */
1349 void
1350 register_known_functions (known_function_manager &kfm)
1352 /* Debugging/test support functions, all with a "__analyzer_" prefix. */
1353 register_known_analyzer_functions (kfm);
1355 /* Internal fns the analyzer has known_functions for. */
1357 kfm.add (IFN_BUILTIN_EXPECT, make_unique<kf_expect> ());
1358 kfm.add (IFN_UBSAN_BOUNDS, make_unique<kf_ubsan_bounds> ());
1361 /* Built-ins the analyzer has known_functions for. */
1363 kfm.add (BUILT_IN_ALLOCA, make_unique<kf_alloca> ());
1364 kfm.add (BUILT_IN_ALLOCA_WITH_ALIGN, make_unique<kf_alloca> ());
1365 kfm.add (BUILT_IN_CALLOC, make_unique<kf_calloc> ());
1366 kfm.add (BUILT_IN_EXPECT, make_unique<kf_expect> ());
1367 kfm.add (BUILT_IN_EXPECT_WITH_PROBABILITY, make_unique<kf_expect> ());
1368 kfm.add (BUILT_IN_FREE, make_unique<kf_free> ());
1369 kfm.add (BUILT_IN_MALLOC, make_unique<kf_malloc> ());
1370 kfm.add (BUILT_IN_MEMCPY, make_unique<kf_memcpy_memmove> ());
1371 kfm.add (BUILT_IN_MEMCPY_CHK, make_unique<kf_memcpy_memmove> ());
1372 kfm.add (BUILT_IN_MEMMOVE, make_unique<kf_memcpy_memmove> ());
1373 kfm.add (BUILT_IN_MEMMOVE_CHK, make_unique<kf_memcpy_memmove> ());
1374 kfm.add (BUILT_IN_MEMSET, make_unique<kf_memset> ());
1375 kfm.add (BUILT_IN_MEMSET_CHK, make_unique<kf_memset> ());
1376 kfm.add (BUILT_IN_REALLOC, make_unique<kf_realloc> ());
1377 kfm.add (BUILT_IN_SPRINTF, make_unique<kf_sprintf> ());
1378 kfm.add (BUILT_IN_STACK_RESTORE, make_unique<kf_stack_restore> ());
1379 kfm.add (BUILT_IN_STACK_SAVE, make_unique<kf_stack_save> ());
1380 kfm.add (BUILT_IN_STRCHR, make_unique<kf_strchr> ());
1381 kfm.add (BUILT_IN_STRCPY, make_unique<kf_strcpy> (2));
1382 kfm.add (BUILT_IN_STRCPY_CHK, make_unique<kf_strcpy> (3));
1383 kfm.add (BUILT_IN_STRDUP, make_unique<kf_strdup> ());
1384 kfm.add (BUILT_IN_STRNDUP, make_unique<kf_strndup> ());
1385 kfm.add (BUILT_IN_STRLEN, make_unique<kf_strlen> ());
1387 register_atomic_builtins (kfm);
1388 register_varargs_builtins (kfm);
1391 /* Known builtins and C standard library functions. */
1393 kfm.add ("memset", make_unique<kf_memset> ());
1394 kfm.add ("strdup", make_unique<kf_strdup> ());
1395 kfm.add ("strndup", make_unique<kf_strndup> ());
1398 /* Known POSIX functions, and some non-standard extensions. */
1400 kfm.add ("putenv", make_unique<kf_putenv> ());
1402 register_known_fd_functions (kfm);
1403 register_known_file_functions (kfm);
1406 /* glibc functions. */
1408 kfm.add ("__errno_location", make_unique<kf_errno_location> ());
1409 kfm.add ("error", make_unique<kf_error> (3));
1410 kfm.add ("error_at_line", make_unique<kf_error> (5));
1413 /* Other implementations of C standard library. */
1415 /* According to PR 107807 comment #2, Solaris implements "errno"
1416 like this:
1417 extern int *___errno(void) __attribute__((__const__));
1418 #define errno (*(___errno()))
1419 and OS X like this:
1420 extern int * __error(void);
1421 #define errno (*__error())
1422 and similarly __errno for newlib.
1423 Add these as synonyms for "__errno_location". */
1424 kfm.add ("___errno", make_unique<kf_errno_location> ());
1425 kfm.add ("__error", make_unique<kf_errno_location> ());
1426 kfm.add ("__errno", make_unique<kf_errno_location> ());
1429 /* Language-specific support functions. */
1430 register_known_functions_lang_cp (kfm);
1433 } // namespace ana
1435 #endif /* #if ENABLE_ANALYZER */