/* Predicate aware uninitialized variable warning.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Xinliang David Li <davidxl@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "basic-block.h"
#include "gimple.h"
#include "gimple-pretty-print.h"
#include "pointer-set.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
/* This implements the pass that does predicate aware warning on uses of
   possibly uninitialized variables.  The pass first collects the set of
   possibly uninitialized SSA names.  For each such name, it walks through
   all its immediate uses.  For each immediate use, it rebuilds the condition
   expression (the predicate) that guards the use.  The predicate is then
   examined to see if the variable is always defined under that same condition.
   This is done either by pruning the unrealizable paths that lead to the
   default definitions or by checking if the predicate set that guards the
   defining paths is a superset of the use predicate.  */
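/* For illustration, a minimal hypothetical GIMPLE sketch of the kind of
   code the pass reasons about (block numbers and SSA names are made up):

     <bb 2>:
       if (n_2(D) > 0)
         goto <bb 3>;
       else
         goto <bb 4>;

     <bb 3>:
       x_5 = 1;

     <bb 4>:
       # x_1 = PHI <x_4(D) (2), x_5 (3)>
       if (n_2(D) > 0)
         goto <bb 5>;
       else
         goto <bb 6>;

     <bb 5>:
       use (x_1);

   x_4(D) is a default definition, so x_1 is possibly uninitialized.  The
   use of x_1 is guarded by (n_2 > 0), while the only path that brings in
   the undefined value is guarded by .NOT. (n_2 > 0); the two predicate
   sets do not overlap, so no warning is emitted.  */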
/* Pointer set of potentially undefined ssa names, i.e.,
   ssa names that are defined by phi with operands that
   are not defined or potentially undefined.  */
static struct pointer_set_t *possibly_undefined_names = 0;
/* Bit mask handling macros.  */
#define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
#define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
#define MASK_EMPTY(mask) (mask == 0)

/* Returns the first bit position (starting from LSB)
   in mask that is non zero.  Returns -1 if the mask is empty.  */
static int
get_mask_first_set_bit (unsigned mask)
{
  int pos = 0;

  if (mask == 0)
    return -1;

  while ((mask & (1 << pos)) == 0)
    pos++;

  return pos;
}
#define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
/* Return true if T, an SSA_NAME, has an undefined value.  */

static bool
has_undefined_value_p (tree t)
{
  return (ssa_undefined_value_p (t)
          || (possibly_undefined_names
              && pointer_set_contains (possibly_undefined_names, t)));
}
/* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
   is set on SSA_NAME_VAR.  */

static inline bool
uninit_undefined_value_p (tree t)
{
  if (!has_undefined_value_p (t))
    return false;
  if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
    return false;
  return true;
}
/* Emit warnings for uninitialized variables.  This is done in two passes.

   The first pass notices real uses of SSA names with undefined values.
   Such uses are unconditionally uninitialized, and we can be certain that
   such a use is a mistake.  This pass is run before most optimizations,
   so that we catch as many as we can.

   The second pass follows PHI nodes to find uses that are potentially
   uninitialized.  In this case we can't necessarily prove that the use
   is really uninitialized.  This pass is run after most optimizations,
   so that we thread as many jumps as possible, and delete as much dead
   code as possible, in order to reduce false positives.  We also look
   again for plain uninitialized variables, since optimization may have
   changed conditionally uninitialized to unconditionally uninitialized.  */
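/* For example (hypothetical source, for illustration only):

     int f (void)
     {
       int x;
       return x;          <-- unconditionally uninitialized; the first
                               pass warns "is used uninitialized"
     }

     int g (int flag)
     {
       int y;
       if (flag)
         y = 1;
       return y;          <-- possibly uninitialized; the second pass
                               warns "may be used uninitialized"
     }
*/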
/* Emit a warning for EXPR based on variable VAR at the point in the
   program where T, an SSA_NAME, is used uninitialized.  The exact
   warning text is in GMSGID and LOCUS may contain a location or be null.
   WC is the warning code.  */
static void
warn_uninit (enum opt_code wc, tree t,
             tree expr, tree var, const char *gmsgid, void *data)
{
  gimple context = (gimple) data;
  location_t location, cfun_loc;
  expanded_location xloc, floc;

  if (!has_undefined_value_p (t))
    return;

  /* TREE_NO_WARNING either means we already warned, or the front end
     wishes to suppress the warning.  */
  if ((context
       && (gimple_no_warning_p (context)
           || (gimple_assign_single_p (context)
               && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
      || TREE_NO_WARNING (expr))
    return;

  location = (context != NULL && gimple_has_location (context))
             ? gimple_location (context)
             : DECL_SOURCE_LOCATION (var);
  location = linemap_resolve_location (line_table, location,
                                       LRK_SPELLING_LOCATION, NULL);
  cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
  xloc = expand_location (location);
  floc = expand_location (cfun_loc);
  if (warning_at (location, wc, gmsgid, expr))
    {
      TREE_NO_WARNING (expr) = 1;

      if (location == DECL_SOURCE_LOCATION (var))
        return;

      if (xloc.file != floc.file
          || linemap_location_before_p (line_table, location, cfun_loc)
          || linemap_location_before_p (line_table,
                                        cfun->function_end_locus,
                                        location))
        inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
    }
}
170 warn_uninitialized_vars (bool warn_possibly_uninitialized
)
172 gimple_stmt_iterator gsi
;
177 bool always_executed
= dominated_by_p (CDI_POST_DOMINATORS
,
178 single_succ (ENTRY_BLOCK_PTR
), bb
);
179 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
181 gimple stmt
= gsi_stmt (gsi
);
186 if (is_gimple_debug (stmt
))
      /* We only do data flow with SSA_NAMEs, so that's all we
         can warn about.  */
191 FOR_EACH_SSA_USE_OPERAND (use_p
, stmt
, op_iter
, SSA_OP_USE
)
193 use
= USE_FROM_PTR (use_p
);
195 warn_uninit (OPT_Wuninitialized
, use
,
196 SSA_NAME_VAR (use
), SSA_NAME_VAR (use
),
197 "%qD is used uninitialized in this function",
199 else if (warn_possibly_uninitialized
)
200 warn_uninit (OPT_Wmaybe_uninitialized
, use
,
201 SSA_NAME_VAR (use
), SSA_NAME_VAR (use
),
202 "%qD may be used uninitialized in this function",
      /* For memory the only cheap thing we can do is see if we
         have a use of the default def of the virtual operand.
         ??? Note that at -O0 we do not have virtual operands.
         ??? Not so cheap would be to use the alias oracle via
         walk_aliased_vdefs, if we don't find any aliasing vdef
         warn as is-used-uninitialized, if we don't find an aliasing
         vdef that kills our use (stmt_kills_ref_p), warn as
         may-be-used-uninitialized.  But this walk is quadratic and
         so must be limited, which means we would miss warning
         opportunities.  */
216 use
= gimple_vuse (stmt
);
218 && gimple_assign_single_p (stmt
)
219 && !gimple_vdef (stmt
)
220 && SSA_NAME_IS_DEFAULT_DEF (use
))
222 tree rhs
= gimple_assign_rhs1 (stmt
);
223 tree base
= get_base_address (rhs
);
225 /* Do not warn if it can be initialized outside this function. */
226 if (TREE_CODE (base
) != VAR_DECL
227 || DECL_HARD_REGISTER (base
)
228 || is_global_var (base
))
232 warn_uninit (OPT_Wuninitialized
, use
,
233 gimple_assign_rhs1 (stmt
), base
,
234 "%qE is used uninitialized in this function",
236 else if (warn_possibly_uninitialized
)
237 warn_uninit (OPT_Wmaybe_uninitialized
, use
,
238 gimple_assign_rhs1 (stmt
), base
,
239 "%qE may be used uninitialized in this function",
/* Checks if the operand OPND of PHI is defined by
   another phi with one operand defined by this PHI,
   but the rest of the operands are all defined.  If yes,
   returns true to skip this operand as being
   redundant.  Can be enhanced to be more general.  */
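/* For example (hypothetical GIMPLE, names made up):

     # x_1 = PHI <x_0(D), x_2>
     ...
     # x_2 = PHI <x_1, x_3>

   When the possibly-undefined operand x_2 of the first phi is examined,
   its defining phi has only x_1 (the result of the phi being examined)
   and x_3 (fully defined) as operands, so x_2 adds no new undefined
   paths and can be skipped.  */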
255 can_skip_redundant_opnd (tree opnd
, gimple phi
)
261 phi_def
= gimple_phi_result (phi
);
262 op_def
= SSA_NAME_DEF_STMT (opnd
);
263 if (gimple_code (op_def
) != GIMPLE_PHI
)
265 n
= gimple_phi_num_args (op_def
);
266 for (i
= 0; i
< n
; ++i
)
268 tree op
= gimple_phi_arg_def (op_def
, i
);
269 if (TREE_CODE (op
) != SSA_NAME
)
271 if (op
!= phi_def
&& uninit_undefined_value_p (op
))
/* Returns a bit mask holding the positions of arguments in PHI
   that have empty (or possibly empty) definitions.  */
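/* For example, for a hypothetical phi

     # x_1 = PHI <x_4(D), x_5, x_6(D)>

   arguments 0 and 2 are undefined default definitions, so the returned
   mask is (1 << 0) | (1 << 2) == 5.  */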
282 compute_uninit_opnds_pos (gimple phi
)
285 unsigned uninit_opnds
= 0;
287 n
= gimple_phi_num_args (phi
);
288 /* Bail out for phi with too many args. */
292 for (i
= 0; i
< n
; ++i
)
294 tree op
= gimple_phi_arg_def (phi
, i
);
295 if (TREE_CODE (op
) == SSA_NAME
296 && uninit_undefined_value_p (op
)
297 && !can_skip_redundant_opnd (op
, phi
))
299 if (cfun
->has_nonlocal_label
|| cfun
->calls_setjmp
)
          /* Ignore SSA_NAMEs that appear on abnormal edges
             somewhere.  */
303 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op
))
306 MASK_SET_BIT (uninit_opnds
, i
);
312 /* Find the immediate postdominator PDOM of the specified
313 basic block BLOCK. */
315 static inline basic_block
316 find_pdom (basic_block block
)
318 if (block
== EXIT_BLOCK_PTR
)
319 return EXIT_BLOCK_PTR
;
323 = get_immediate_dominator (CDI_POST_DOMINATORS
, block
);
325 return EXIT_BLOCK_PTR
;
330 /* Find the immediate DOM of the specified
331 basic block BLOCK. */
333 static inline basic_block
334 find_dom (basic_block block
)
336 if (block
== ENTRY_BLOCK_PTR
)
337 return ENTRY_BLOCK_PTR
;
340 basic_block bb
= get_immediate_dominator (CDI_DOMINATORS
, block
);
342 return ENTRY_BLOCK_PTR
;
/* Returns true if BB1 postdominates BB2 and BB1 is
   not a loop exit bb.  The loop exit bb check is simple and does
   not cover all cases.  */
352 is_non_loop_exit_postdominating (basic_block bb1
, basic_block bb2
)
354 if (!dominated_by_p (CDI_POST_DOMINATORS
, bb2
, bb1
))
357 if (single_pred_p (bb1
) && !single_succ_p (bb2
))
/* Find the closest postdominator of a specified BB, which is control
   equivalent to BB.  */
366 static inline basic_block
367 find_control_equiv_block (basic_block bb
)
371 pdom
= find_pdom (bb
);
373 /* Skip the postdominating bb that is also loop exit. */
374 if (!is_non_loop_exit_postdominating (pdom
, bb
))
377 if (dominated_by_p (CDI_DOMINATORS
, pdom
, bb
))
383 #define MAX_NUM_CHAINS 8
384 #define MAX_CHAIN_LEN 5
385 #define MAX_POSTDOM_CHECK 8
/* Computes the control dependence chains (paths of edges)
   for DEP_BB up to the dominating basic block BB (the head node of a
   chain should be dominated by it).  CD_CHAINS is a pointer to a
   dynamic array holding the result chains.  CUR_CD_CHAIN is the current
   chain being computed.  *NUM_CHAINS is the total number of chains.  The
   function returns true if the information is successfully computed,
   and false if there is no control dependence or it is not computed.  */
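/* For illustration, consider this hypothetical CFG fragment:

     bb2:  if (a) goto bb3; else goto bb6;
     bb3:  if (b) goto bb4; else goto bb5;
     bb4:  ...

   With BB == bb2 and DEP_BB == bb4, one control dependence chain is the
   edge sequence { bb2->bb3, bb3->bb4 }: bb4 executes only when both
   edges are taken.  */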
396 compute_control_dep_chain (basic_block bb
, basic_block dep_bb
,
397 vec
<edge
> *cd_chains
,
399 vec
<edge
> *cur_cd_chain
)
404 bool found_cd_chain
= false;
405 size_t cur_chain_len
= 0;
407 if (EDGE_COUNT (bb
->succs
) < 2)
410 /* Could use a set instead. */
411 cur_chain_len
= cur_cd_chain
->length ();
412 if (cur_chain_len
> MAX_CHAIN_LEN
)
415 for (i
= 0; i
< cur_chain_len
; i
++)
417 edge e
= (*cur_cd_chain
)[i
];
418 /* cycle detected. */
423 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
426 int post_dom_check
= 0;
427 if (e
->flags
& (EDGE_FAKE
| EDGE_ABNORMAL
))
431 cur_cd_chain
->safe_push (e
);
432 while (!is_non_loop_exit_postdominating (cd_bb
, bb
))
436 /* Found a direct control dependence. */
437 if (*num_chains
< MAX_NUM_CHAINS
)
439 cd_chains
[*num_chains
] = cur_cd_chain
->copy ();
442 found_cd_chain
= true;
443 /* check path from next edge. */
447 /* Now check if DEP_BB is indirectly control dependent on BB. */
448 if (compute_control_dep_chain (cd_bb
, dep_bb
, cd_chains
,
449 num_chains
, cur_cd_chain
))
451 found_cd_chain
= true;
455 cd_bb
= find_pdom (cd_bb
);
457 if (cd_bb
== EXIT_BLOCK_PTR
|| post_dom_check
> MAX_POSTDOM_CHECK
)
460 cur_cd_chain
->pop ();
461 gcc_assert (cur_cd_chain
->length () == cur_chain_len
);
463 gcc_assert (cur_cd_chain
->length () == cur_chain_len
);
465 return found_cd_chain
;
typedef struct use_pred_info
{
  gimple cond;
  bool invert;
} *use_pred_info_t;
/* Converts the chains of control dependence edges into a set of
   predicates.  A control dependence chain is represented by a vector
   of edges.  DEP_CHAINS points to an array of dependence chains.
   NUM_CHAINS is the size of the chain array.  One edge in a dependence
   chain is mapped to a predicate expression represented by the
   use_pred_info_t type.  One dependence chain is converted to a
   composite predicate that is the result of an AND operation of the
   use_pred_info_t mapped to each edge.  A composite predicate is
   represented by a vector of use_pred_info_t.  On return, *PREDS points
   to the resulting array of composite predicates.  *NUM_PREDS is the
   number of composite predicates.  */
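/* For illustration, a chain such as { bb2->bb3, bb3->bb4 } from the
   hypothetical CFG sketched earlier is converted into the composite
   predicate

       (a != 0) .AND. (b != 0)

   where each use_pred_info_t records the GIMPLE_COND of the edge's
   source block plus an invert flag (set when the edge is the
   EDGE_FALSE_VALUE edge).  */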
488 convert_control_dep_chain_into_preds (vec
<edge
> *dep_chains
,
490 vec
<use_pred_info_t
> **preds
,
493 bool has_valid_pred
= false;
495 if (num_chains
== 0 || num_chains
>= MAX_NUM_CHAINS
)
  /* Now convert the control dep chain into a set
     of predicates.  */
500 typedef vec
<use_pred_info_t
> vec_use_pred_info_t_heap
;
501 *preds
= XCNEWVEC (vec_use_pred_info_t_heap
, num_chains
);
502 *num_preds
= num_chains
;
504 for (i
= 0; i
< num_chains
; i
++)
506 vec
<edge
> one_cd_chain
= dep_chains
[i
];
508 has_valid_pred
= false;
509 for (j
= 0; j
< one_cd_chain
.length (); j
++)
512 gimple_stmt_iterator gsi
;
513 basic_block guard_bb
;
514 use_pred_info_t one_pred
;
519 gsi
= gsi_last_bb (guard_bb
);
522 has_valid_pred
= false;
525 cond_stmt
= gsi_stmt (gsi
);
526 if (gimple_code (cond_stmt
) == GIMPLE_CALL
527 && EDGE_COUNT (e
->src
->succs
) >= 2)
529 /* Ignore EH edge. Can add assertion
530 on the other edge's flag. */
      /* Skip if there is essentially one successor.  */
534 if (EDGE_COUNT (e
->src
->succs
) == 2)
540 FOR_EACH_EDGE (e1
, ei1
, e
->src
->succs
)
542 if (EDGE_COUNT (e1
->dest
->succs
) == 0)
551 if (gimple_code (cond_stmt
) != GIMPLE_COND
)
553 has_valid_pred
= false;
556 one_pred
= XNEW (struct use_pred_info
);
557 one_pred
->cond
= cond_stmt
;
558 one_pred
->invert
= !!(e
->flags
& EDGE_FALSE_VALUE
);
559 (*preds
)[i
].safe_push (one_pred
);
560 has_valid_pred
= true;
566 return has_valid_pred
;
569 /* Computes all control dependence chains for USE_BB. The control
570 dependence chains are then converted to an array of composite
571 predicates pointed to by PREDS. PHI_BB is the basic block of
572 the phi whose result is used in USE_BB. */
575 find_predicates (vec
<use_pred_info_t
> **preds
,
580 size_t num_chains
= 0, i
;
581 vec
<edge
> *dep_chains
= 0;
582 vec
<edge
> cur_chain
= vNULL
;
583 bool has_valid_pred
= false;
584 basic_block cd_root
= 0;
586 typedef vec
<edge
> vec_edge_heap
;
587 dep_chains
= XCNEWVEC (vec_edge_heap
, MAX_NUM_CHAINS
);
589 /* First find the closest bb that is control equivalent to PHI_BB
590 that also dominates USE_BB. */
592 while (dominated_by_p (CDI_DOMINATORS
, use_bb
, cd_root
))
594 basic_block ctrl_eq_bb
= find_control_equiv_block (cd_root
);
595 if (ctrl_eq_bb
&& dominated_by_p (CDI_DOMINATORS
, use_bb
, ctrl_eq_bb
))
596 cd_root
= ctrl_eq_bb
;
601 compute_control_dep_chain (cd_root
, use_bb
,
602 dep_chains
, &num_chains
,
606 = convert_control_dep_chain_into_preds (dep_chains
,
610 /* Free individual chain */
611 cur_chain
.release ();
612 for (i
= 0; i
< num_chains
; i
++)
613 dep_chains
[i
].release ();
615 return has_valid_pred
;
618 /* Computes the set of incoming edges of PHI that have non empty
619 definitions of a phi chain. The collection will be done
620 recursively on operands that are defined by phis. CD_ROOT
621 is the control dependence root. *EDGES holds the result, and
622 VISITED_PHIS is a pointer set for detecting cycles. */
625 collect_phi_def_edges (gimple phi
, basic_block cd_root
,
627 struct pointer_set_t
*visited_phis
)
633 if (pointer_set_insert (visited_phis
, phi
))
636 n
= gimple_phi_num_args (phi
);
637 for (i
= 0; i
< n
; i
++)
639 opnd_edge
= gimple_phi_arg_edge (phi
, i
);
640 opnd
= gimple_phi_arg_def (phi
, i
);
642 if (TREE_CODE (opnd
) != SSA_NAME
)
644 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
646 fprintf (dump_file
, "\n[CHECK] Found def edge %d in ", (int)i
);
647 print_gimple_stmt (dump_file
, phi
, 0, 0);
649 edges
->safe_push (opnd_edge
);
653 gimple def
= SSA_NAME_DEF_STMT (opnd
);
655 if (gimple_code (def
) == GIMPLE_PHI
656 && dominated_by_p (CDI_DOMINATORS
,
657 gimple_bb (def
), cd_root
))
658 collect_phi_def_edges (def
, cd_root
, edges
,
660 else if (!uninit_undefined_value_p (opnd
))
662 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
664 fprintf (dump_file
, "\n[CHECK] Found def edge %d in ", (int)i
);
665 print_gimple_stmt (dump_file
, phi
, 0, 0);
667 edges
->safe_push (opnd_edge
);
673 /* For each use edge of PHI, computes all control dependence chains.
674 The control dependence chains are then converted to an array of
675 composite predicates pointed to by PREDS. */
678 find_def_preds (vec
<use_pred_info_t
> **preds
,
679 size_t *num_preds
, gimple phi
)
681 size_t num_chains
= 0, i
, n
;
682 vec
<edge
> *dep_chains
= 0;
683 vec
<edge
> cur_chain
= vNULL
;
684 vec
<edge
> def_edges
= vNULL
;
685 bool has_valid_pred
= false;
686 basic_block phi_bb
, cd_root
= 0;
687 struct pointer_set_t
*visited_phis
;
689 typedef vec
<edge
> vec_edge_heap
;
690 dep_chains
= XCNEWVEC (vec_edge_heap
, MAX_NUM_CHAINS
);
692 phi_bb
= gimple_bb (phi
);
693 /* First find the closest dominating bb to be
694 the control dependence root */
695 cd_root
= find_dom (phi_bb
);
699 visited_phis
= pointer_set_create ();
700 collect_phi_def_edges (phi
, cd_root
, &def_edges
, visited_phis
);
701 pointer_set_destroy (visited_phis
);
703 n
= def_edges
.length ();
707 for (i
= 0; i
< n
; i
++)
712 opnd_edge
= def_edges
[i
];
713 prev_nc
= num_chains
;
714 compute_control_dep_chain (cd_root
, opnd_edge
->src
,
715 dep_chains
, &num_chains
,
717 /* Free individual chain */
718 cur_chain
.release ();
720 /* Now update the newly added chains with
721 the phi operand edge: */
722 if (EDGE_COUNT (opnd_edge
->src
->succs
) > 1)
724 if (prev_nc
== num_chains
725 && num_chains
< MAX_NUM_CHAINS
)
727 for (j
= prev_nc
; j
< num_chains
; j
++)
729 dep_chains
[j
].safe_push (opnd_edge
);
735 = convert_control_dep_chain_into_preds (dep_chains
,
739 for (i
= 0; i
< num_chains
; i
++)
740 dep_chains
[i
].release ();
742 return has_valid_pred
;
745 /* Dumps the predicates (PREDS) for USESTMT. */
748 dump_predicates (gimple usestmt
, size_t num_preds
,
749 vec
<use_pred_info_t
> *preds
,
753 vec
<use_pred_info_t
> one_pred_chain
;
754 fprintf (dump_file
, msg
);
755 print_gimple_stmt (dump_file
, usestmt
, 0, 0);
756 fprintf (dump_file
, "is guarded by :\n");
757 /* do some dumping here: */
758 for (i
= 0; i
< num_preds
; i
++)
762 one_pred_chain
= preds
[i
];
763 np
= one_pred_chain
.length ();
765 for (j
= 0; j
< np
; j
++)
767 use_pred_info_t one_pred
769 if (one_pred
->invert
)
770 fprintf (dump_file
, " (.NOT.) ");
771 print_gimple_stmt (dump_file
, one_pred
->cond
, 0, 0);
773 fprintf (dump_file
, "(.AND.)\n");
775 if (i
< num_preds
- 1)
776 fprintf (dump_file
, "(.OR.)\n");
780 /* Destroys the predicate set *PREDS. */
783 destroy_predicate_vecs (size_t n
,
784 vec
<use_pred_info_t
> * preds
)
787 for (i
= 0; i
< n
; i
++)
789 for (j
= 0; j
< preds
[i
].length (); j
++)
/* Computes the 'normalized' conditional code with operand
   swapping and condition inversion.  */
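/* For example, for an original comparison a > b (GT_EXPR): with
   SWAP_COND the comparison is viewed as b < a (LT_EXPR), and
   additionally applying INVERT yields b >= a (GE_EXPR).  */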
800 static enum tree_code
801 get_cmp_code (enum tree_code orig_cmp_code
,
802 bool swap_cond
, bool invert
)
804 enum tree_code tc
= orig_cmp_code
;
807 tc
= swap_tree_comparison (orig_cmp_code
);
809 tc
= invert_tree_comparison (tc
, false);
/* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
   all values in the range satisfy (x CMPC BOUNDARY) == true.  */
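/* For example, with BOUNDARY == 5 and CMPC == LT_EXPR the guarded range
   is x < 5, so VAL == 3 is included while VAL == 7 is not.  */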
830 is_value_included_in (tree val
, tree boundary
, enum tree_code cmpc
)
832 bool inverted
= false;
836 /* Only handle integer constant here. */
837 if (TREE_CODE (val
) != INTEGER_CST
838 || TREE_CODE (boundary
) != INTEGER_CST
)
841 is_unsigned
= TYPE_UNSIGNED (TREE_TYPE (val
));
843 if (cmpc
== GE_EXPR
|| cmpc
== GT_EXPR
846 cmpc
= invert_tree_comparison (cmpc
, false);
853 result
= tree_int_cst_equal (val
, boundary
);
854 else if (cmpc
== LT_EXPR
)
855 result
= INT_CST_LT_UNSIGNED (val
, boundary
);
858 gcc_assert (cmpc
== LE_EXPR
);
859 result
= (tree_int_cst_equal (val
, boundary
)
860 || INT_CST_LT_UNSIGNED (val
, boundary
));
866 result
= tree_int_cst_equal (val
, boundary
);
867 else if (cmpc
== LT_EXPR
)
868 result
= INT_CST_LT (val
, boundary
);
871 gcc_assert (cmpc
== LE_EXPR
);
872 result
= (tree_int_cst_equal (val
, boundary
)
873 || INT_CST_LT (val
, boundary
));
883 /* Returns true if PRED is common among all the predicate
884 chains (PREDS) (and therefore can be factored out).
885 NUM_PRED_CHAIN is the size of array PREDS. */
888 find_matching_predicate_in_rest_chains (use_pred_info_t pred
,
889 vec
<use_pred_info_t
> *preds
,
890 size_t num_pred_chains
)
895 if (num_pred_chains
== 1)
898 for (i
= 1; i
< num_pred_chains
; i
++)
901 vec
<use_pred_info_t
> one_chain
= preds
[i
];
902 n
= one_chain
.length ();
903 for (j
= 0; j
< n
; j
++)
905 use_pred_info_t pred2
          /* Can relax the condition comparison to not
             use address comparison.  However, the most common
             case is that multiple control dependent paths share
             a common path prefix, so address comparison should
             be good enough here.  */
913 if (pred2
->cond
== pred
->cond
914 && pred2
->invert
== pred
->invert
)
926 /* Forward declaration. */
928 is_use_properly_guarded (gimple use_stmt
,
931 unsigned uninit_opnds
,
932 struct pointer_set_t
*visited_phis
);
/* Returns true if all uninitialized opnds are pruned.  Returns false
   otherwise.  PHI is the phi node with uninitialized operands,
   UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
   FLAG_DEF is the statement defining the flag guarding the use of the
   PHI output, BOUNDARY_CST is the const value used in the predicate
   associated with the flag, CMP_CODE is the comparison code used in
   the predicate, VISITED_PHIS is the pointer set of phis visited, and
   VISITED_FLAG_PHIS is the pointer to the pointer set of flag
   definitions that have already been visited.

   Example scenario:

   BB1:
     flag_1 = phi <0, 1>                        // (1)
     var_1  = phi <undef, some_val>

   BB2:
     flag_2 = phi <0, flag_1, flag_1>           // (2)
     var_2  = phi <undef, var_1, var_1>
     if (flag_2 == 1)
       goto BB3;

   BB3:
     use of var_2                               // (3)

   Because some flag arg in (1) is not constant, if we do not look into
   the flag phis recursively, it is conservatively treated as unknown and
   var_1 is thought to flow into the use at (3).  Since var_1 is
   potentially uninitialized, a false warning will be emitted.
   Checking recursively into (1), the compiler can find out that only
   some_val (which is defined) can flow into (3), which is OK.  */
969 prune_uninit_phi_opnds_in_unrealizable_paths (
970 gimple phi
, unsigned uninit_opnds
,
971 gimple flag_def
, tree boundary_cst
,
972 enum tree_code cmp_code
,
973 struct pointer_set_t
*visited_phis
,
974 bitmap
*visited_flag_phis
)
978 for (i
= 0; i
< MIN (32, gimple_phi_num_args (flag_def
)); i
++)
982 if (!MASK_TEST_BIT (uninit_opnds
, i
))
985 flag_arg
= gimple_phi_arg_def (flag_def
, i
);
986 if (!is_gimple_constant (flag_arg
))
988 gimple flag_arg_def
, phi_arg_def
;
990 unsigned uninit_opnds_arg_phi
;
992 if (TREE_CODE (flag_arg
) != SSA_NAME
)
994 flag_arg_def
= SSA_NAME_DEF_STMT (flag_arg
);
995 if (gimple_code (flag_arg_def
) != GIMPLE_PHI
)
998 phi_arg
= gimple_phi_arg_def (phi
, i
);
999 if (TREE_CODE (phi_arg
) != SSA_NAME
)
1002 phi_arg_def
= SSA_NAME_DEF_STMT (phi_arg
);
1003 if (gimple_code (phi_arg_def
) != GIMPLE_PHI
)
1006 if (gimple_bb (phi_arg_def
) != gimple_bb (flag_arg_def
))
1009 if (!*visited_flag_phis
)
1010 *visited_flag_phis
= BITMAP_ALLOC (NULL
);
1012 if (bitmap_bit_p (*visited_flag_phis
,
1013 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def
))))
1016 bitmap_set_bit (*visited_flag_phis
,
1017 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def
)));
1019 /* Now recursively prune the uninitialized phi args. */
1020 uninit_opnds_arg_phi
= compute_uninit_opnds_pos (phi_arg_def
);
1021 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
1022 phi_arg_def
, uninit_opnds_arg_phi
,
1023 flag_arg_def
, boundary_cst
, cmp_code
,
1024 visited_phis
, visited_flag_phis
))
1027 bitmap_clear_bit (*visited_flag_phis
,
1028 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def
)));
1032 /* Now check if the constant is in the guarded range. */
1033 if (is_value_included_in (flag_arg
, boundary_cst
, cmp_code
))
      /* Now that we know that this undefined edge is not
         pruned, if the operand is defined by another phi,
         we can further prune the incoming edges of that
         phi by checking the predicates of this operand.  */
1043 opnd
= gimple_phi_arg_def (phi
, i
);
1044 opnd_def
= SSA_NAME_DEF_STMT (opnd
);
1045 if (gimple_code (opnd_def
) == GIMPLE_PHI
)
1048 unsigned uninit_opnds2
1049 = compute_uninit_opnds_pos (opnd_def
);
1050 gcc_assert (!MASK_EMPTY (uninit_opnds2
));
1051 opnd_edge
= gimple_phi_arg_edge (phi
, i
);
1052 if (!is_use_properly_guarded (phi
,
/* A helper function that determines if the predicate set
   of the use is not overlapping with that of the uninit paths.
   The most common scenario of guarded use is in Example 1:
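     (sketched for illustration)

         int x;
         int flag = 0;

         if (some_cond)
           {
             x = ...;
             flag = 1;
           }

         ... some code ...

         if (flag)
           use (x);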
1082 The real world examples are usually more complicated, but similar
1083 and usually result from inlining:
1085 bool init_func (int * x)
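         (a hypothetical sketch; the helper names has_value_p and
         get_value are made up)
         {
           if (has_value_p ())
             {
               *x = get_value ();
               return true;
             }
           return false;
         }

       void foo (void)
       {
         int x;
         if (!init_func (&x))
           return;
         ... use of x ...        <-- guarded by init_func's return flag
       }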
1104 Another possible use scenario is in the following trivial example:
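     (sketched for illustration; the conditions are chosen to match the
     predicates discussed below)

       int foo (int n, int m)
       {
         int x;
         if (n > 0)
           x = 1;

         if (n > 0)
           {
             if (m < 2)
               use (x);
           }
         return 0;
       }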
   Predicate analysis needs to compute the composite predicate:

     1) 'x' use predicate: (n > 0) .AND. (m < 2)
     2) 'x' default value (non-def) predicate: .NOT. (n > 0)
        (the predicate chain for phi operand defs can be computed
        starting from a bb that is control equivalent to the phi's
        bb and is dominating the operand def.)

   and check overlapping:
        (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
   which simplifies to false, so the use is properly guarded.

   This implementation provides a framework that can handle such
   scenarios.  (Note that many simple cases are handled properly
   without the predicate analysis -- this is due to the jump threading
   transformation which eliminates the merge point, thus making
   path sensitive analysis unnecessary.)
   NUM_PREDS is the number of predicate chains, PREDS is
   the array of chains, PHI is the phi node whose incoming (undefined)
   paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
   uninit operand positions.  VISITED_PHIS is the pointer set of phi
   stmts being checked.  */
1142 use_pred_not_overlap_with_undef_path_pred (
1144 vec
<use_pred_info_t
> *preds
,
1145 gimple phi
, unsigned uninit_opnds
,
1146 struct pointer_set_t
*visited_phis
)
1149 gimple flag_def
= 0;
1150 tree boundary_cst
= 0;
1151 enum tree_code cmp_code
;
1152 bool swap_cond
= false;
1153 bool invert
= false;
1154 vec
<use_pred_info_t
> the_pred_chain
;
1155 bitmap visited_flag_phis
= NULL
;
1156 bool all_pruned
= false;
1158 gcc_assert (num_preds
> 0);
  /* Find within the common prefix of multiple predicate chains
     a predicate that is a comparison of a flag variable against
     a constant.  */
1162 the_pred_chain
= preds
[0];
1163 n
= the_pred_chain
.length ();
1164 for (i
= 0; i
< n
; i
++)
1167 tree cond_lhs
, cond_rhs
, flag
= 0;
1169 use_pred_info_t the_pred
1170 = the_pred_chain
[i
];
1172 cond
= the_pred
->cond
;
1173 invert
= the_pred
->invert
;
1174 cond_lhs
= gimple_cond_lhs (cond
);
1175 cond_rhs
= gimple_cond_rhs (cond
);
1176 cmp_code
= gimple_cond_code (cond
);
1178 if (cond_lhs
!= NULL_TREE
&& TREE_CODE (cond_lhs
) == SSA_NAME
1179 && cond_rhs
!= NULL_TREE
&& is_gimple_constant (cond_rhs
))
1181 boundary_cst
= cond_rhs
;
1184 else if (cond_rhs
!= NULL_TREE
&& TREE_CODE (cond_rhs
) == SSA_NAME
1185 && cond_lhs
!= NULL_TREE
&& is_gimple_constant (cond_lhs
))
1187 boundary_cst
= cond_lhs
;
1195 flag_def
= SSA_NAME_DEF_STMT (flag
);
1200 if ((gimple_code (flag_def
) == GIMPLE_PHI
)
1201 && (gimple_bb (flag_def
) == gimple_bb (phi
))
1202 && find_matching_predicate_in_rest_chains (
1203 the_pred
, preds
, num_preds
))
  /* Now check that all the uninit incoming edges have a constant flag
     value that is in conflict with the use guard/predicate.  */
1214 cmp_code
= get_cmp_code (cmp_code
, swap_cond
, invert
);
1216 if (cmp_code
== ERROR_MARK
)
1219 all_pruned
= prune_uninit_phi_opnds_in_unrealizable_paths (phi
,
1225 &visited_flag_phis
);
1227 if (visited_flag_phis
)
1228 BITMAP_FREE (visited_flag_phis
);
1233 /* Returns true if TC is AND or OR */
1236 is_and_or_or (enum tree_code tc
, tree typ
)
1238 return (tc
== BIT_IOR_EXPR
1239 || (tc
== BIT_AND_EXPR
1240 && (typ
== 0 || TREE_CODE (typ
) == BOOLEAN_TYPE
)));
typedef struct norm_cond
{
  vec<gimple> conds;
  enum tree_code cond_code;
  bool invert;
} *norm_cond_t;
1251 /* Normalizes gimple condition COND. The normalization follows
1252 UD chains to form larger condition expression trees. NORM_COND
1253 holds the normalized result. COND_CODE is the logical opcode
1254 (AND or OR) of the normalized tree. */
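/* For illustration, assuming this hypothetical GIMPLE sequence with
   boolean temporaries t1_3 and t2_4:

       t1_3 = a_1 > 0;
       t2_4 = b_2 < 5;
       t3_5 = t1_3 & t2_4;
       if (t3_5 != 0) ...

   normalizing the condition (t3_5 != 0) follows the UD chain of t3_5
   and yields a NORM_COND whose cond_code is BIT_AND_EXPR and whose
   conds vector holds the defining statements of t1_3 and t2_4.  */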
1257 normalize_cond_1 (gimple cond
,
1258 norm_cond_t norm_cond
,
1259 enum tree_code cond_code
)
1261 enum gimple_code gc
;
1262 enum tree_code cur_cond_code
;
1265 gc
= gimple_code (cond
);
1266 if (gc
!= GIMPLE_ASSIGN
)
1268 norm_cond
->conds
.safe_push (cond
);
1272 cur_cond_code
= gimple_assign_rhs_code (cond
);
1273 rhs1
= gimple_assign_rhs1 (cond
);
1274 rhs2
= gimple_assign_rhs2 (cond
);
1275 if (cur_cond_code
== NE_EXPR
)
1277 if (integer_zerop (rhs2
)
1278 && (TREE_CODE (rhs1
) == SSA_NAME
))
1280 SSA_NAME_DEF_STMT (rhs1
),
1281 norm_cond
, cond_code
);
1282 else if (integer_zerop (rhs1
)
1283 && (TREE_CODE (rhs2
) == SSA_NAME
))
1285 SSA_NAME_DEF_STMT (rhs2
),
1286 norm_cond
, cond_code
);
1288 norm_cond
->conds
.safe_push (cond
);
1293 if (is_and_or_or (cur_cond_code
, TREE_TYPE (rhs1
))
1294 && (cond_code
== cur_cond_code
|| cond_code
== ERROR_MARK
)
1295 && (TREE_CODE (rhs1
) == SSA_NAME
&& TREE_CODE (rhs2
) == SSA_NAME
))
1297 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1
),
1298 norm_cond
, cur_cond_code
);
1299 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2
),
1300 norm_cond
, cur_cond_code
);
1301 norm_cond
->cond_code
= cur_cond_code
;
1304 norm_cond
->conds
.safe_push (cond
);
1307 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1308 if COND needs to be inverted or not. */
1311 normalize_cond (gimple cond
, norm_cond_t norm_cond
, bool invert
)
1313 enum tree_code cond_code
;
1315 norm_cond
->cond_code
= ERROR_MARK
;
1316 norm_cond
->invert
= false;
1317 norm_cond
->conds
.create (0);
1318 gcc_assert (gimple_code (cond
) == GIMPLE_COND
);
1319 cond_code
= gimple_cond_code (cond
);
1321 cond_code
= invert_tree_comparison (cond_code
, false);
1323 if (cond_code
== NE_EXPR
)
1325 if (integer_zerop (gimple_cond_rhs (cond
))
1326 && (TREE_CODE (gimple_cond_lhs (cond
)) == SSA_NAME
))
1328 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond
)),
1329 norm_cond
, ERROR_MARK
);
1330 else if (integer_zerop (gimple_cond_lhs (cond
))
1331 && (TREE_CODE (gimple_cond_rhs (cond
)) == SSA_NAME
))
1333 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond
)),
1334 norm_cond
, ERROR_MARK
);
1337 norm_cond
->conds
.safe_push (cond
);
1338 norm_cond
->invert
= invert
;
1343 norm_cond
->conds
.safe_push (cond
);
1344 norm_cond
->invert
= invert
;
1347 gcc_assert (norm_cond
->conds
.length () == 1
1348 || is_and_or_or (norm_cond
->cond_code
, NULL
));
1351 /* Returns true if the domain for condition COND1 is a subset of
1352 COND2. REVERSE is a flag. when it is true the function checks
1353 if COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1354 to indicate if COND1 and COND2 need to be inverted or not. */
1357 is_gcond_subset_of (gimple cond1
, bool invert1
,
1358 gimple cond2
, bool invert2
,
1361 enum gimple_code gc1
, gc2
;
1362 enum tree_code cond1_code
, cond2_code
;
1364 tree cond1_lhs
, cond1_rhs
, cond2_lhs
, cond2_rhs
;
1366 /* Take the short cut. */
1377 gc1
= gimple_code (cond1
);
1378 gc2
= gimple_code (cond2
);
1380 if ((gc1
!= GIMPLE_ASSIGN
&& gc1
!= GIMPLE_COND
)
1381 || (gc2
!= GIMPLE_ASSIGN
&& gc2
!= GIMPLE_COND
))
1382 return cond1
== cond2
;
1384 cond1_code
= ((gc1
== GIMPLE_ASSIGN
)
1385 ? gimple_assign_rhs_code (cond1
)
1386 : gimple_cond_code (cond1
));
1388 cond2_code
= ((gc2
== GIMPLE_ASSIGN
)
1389 ? gimple_assign_rhs_code (cond2
)
1390 : gimple_cond_code (cond2
));
1392 if (TREE_CODE_CLASS (cond1_code
) != tcc_comparison
1393 || TREE_CODE_CLASS (cond2_code
) != tcc_comparison
)
1397 cond1_code
= invert_tree_comparison (cond1_code
, false);
1399 cond2_code
= invert_tree_comparison (cond2_code
, false);
1401 cond1_lhs
= ((gc1
== GIMPLE_ASSIGN
)
1402 ? gimple_assign_rhs1 (cond1
)
1403 : gimple_cond_lhs (cond1
));
1404 cond1_rhs
= ((gc1
== GIMPLE_ASSIGN
)
1405 ? gimple_assign_rhs2 (cond1
)
1406 : gimple_cond_rhs (cond1
));
1407 cond2_lhs
= ((gc2
== GIMPLE_ASSIGN
)
1408 ? gimple_assign_rhs1 (cond2
)
1409 : gimple_cond_lhs (cond2
));
1410 cond2_rhs
= ((gc2
== GIMPLE_ASSIGN
)
1411 ? gimple_assign_rhs2 (cond2
)
1412 : gimple_cond_rhs (cond2
));
1414 /* Assuming const operands have been swapped to the
1415 rhs at this point of the analysis. */
1417 if (cond1_lhs
!= cond2_lhs
)
1420 if (!is_gimple_constant (cond1_rhs
)
1421 || TREE_CODE (cond1_rhs
) != INTEGER_CST
)
1422 return (cond1_rhs
== cond2_rhs
);
1424 if (!is_gimple_constant (cond2_rhs
)
1425 || TREE_CODE (cond2_rhs
) != INTEGER_CST
)
1426 return (cond1_rhs
== cond2_rhs
);
1428 if (cond1_code
== EQ_EXPR
)
1429 return is_value_included_in (cond1_rhs
,
1430 cond2_rhs
, cond2_code
);
1431 if (cond1_code
== NE_EXPR
|| cond2_code
== EQ_EXPR
)
1432 return ((cond2_code
== cond1_code
)
1433 && tree_int_cst_equal (cond1_rhs
, cond2_rhs
));
1435 if (((cond1_code
== GE_EXPR
|| cond1_code
== GT_EXPR
)
1436 && (cond2_code
== LE_EXPR
|| cond2_code
== LT_EXPR
))
1437 || ((cond1_code
== LE_EXPR
|| cond1_code
== LT_EXPR
)
1438 && (cond2_code
== GE_EXPR
|| cond2_code
== GT_EXPR
)))
1441 if (cond1_code
!= GE_EXPR
&& cond1_code
!= GT_EXPR
1442 && cond1_code
!= LE_EXPR
&& cond1_code
!= LT_EXPR
)
1445 if (cond1_code
== GT_EXPR
)
1447 cond1_code
= GE_EXPR
;
1448 cond1_rhs
= fold_binary (PLUS_EXPR
, TREE_TYPE (cond1_rhs
),
1450 fold_convert (TREE_TYPE (cond1_rhs
),
1453 else if (cond1_code
== LT_EXPR
)
1455 cond1_code
= LE_EXPR
;
1456 cond1_rhs
= fold_binary (MINUS_EXPR
, TREE_TYPE (cond1_rhs
),
1458 fold_convert (TREE_TYPE (cond1_rhs
),
1465 gcc_assert (cond1_code
== GE_EXPR
|| cond1_code
== LE_EXPR
);
1467 if (cond2_code
== GE_EXPR
|| cond2_code
== GT_EXPR
||
1468 cond2_code
== LE_EXPR
|| cond2_code
== LT_EXPR
)
1469 return is_value_included_in (cond1_rhs
,
1470 cond2_rhs
, cond2_code
);
1471 else if (cond2_code
== NE_EXPR
)
1473 (is_value_included_in (cond1_rhs
,
1474 cond2_rhs
, cond2_code
)
1475 && !is_value_included_in (cond2_rhs
,
1476 cond1_rhs
, cond1_code
));
/* Returns true if the domain of the condition expression
   in COND is a subset of any of the sub-conditions
   of the normalized condition NORM_COND.  INVERT is a flag
   to indicate if COND needs to be inverted.
   REVERSE is a flag.  When it is true, the check is reversed --
   it returns true if COND is a superset of any of the subconditions
   of NORM_COND.  */
1489 is_subset_of_any (gimple cond
, bool invert
,
1490 norm_cond_t norm_cond
, bool reverse
)
1493 size_t len
= norm_cond
->conds
.length ();
1495 for (i
= 0; i
< len
; i
++)
1497 if (is_gcond_subset_of (cond
, invert
,
1498 norm_cond
->conds
[i
],
/* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
   expressions (formed by following UD chains not control
   dependence chains).  The function returns true if the domain
   of expression NORM_COND1 is a subset of NORM_COND2's.
   The implementation is conservative, and it returns false if
   the inclusion relationship may not hold.  */
1513 is_or_set_subset_of (norm_cond_t norm_cond1
,
1514 norm_cond_t norm_cond2
)
1517 size_t len
= norm_cond1
->conds
.length ();
1519 for (i
= 0; i
< len
; i
++)
1521 if (!is_subset_of_any (norm_cond1
->conds
[i
],
1522 false, norm_cond2
, false))
/* NORM_COND1 and NORM_COND2 are normalized logical AND
   expressions (formed by following UD chains not control
   dependence chains).  The function returns true if the domain
   of expression NORM_COND1 is a subset of NORM_COND2's.  */
1534 is_and_set_subset_of (norm_cond_t norm_cond1
,
1535 norm_cond_t norm_cond2
)
1538 size_t len
= norm_cond2
->conds
.length ();
1540 for (i
= 0; i
< len
; i
++)
1542 if (!is_subset_of_any (norm_cond2
->conds
[i
],
1543 false, norm_cond1
, true))
/* Returns true if the domain of NORM_COND1 is a subset
   of that of NORM_COND2.  Returns false if it can not be
   proved.  */
1554 is_norm_cond_subset_of (norm_cond_t norm_cond1
,
1555 norm_cond_t norm_cond2
)
1558 enum tree_code code1
, code2
;
1560 code1
= norm_cond1
->cond_code
;
1561 code2
= norm_cond2
->cond_code
;
1563 if (code1
== BIT_AND_EXPR
)
1565 /* Both conditions are AND expressions. */
1566 if (code2
== BIT_AND_EXPR
)
1567 return is_and_set_subset_of (norm_cond1
, norm_cond2
);
1568 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1569 expression. In this case, returns true if any subexpression
1570 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1571 else if (code2
== BIT_IOR_EXPR
)
1574 len1
= norm_cond1
->conds
.length ();
1575 for (i
= 0; i
< len1
; i
++)
1577 gimple cond1
= norm_cond1
->conds
[i
];
1578 if (is_subset_of_any (cond1
, false, norm_cond2
, false))
1585 gcc_assert (code2
== ERROR_MARK
);
1586 gcc_assert (norm_cond2
->conds
.length () == 1);
1587 return is_subset_of_any (norm_cond2
->conds
[0],
1588 norm_cond2
->invert
, norm_cond1
, true);
1591 /* NORM_COND1 is an OR expression */
1592 else if (code1
== BIT_IOR_EXPR
)
1597 return is_or_set_subset_of (norm_cond1
, norm_cond2
);
1601 gcc_assert (code1
== ERROR_MARK
);
1602 gcc_assert (norm_cond1
->conds
.length () == 1);
      /* Conservatively returns false if NORM_COND1 is non-decomposable
         and NORM_COND2 is an AND expression.  */
1605 if (code2
== BIT_AND_EXPR
)
1608 if (code2
== BIT_IOR_EXPR
)
1609 return is_subset_of_any (norm_cond1
->conds
[0],
1610 norm_cond1
->invert
, norm_cond2
, false);
1612 gcc_assert (code2
== ERROR_MARK
);
1613 gcc_assert (norm_cond2
->conds
.length () == 1);
1614 return is_gcond_subset_of (norm_cond1
->conds
[0],
1616 norm_cond2
->conds
[0],
1617 norm_cond2
->invert
, false);
/* Returns true if the domain of the single predicate expression
   EXPR1 is a subset of that of EXPR2.  Returns false if it
   can not be proved.  */
1626 is_pred_expr_subset_of (use_pred_info_t expr1
,
1627 use_pred_info_t expr2
)
1629 gimple cond1
, cond2
;
1630 enum tree_code code1
, code2
;
1631 struct norm_cond norm_cond1
, norm_cond2
;
1632 bool is_subset
= false;
1634 cond1
= expr1
->cond
;
1635 cond2
= expr2
->cond
;
1636 code1
= gimple_cond_code (cond1
);
1637 code2
= gimple_cond_code (cond2
);
1640 code1
= invert_tree_comparison (code1
, false);
1642 code2
= invert_tree_comparison (code2
, false);
1644 /* Fast path -- match exactly */
1645 if ((gimple_cond_lhs (cond1
) == gimple_cond_lhs (cond2
))
1646 && (gimple_cond_rhs (cond1
) == gimple_cond_rhs (cond2
))
1647 && (code1
== code2
))
1650 /* Normalize conditions. To keep NE_EXPR, do not invert
1651 with both need inversion. */
1652 normalize_cond (cond1
, &norm_cond1
, (expr1
->invert
));
1653 normalize_cond (cond2
, &norm_cond2
, (expr2
->invert
));
1655 is_subset
= is_norm_cond_subset_of (&norm_cond1
, &norm_cond2
);
1658 norm_cond1
.conds
.release ();
1659 norm_cond2
.conds
.release ();
1663 /* Returns true if the domain of PRED1 is a subset
1664 of that of PRED2. Returns false if it can not be proved so. */
1667 is_pred_chain_subset_of (vec
<use_pred_info_t
> pred1
,
1668 vec
<use_pred_info_t
> pred2
)
1670 size_t np1
, np2
, i1
, i2
;
1672 np1
= pred1
.length ();
1673 np2
= pred2
.length ();
1675 for (i2
= 0; i2
< np2
; i2
++)
1678 use_pred_info_t info2
1680 for (i1
= 0; i1
< np1
; i1
++)
1682 use_pred_info_t info1
1684 if (is_pred_expr_subset_of (info1
, info2
))
/* Returns true if the domain defined by
   one pred chain ONE_PRED is a subset of the domain
   of *PREDS.  It returns false if ONE_PRED's domain is
   not a subset of any of the sub-domains of PREDS
   (corresponding to each individual chain in it), even
   though it may still be a subset of the whole domain
   of PREDS, which is the union (ORed) of all its subdomains.
   In other words, the result is conservative.  */
1706 is_included_in (vec
<use_pred_info_t
> one_pred
,
1707 vec
<use_pred_info_t
> *preds
,
1712 for (i
= 0; i
< n
; i
++)
1714 if (is_pred_chain_subset_of (one_pred
, preds
[i
]))
/* Compares two predicate sets PREDS1 and PREDS2 and returns
   true if the domain defined by PREDS1 is a superset
   of PREDS2's domain.  N1 and N2 are array sizes of PREDS1 and
   PREDS2 respectively.  The implementation chooses not to build
   generic trees (and rely on the folding capability of the
   compiler), but instead performs brute force comparison of
   individual predicate chains (won't be a compile time problem
   as the chains are pretty short).  When the function returns
   false, it does not necessarily mean *PREDS1 is not a superset
   of *PREDS2, but means it may not be so since the analysis can
   not prove it.  In such cases, false warnings may still be
   emitted.  */
1735 is_superset_of (vec
<use_pred_info_t
> *preds1
,
1737 vec
<use_pred_info_t
> *preds2
,
1741 vec
<use_pred_info_t
> one_pred_chain
;
1743 for (i
= 0; i
< n2
; i
++)
1745 one_pred_chain
= preds2
[i
];
1746 if (!is_included_in (one_pred_chain
, preds1
, n1
))
1753 /* Comparison function used by qsort. It is used to
1754 sort predicate chains to allow predicate
1758 pred_chain_length_cmp (const void *p1
, const void *p2
)
1760 use_pred_info_t i1
, i2
;
1761 vec
<use_pred_info_t
> const *chain1
1762 = (vec
<use_pred_info_t
> const *)p1
;
1763 vec
<use_pred_info_t
> const *chain2
1764 = (vec
<use_pred_info_t
> const *)p2
;
1766 if (chain1
->length () != chain2
->length ())
1767 return (chain1
->length () - chain2
->length ());
  /* Allow predicates with similar prefixes to come together.  */
1773 if (!i1
->invert
&& i2
->invert
)
1775 else if (i1
->invert
&& !i2
->invert
)
1778 return gimple_uid (i1
->cond
) - gimple_uid (i2
->cond
);
1781 /* x OR (!x AND y) is equivalent to x OR y.
1782 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1783 into x1 OR x2 OR x3. PREDS is the predicate chains, and N is
1784 the number of chains. Returns true if normalization happens. */
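/* For instance (illustrative), the two chains

       (n > 0)
       .NOT. (n > 0) .AND. (m < 2)

   are normalized into the equivalent

       (n > 0)
       (m < 2)

   since x OR (!x AND y) == x OR y.  */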
1787 normalize_preds (vec
<use_pred_info_t
> *preds
, size_t *n
)
1790 vec
<use_pred_info_t
> pred_chain
;
1791 vec
<use_pred_info_t
> x
= vNULL
;
1792 use_pred_info_t xj
= 0, nxj
= 0;
1797 /* First sort the chains in ascending order of lengths. */
1798 qsort (preds
, *n
, sizeof (void *), pred_chain_length_cmp
);
1799 pred_chain
= preds
[0];
1800 ll
= pred_chain
.length ();
1805 use_pred_info_t xx
, yy
, xx2
, nyy
;
1806 vec
<use_pred_info_t
> pred_chain2
= preds
[1];
1807 if (pred_chain2
.length () != 2)
1810 /* See if simplification x AND y OR x AND !y is possible. */
1813 xx2
= pred_chain2
[0];
1814 nyy
= pred_chain2
[1];
1815 if (gimple_cond_lhs (xx
->cond
) != gimple_cond_lhs (xx2
->cond
)
1816 || gimple_cond_rhs (xx
->cond
) != gimple_cond_rhs (xx2
->cond
)
1817 || gimple_cond_code (xx
->cond
) != gimple_cond_code (xx2
->cond
)
1818 || (xx
->invert
!= xx2
->invert
))
1820 if (gimple_cond_lhs (yy
->cond
) != gimple_cond_lhs (nyy
->cond
)
1821 || gimple_cond_rhs (yy
->cond
) != gimple_cond_rhs (nyy
->cond
)
1822 || gimple_cond_code (yy
->cond
) != gimple_cond_code (nyy
->cond
)
1823 || (yy
->invert
== nyy
->invert
))
1826 /* Now merge the first two chains. */
1830 pred_chain
.release ();
1831 pred_chain2
.release ();
1832 pred_chain
.safe_push (xx
);
1833 preds
[0] = pred_chain
;
1834 for (i
= 1; i
< *n
- 1; i
++)
1835 preds
[i
] = preds
[i
+ 1];
1837 preds
[*n
- 1].create (0);
1844 x
.safe_push (pred_chain
[0]);
1846 /* The loop extracts x1, x2, x3, etc from chains
1847 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1848 for (i
= 1; i
< *n
; i
++)
1850 pred_chain
= preds
[i
];
1851 if (pred_chain
.length () != i
+ 1)
1854 for (j
= 0; j
< i
; j
++)
1857 nxj
= pred_chain
[j
];
1859 /* Check if nxj is !xj */
1860 if (gimple_cond_lhs (xj
->cond
) != gimple_cond_lhs (nxj
->cond
)
1861 || gimple_cond_rhs (xj
->cond
) != gimple_cond_rhs (nxj
->cond
)
1862 || gimple_cond_code (xj
->cond
) != gimple_cond_code (nxj
->cond
)
1863 || (xj
->invert
== nxj
->invert
))
1867 x
.safe_push (pred_chain
[i
]);
  /* Now normalize the pred chains using the extracted x1, x2, x3 etc.  */
1871 for (j
= 0; j
< *n
; j
++)
1876 t
= XNEW (struct use_pred_info
);
1882 for (i
= 0; i
< *n
; i
++)
1884 pred_chain
= preds
[i
];
1885 for (j
= 0; j
< pred_chain
.length (); j
++)
1886 free (pred_chain
[j
]);
1887 pred_chain
.release ();
1889 pred_chain
.safe_push (x
[i
]);
1890 preds
[i
] = pred_chain
;
/* Computes the predicates that guard the use and checks
   if the incoming paths that have empty (or possibly
   empty) definition can be pruned/filtered.  The function returns
   true if it can be determined that the use of PHI's def in
   USE_STMT is guarded with a predicate set not overlapping with
   predicate sets of all runtime paths that do not have a definition.
   Returns false if it is not or it can not be determined.  USE_BB is
   the bb of the use (for phi operand use, the bb is not the bb of
   the phi stmt, but the src bb of the operand edge).  UNINIT_OPNDS
   is a bit vector.  If an operand of PHI is uninitialized, the
   corresponding bit in the vector is 1.  VISITED_PHIS is a pointer
   set of phis being visited.  */
1911 is_use_properly_guarded (gimple use_stmt
,
1914 unsigned uninit_opnds
,
1915 struct pointer_set_t
*visited_phis
)
1918 vec
<use_pred_info_t
> *preds
= 0;
1919 vec
<use_pred_info_t
> *def_preds
= 0;
1920 size_t num_preds
= 0, num_def_preds
= 0;
1921 bool has_valid_preds
= false;
1922 bool is_properly_guarded
= false;
1924 if (pointer_set_insert (visited_phis
, phi
))
1927 phi_bb
= gimple_bb (phi
);
1929 if (is_non_loop_exit_postdominating (use_bb
, phi_bb
))
1932 has_valid_preds
= find_predicates (&preds
, &num_preds
,
1935 if (!has_valid_preds
)
1937 destroy_predicate_vecs (num_preds
, preds
);
1942 dump_predicates (use_stmt
, num_preds
, preds
,
1945 has_valid_preds
= find_def_preds (&def_preds
,
1946 &num_def_preds
, phi
);
1948 if (has_valid_preds
)
1952 dump_predicates (phi
, num_def_preds
, def_preds
,
1953 "Operand defs of phi ");
1955 normed
= normalize_preds (def_preds
, &num_def_preds
);
1956 if (normed
&& dump_file
)
1958 fprintf (dump_file
, "\nNormalized to\n");
1959 dump_predicates (phi
, num_def_preds
, def_preds
,
1960 "Operand defs of phi ");
1962 is_properly_guarded
=
1963 is_superset_of (def_preds
, num_def_preds
,
1967 /* further prune the dead incoming phi edges. */
1968 if (!is_properly_guarded
)
1970 = use_pred_not_overlap_with_undef_path_pred (
1971 num_preds
, preds
, phi
, uninit_opnds
, visited_phis
);
1973 destroy_predicate_vecs (num_preds
, preds
);
1974 destroy_predicate_vecs (num_def_preds
, def_preds
);
1975 return is_properly_guarded
;
1978 /* Searches through all uses of a potentially
1979 uninitialized variable defined by PHI and returns a use
1980 statement if the use is not properly guarded. It returns
1981 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1982 holding the position(s) of uninit PHI operands. WORKLIST
1983 is the vector of candidate phis that may be updated by this
1984 function. ADDED_TO_WORKLIST is the pointer set tracking
1985 if the new phi is already in the worklist. */
1988 find_uninit_use (gimple phi
, unsigned uninit_opnds
,
1989 vec
<gimple
> *worklist
,
1990 struct pointer_set_t
*added_to_worklist
)
1993 use_operand_p use_p
;
1995 imm_use_iterator iter
;
1997 phi_result
= gimple_phi_result (phi
);
1999 FOR_EACH_IMM_USE_FAST (use_p
, iter
, phi_result
)
2001 struct pointer_set_t
*visited_phis
;
2004 use_stmt
= USE_STMT (use_p
);
2005 if (is_gimple_debug (use_stmt
))
2008 visited_phis
= pointer_set_create ();
2010 if (gimple_code (use_stmt
) == GIMPLE_PHI
)
2011 use_bb
= gimple_phi_arg_edge (use_stmt
,
2012 PHI_ARG_INDEX_FROM_USE (use_p
))->src
;
2014 use_bb
= gimple_bb (use_stmt
);
2016 if (is_use_properly_guarded (use_stmt
,
2022 pointer_set_destroy (visited_phis
);
2025 pointer_set_destroy (visited_phis
);
2027 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2029 fprintf (dump_file
, "[CHECK]: Found unguarded use: ");
2030 print_gimple_stmt (dump_file
, use_stmt
, 0, 0);
2032 /* Found one real use, return. */
2033 if (gimple_code (use_stmt
) != GIMPLE_PHI
)
2036 /* Found a phi use that is not guarded,
2037 add the phi to the worklist. */
2038 if (!pointer_set_insert (added_to_worklist
,
2041 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2043 fprintf (dump_file
, "[WORKLIST]: Update worklist with phi: ");
2044 print_gimple_stmt (dump_file
, use_stmt
, 0, 0);
2047 worklist
->safe_push (use_stmt
);
2048 pointer_set_insert (possibly_undefined_names
, phi_result
);
/* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
   and give a warning if there exists a runtime path from the entry to a
   use of the PHI def that does not contain a definition.  In other words,
   the warning is on the real use.  The more dead paths that can be pruned
   by the compiler, the fewer false positives the warning has.  WORKLIST
   is a vector of candidate phis to be examined.  ADDED_TO_WORKLIST is
   a pointer set tracking if the new phi is added to the worklist or not.  */
2064 warn_uninitialized_phi (gimple phi
, vec
<gimple
> *worklist
,
2065 struct pointer_set_t
*added_to_worklist
)
2067 unsigned uninit_opnds
;
2068 gimple uninit_use_stmt
= 0;
2071 /* Don't look at virtual operands. */
2072 if (virtual_operand_p (gimple_phi_result (phi
)))
2075 uninit_opnds
= compute_uninit_opnds_pos (phi
);
2077 if (MASK_EMPTY (uninit_opnds
))
2080 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2082 fprintf (dump_file
, "[CHECK]: examining phi: ");
2083 print_gimple_stmt (dump_file
, phi
, 0, 0);
2086 /* Now check if we have any use of the value without proper guard. */
2087 uninit_use_stmt
= find_uninit_use (phi
, uninit_opnds
,
2088 worklist
, added_to_worklist
);
2090 /* All uses are properly guarded. */
2091 if (!uninit_use_stmt
)
2094 uninit_op
= gimple_phi_arg_def (phi
, MASK_FIRST_SET_BIT (uninit_opnds
));
2095 if (SSA_NAME_VAR (uninit_op
) == NULL_TREE
)
2097 warn_uninit (OPT_Wmaybe_uninitialized
, uninit_op
, SSA_NAME_VAR (uninit_op
),
2098 SSA_NAME_VAR (uninit_op
),
2099 "%qD may be used uninitialized in this function",
2105 /* Entry point to the late uninitialized warning pass. */
2108 execute_late_warn_uninitialized (void)
2111 gimple_stmt_iterator gsi
;
2112 vec
<gimple
> worklist
= vNULL
;
2113 struct pointer_set_t
*added_to_worklist
;
2115 calculate_dominance_info (CDI_DOMINATORS
);
2116 calculate_dominance_info (CDI_POST_DOMINATORS
);
2117 /* Re-do the plain uninitialized variable check, as optimization may have
2118 straightened control flow. Do this first so that we don't accidentally
2119 get a "may be" warning when we'd have seen an "is" warning later. */
2120 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
2122 timevar_push (TV_TREE_UNINIT
);
2124 possibly_undefined_names
= pointer_set_create ();
2125 added_to_worklist
= pointer_set_create ();
2127 /* Initialize worklist */
2129 for (gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2131 gimple phi
= gsi_stmt (gsi
);
2134 n
= gimple_phi_num_args (phi
);
2136 /* Don't look at virtual operands. */
2137 if (virtual_operand_p (gimple_phi_result (phi
)))
2140 for (i
= 0; i
< n
; ++i
)
2142 tree op
= gimple_phi_arg_def (phi
, i
);
2143 if (TREE_CODE (op
) == SSA_NAME
2144 && uninit_undefined_value_p (op
))
2146 worklist
.safe_push (phi
);
2147 pointer_set_insert (added_to_worklist
, phi
);
2148 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2150 fprintf (dump_file
, "[WORKLIST]: add to initial list: ");
2151 print_gimple_stmt (dump_file
, phi
, 0, 0);
2158 while (worklist
.length () != 0)
2161 cur_phi
= worklist
.pop ();
2162 warn_uninitialized_phi (cur_phi
, &worklist
, added_to_worklist
);
2165 worklist
.release ();
2166 pointer_set_destroy (added_to_worklist
);
2167 pointer_set_destroy (possibly_undefined_names
);
2168 possibly_undefined_names
= NULL
;
2169 free_dominance_info (CDI_POST_DOMINATORS
);
2170 timevar_pop (TV_TREE_UNINIT
);
2175 gate_warn_uninitialized (void)
2177 return warn_uninitialized
!= 0;
2182 const pass_data pass_data_late_warn_uninitialized
=
2184 GIMPLE_PASS
, /* type */
2185 "uninit", /* name */
2186 OPTGROUP_NONE
, /* optinfo_flags */
2187 true, /* has_gate */
2188 true, /* has_execute */
2189 TV_NONE
, /* tv_id */
2190 PROP_ssa
, /* properties_required */
2191 0, /* properties_provided */
2192 0, /* properties_destroyed */
2193 0, /* todo_flags_start */
2194 0, /* todo_flags_finish */
2197 class pass_late_warn_uninitialized
: public gimple_opt_pass
2200 pass_late_warn_uninitialized (gcc::context
*ctxt
)
2201 : gimple_opt_pass (pass_data_late_warn_uninitialized
, ctxt
)
2204 /* opt_pass methods: */
2205 opt_pass
* clone () { return new pass_late_warn_uninitialized (m_ctxt
); }
2206 bool gate () { return gate_warn_uninitialized (); }
2207 unsigned int execute () { return execute_late_warn_uninitialized (); }
2209 }; // class pass_late_warn_uninitialized
2214 make_pass_late_warn_uninitialized (gcc::context
*ctxt
)
2216 return new pass_late_warn_uninitialized (ctxt
);
2221 execute_early_warn_uninitialized (void)
  /* Currently, this pass always runs, but
     execute_late_warn_uninitialized only runs with optimization.  With
     optimization we want to warn about possibly uninitialized uses as late
     as possible, thus don't do it here.  However, without
     optimization we need to warn here about "may be uninitialized".  */
2229 calculate_dominance_info (CDI_POST_DOMINATORS
);
2231 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize
);
  /* Post-dominator information can not be reliably updated.  Free it
     after the use.  */
2236 free_dominance_info (CDI_POST_DOMINATORS
);
2243 const pass_data pass_data_early_warn_uninitialized
=
2245 GIMPLE_PASS
, /* type */
2246 "*early_warn_uninitialized", /* name */
2247 OPTGROUP_NONE
, /* optinfo_flags */
2248 true, /* has_gate */
2249 true, /* has_execute */
2250 TV_TREE_UNINIT
, /* tv_id */
2251 PROP_ssa
, /* properties_required */
2252 0, /* properties_provided */
2253 0, /* properties_destroyed */
2254 0, /* todo_flags_start */
2255 0, /* todo_flags_finish */
2258 class pass_early_warn_uninitialized
: public gimple_opt_pass
2261 pass_early_warn_uninitialized (gcc::context
*ctxt
)
2262 : gimple_opt_pass (pass_data_early_warn_uninitialized
, ctxt
)
2265 /* opt_pass methods: */
2266 bool gate () { return gate_warn_uninitialized (); }
2267 unsigned int execute () { return execute_early_warn_uninitialized (); }
2269 }; // class pass_early_warn_uninitialized
2274 make_pass_early_warn_uninitialized (gcc::context
*ctxt
)
2276 return new pass_early_warn_uninitialized (ctxt
);