/* Predicate aware uninitialized variable warning.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Xinliang David Li <davidxl@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "pointer-set.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
/* This implements the pass that does predicate aware warning on uses of
   possibly uninitialized variables.  The pass first collects the set of
   possibly uninitialized SSA names.  For each such name, it walks through
   all its immediate uses.  For each immediate use, it rebuilds the condition
   expression (the predicate) that guards the use.  The predicate is then
   examined to see if the variable is always defined under that same condition.
   This is done either by pruning the unrealizable paths that lead to the
   default definitions or by checking if the predicate set that guards the
   defining paths is a superset of the use predicate.  */
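/* As an informal illustration (hypothetical example, not taken from the
   sources or testsuite), the analysis is meant to avoid a false
   -Wmaybe-uninitialized warning on flag-guarded code such as:

     int foo (int n)
     {
       int v, flag = 0;
       if (n > 0)
         {
           v = n * 2;
           flag = 1;
         }
       if (flag)
         return v;   // use predicate (flag != 0) implies the def predicate
       return 0;
     }
*/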
/* Pointer set of potentially undefined ssa names, i.e.,
   ssa names that are defined by phi with operands that
   are not defined or potentially undefined.  */
static struct pointer_set_t *possibly_undefined_names = 0;
/* Bit mask handling macros.  */
#define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
#define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
#define MASK_EMPTY(mask) (mask == 0)
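/* A minimal usage sketch (illustrative only): bit I of a mask records
   whether operand I of a phi is possibly uninitialized.

     unsigned mask = 0;
     MASK_SET_BIT (mask, 0);             // operand 0 is undefined
     MASK_SET_BIT (mask, 2);             // operand 2 is undefined
     if (MASK_TEST_BIT (mask, 2))        // non-zero, bit 2 is set
       gcc_assert (!MASK_EMPTY (mask));  // mask == 0x5, not empty
*/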
/* Returns the first bit position (starting from LSB)
   in mask that is non zero.  Returns -1 if the mask is empty.  */
static int
get_mask_first_set_bit (unsigned mask)
{
  int pos = 0;
  if (mask == 0)
    return -1;
  while ((mask & (1 << pos)) == 0)
    pos++;
  return pos;
}
#define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
/* Return true if T, an SSA_NAME, has an undefined value.  */

static bool
has_undefined_value_p (tree t)
{
  return (ssa_undefined_value_p (t)
          || (possibly_undefined_names
              && pointer_set_contains (possibly_undefined_names, t)));
}
/* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
   is set on SSA_NAME_VAR.  */

static inline bool
uninit_undefined_value_p (tree t)
{
  if (!has_undefined_value_p (t))
    return false;
  if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
    return false;
  return true;
}
/* Emit warnings for uninitialized variables.  This is done in two passes.

   The first pass notices real uses of SSA names with undefined values.
   Such uses are unconditionally uninitialized, and we can be certain that
   such a use is a mistake.  This pass is run before most optimizations,
   so that we catch as many as we can.

   The second pass follows PHI nodes to find uses that are potentially
   uninitialized.  In this case we can't necessarily prove that the use
   is really uninitialized.  This pass is run after most optimizations,
   so that we thread as many jumps as possible, and delete as much dead
   code as possible, in order to reduce false positives.  We also look
   again for plain uninitialized variables, since optimization may have
   changed conditionally uninitialized to unconditionally uninitialized.  */
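/* Hypothetical illustration of the two kinds of diagnostics:

     int f (void)
     {
       int x;
       return x;          // first pass: "x is used uninitialized"
     }

     int g (int n)
     {
       int y;
       if (n)
         y = 1;
       return y;          // second pass: "y may be used uninitialized"
     }
*/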
/* Emit a warning for EXPR based on variable VAR at the point in the
   program where T, an SSA_NAME, is used uninitialized.  The exact
   warning text is in GMSGID; DATA is the statement whose location is
   used, and may be null.  WC is the warning code.  */
static void
warn_uninit (enum opt_code wc, tree t,
             tree expr, tree var, const char *gmsgid, void *data)
{
  gimple context = (gimple) data;
  location_t location, cfun_loc;
  expanded_location xloc, floc;

  if (!has_undefined_value_p (t))
    return;

  /* TREE_NO_WARNING either means we already warned, or the front end
     wishes to suppress the warning.  */
  if ((context
       && (gimple_no_warning_p (context)
           || (gimple_assign_single_p (context)
               && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
      || TREE_NO_WARNING (expr))
    return;

  location = (context != NULL && gimple_has_location (context))
             ? gimple_location (context)
             : DECL_SOURCE_LOCATION (var);
  location = linemap_resolve_location (line_table, location,
                                       LRK_SPELLING_LOCATION,
                                       NULL);
  cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
  xloc = expand_location (location);
  floc = expand_location (cfun_loc);
  if (warning_at (location, wc, gmsgid, expr))
    {
      TREE_NO_WARNING (expr) = 1;

      if (location == DECL_SOURCE_LOCATION (var))
        return;

      if (xloc.file != floc.file
          || linemap_location_before_p (line_table,
                                        location, cfun_loc)
          || linemap_location_before_p (line_table,
                                        cfun->function_end_locus,
                                        location))
        inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
    }
}
static unsigned int
warn_uninitialized_vars (bool warn_possibly_uninitialized)
{
  gimple_stmt_iterator gsi;
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
                                             single_succ (ENTRY_BLOCK_PTR), bb);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          use_operand_p use_p;
          ssa_op_iter op_iter;
          tree use;

          if (is_gimple_debug (stmt))
            continue;

          /* We only do data flow with SSA_NAMEs, so that's all we
             can warn about.  */
          FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
            {
              use = USE_FROM_PTR (use_p);
              if (always_executed)
                warn_uninit (OPT_Wuninitialized, use,
                             SSA_NAME_VAR (use), SSA_NAME_VAR (use),
                             "%qD is used uninitialized in this function",
                             stmt);
              else if (warn_possibly_uninitialized)
                warn_uninit (OPT_Wmaybe_uninitialized, use,
                             SSA_NAME_VAR (use), SSA_NAME_VAR (use),
                             "%qD may be used uninitialized in this function",
                             stmt);
            }

          /* For memory the only cheap thing we can do is see if we
             have a use of the default def of the virtual operand.
             ??? Note that at -O0 we do not have virtual operands.
             ??? Not so cheap would be to use the alias oracle via
             walk_aliased_vdefs, if we don't find any aliasing vdef
             warn as is-used-uninitialized, if we don't find an aliasing
             vdef that kills our use (stmt_kills_ref_p), warn as
             may-be-used-uninitialized.  But this walk is quadratic and
             so must be limited which means we would miss warning
             opportunities.  */
          use = gimple_vuse (stmt);
          if (use
              && gimple_assign_single_p (stmt)
              && !gimple_vdef (stmt)
              && SSA_NAME_IS_DEFAULT_DEF (use))
            {
              tree rhs = gimple_assign_rhs1 (stmt);
              tree base = get_base_address (rhs);

              /* Do not warn if it can be initialized outside this function.  */
              if (TREE_CODE (base) != VAR_DECL
                  || DECL_HARD_REGISTER (base)
                  || is_global_var (base))
                continue;

              if (always_executed)
                warn_uninit (OPT_Wuninitialized, use,
                             gimple_assign_rhs1 (stmt), base,
                             "%qE is used uninitialized in this function",
                             stmt);
              else if (warn_possibly_uninitialized)
                warn_uninit (OPT_Wmaybe_uninitialized, use,
                             gimple_assign_rhs1 (stmt), base,
                             "%qE may be used uninitialized in this function",
                             stmt);
            }
        }
    }

  return 0;
}
/* Checks if the operand OPND of PHI is defined by
   another phi with one operand defined by this PHI,
   but the rest of the operands are all defined.  If yes,
   returns true to skip this operand as being
   redundant.  Can be enhanced to be more general.  */
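/* Illustrative sketch (hypothetical GIMPLE): for

     x_1 = PHI <x_0(D), x_2>
     ...
     x_2 = PHI <x_1, 3>

   operand x_2 of the first phi is defined by the second phi, whose only
   possibly-undefined argument is x_1, the first phi's own result; the
   remaining argument (3) is defined, so x_2 can be skipped as redundant.  */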
static bool
can_skip_redundant_opnd (tree opnd, gimple phi)
{
  unsigned i, n;
  tree phi_def;
  gimple op_def;

  phi_def = gimple_phi_result (phi);
  op_def = SSA_NAME_DEF_STMT (opnd);
  if (gimple_code (op_def) != GIMPLE_PHI)
    return false;
  n = gimple_phi_num_args (op_def);
  for (i = 0; i < n; ++i)
    {
      tree op = gimple_phi_arg_def (op_def, i);
      if (TREE_CODE (op) != SSA_NAME)
        continue;
      if (op != phi_def && uninit_undefined_value_p (op))
        return false;
    }

  return true;
}
/* Returns a bit mask holding the positions of arguments in PHI
   that have empty (or possibly empty) definitions.  */
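/* For instance (illustrative only), for the phi

     x_3 = PHI <x_1(D), 5, x_2(D)>

   arguments 0 and 2 are possibly undefined, so the returned mask is
   (1 << 0) | (1 << 2), i.e. 0x5.  */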
static unsigned
compute_uninit_opnds_pos (gimple phi)
{
  size_t i, n;
  unsigned uninit_opnds = 0;

  n = gimple_phi_num_args (phi);
  /* Bail out for phi with too many args.  */
  if (n > 32)
    return 0;

  for (i = 0; i < n; ++i)
    {
      tree op = gimple_phi_arg_def (phi, i);
      if (TREE_CODE (op) == SSA_NAME
          && uninit_undefined_value_p (op)
          && !can_skip_redundant_opnd (op, phi))
        {
          if (cfun->has_nonlocal_label || cfun->calls_setjmp)
            {
              /* Ignore SSA_NAMEs that appear on abnormal edges
                 somewhere.  */
              if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
                continue;
            }
          MASK_SET_BIT (uninit_opnds, i);
        }
    }
  return uninit_opnds;
}
/* Find the immediate postdominator PDOM of the specified
   basic block BLOCK.  */

static inline basic_block
find_pdom (basic_block block)
{
  if (block == EXIT_BLOCK_PTR)
    return EXIT_BLOCK_PTR;
  else
    {
      basic_block bb
        = get_immediate_dominator (CDI_POST_DOMINATORS, block);
      if (!bb)
        return EXIT_BLOCK_PTR;
      return bb;
    }
}
/* Find the immediate DOM of the specified
   basic block BLOCK.  */

static inline basic_block
find_dom (basic_block block)
{
  if (block == ENTRY_BLOCK_PTR)
    return ENTRY_BLOCK_PTR;
  else
    {
      basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
      if (!bb)
        return ENTRY_BLOCK_PTR;
      return bb;
    }
}
/* Returns true if BB1 is postdominating BB2 and BB1 is
   not a loop exit bb.  The loop exit bb check is simple and does
   not cover all cases.  */

static bool
is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
{
  if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
    return false;

  if (single_pred_p (bb1) && !single_succ_p (bb2))
    return false;

  return true;
}
/* Find the closest postdominator of a specified BB, which is control
   equivalent to BB.  */

static inline basic_block
find_control_equiv_block (basic_block bb)
{
  basic_block pdom;

  pdom = find_pdom (bb);

  /* Skip the postdominating bb that is also loop exit.  */
  if (!is_non_loop_exit_postdominating (pdom, bb))
    return NULL;

  if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
    return pdom;

  return NULL;
}
#define MAX_NUM_CHAINS 8
#define MAX_CHAIN_LEN 5
#define MAX_POSTDOM_CHECK 8
/* Computes the control dependence chains (paths of edges)
   for DEP_BB up to the dominating basic block BB (the head node of a
   chain should be dominated by it).  CD_CHAINS is a pointer to an
   array holding the result chains.  CUR_CD_CHAIN is the current
   chain being computed.  *NUM_CHAINS is the total number of chains.  The
   function returns true if the information is successfully computed,
   and false if there is no control dependence or it is not computed.  */
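/* Illustrative sketch (hypothetical source):

     if (a)        // bb2
       {
         if (b)    // bb3
           x = 1;  // bb4
       }

   bb4 executes only when the true edge of bb2 and then the true edge of
   bb3 are taken, so the single control dependence chain computed for
   DEP_BB = bb4 up to BB = bb2 is the edge sequence
   [bb2->bb3 (true), bb3->bb4 (true)].  */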
static bool
compute_control_dep_chain (basic_block bb, basic_block dep_bb,
                           vec<edge> *cd_chains,
                           size_t *num_chains,
                           vec<edge> *cur_cd_chain)
{
  edge_iterator ei;
  edge e;
  size_t i;
  bool found_cd_chain = false;
  size_t cur_chain_len = 0;

  if (EDGE_COUNT (bb->succs) < 2)
    return false;

  /* Could use a set instead.  */
  cur_chain_len = cur_cd_chain->length ();
  if (cur_chain_len > MAX_CHAIN_LEN)
    return false;

  for (i = 0; i < cur_chain_len; i++)
    {
      edge e = (*cur_cd_chain)[i];
      /* cycle detected.  */
      if (e->src == bb)
        return false;
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      basic_block cd_bb;
      int post_dom_check = 0;
      if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
        continue;

      cd_bb = e->dest;
      cur_cd_chain->safe_push (e);
      while (!is_non_loop_exit_postdominating (cd_bb, bb))
        {
          if (cd_bb == dep_bb)
            {
              /* Found a direct control dependence.  */
              if (*num_chains < MAX_NUM_CHAINS)
                {
                  cd_chains[*num_chains] = cur_cd_chain->copy ();
                  (*num_chains)++;
                }
              found_cd_chain = true;
              /* check path from next edge.  */
              break;
            }

          /* Now check if DEP_BB is indirectly control dependent on BB.  */
          if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
                                         num_chains, cur_cd_chain))
            {
              found_cd_chain = true;
              break;
            }

          cd_bb = find_pdom (cd_bb);
          post_dom_check++;
          if (cd_bb == EXIT_BLOCK_PTR || post_dom_check > MAX_POSTDOM_CHECK)
            break;
        }
      cur_cd_chain->pop ();
      gcc_assert (cur_cd_chain->length () == cur_chain_len);
    }
  gcc_assert (cur_cd_chain->length () == cur_chain_len);

  return found_cd_chain;
}
typedef struct use_pred_info
{
  gimple cond;
  bool invert;
} *use_pred_info_t;

/* Converts the chains of control dependence edges into a set of
   predicates.  A control dependence chain is represented by a vector
   of edges.  DEP_CHAINS points to an array of dependence chains.
   NUM_CHAINS is the size of the chain array.  One edge in a dependence
   chain is mapped to a predicate expression represented by the
   use_pred_info_t type.  One dependence chain is converted to a composite
   predicate that is the result of an AND operation of the use_pred_info_t
   mapped to each edge.  A composite predicate is represented by a vector
   of use_pred_info_t.  On return, *PREDS points to the resulting array of
   composite predicates.  *NUM_PREDS is the number of composite predicates.  */
static bool
convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
                                      size_t num_chains,
                                      vec<use_pred_info_t> **preds,
                                      size_t *num_preds)
{
  bool has_valid_pred = false;
  size_t i;

  if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
    return false;

  /* Now convert the control dep chain into a set
     of predicates.  */
  typedef vec<use_pred_info_t> vec_use_pred_info_t_heap;
  *preds = XCNEWVEC (vec_use_pred_info_t_heap, num_chains);
  *num_preds = num_chains;

  for (i = 0; i < num_chains; i++)
    {
      vec<edge> one_cd_chain = dep_chains[i];
      size_t j;

      has_valid_pred = false;
      for (j = 0; j < one_cd_chain.length (); j++)
        {
          gimple cond_stmt;
          gimple_stmt_iterator gsi;
          basic_block guard_bb;
          use_pred_info_t one_pred;
          edge e;

          e = one_cd_chain[j];
          guard_bb = e->src;
          gsi = gsi_last_bb (guard_bb);
          if (gsi_end_p (gsi))
            {
              has_valid_pred = false;
              break;
            }
          cond_stmt = gsi_stmt (gsi);
          if (gimple_code (cond_stmt) == GIMPLE_CALL
              && EDGE_COUNT (e->src->succs) >= 2)
            {
              /* Ignore EH edge.  Can add assertion
                 on the other edge's flag.  */
              continue;
            }
          /* Skip if there is essentially one successor.  */
          if (EDGE_COUNT (e->src->succs) == 2)
            {
              edge e1;
              edge_iterator ei1;
              bool skip = false;

              FOR_EACH_EDGE (e1, ei1, e->src->succs)
                {
                  if (EDGE_COUNT (e1->dest->succs) == 0)
                    {
                      skip = true;
                      break;
                    }
                }
              if (skip)
                continue;
            }
          if (gimple_code (cond_stmt) != GIMPLE_COND)
            {
              has_valid_pred = false;
              break;
            }
          one_pred = XNEW (struct use_pred_info);
          one_pred->cond = cond_stmt;
          one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
          (*preds)[i].safe_push (one_pred);
          has_valid_pred = true;
        }

      if (!has_valid_pred)
        break;
    }
  return has_valid_pred;
}
/* Computes all control dependence chains for USE_BB.  The control
   dependence chains are then converted to an array of composite
   predicates pointed to by PREDS.  PHI_BB is the basic block of
   the phi whose result is used in USE_BB.  */
static bool
find_predicates (vec<use_pred_info_t> **preds,
                 size_t *num_preds,
                 basic_block phi_bb,
                 basic_block use_bb)
{
  size_t num_chains = 0, i;
  vec<edge> *dep_chains = 0;
  vec<edge> cur_chain = vNULL;
  bool has_valid_pred = false;
  basic_block cd_root = 0;

  typedef vec<edge> vec_edge_heap;
  dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);

  /* First find the closest bb that is control equivalent to PHI_BB
     that also dominates USE_BB.  */
  cd_root = phi_bb;
  while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
    {
      basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
      if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
        cd_root = ctrl_eq_bb;
      else
        break;
    }

  compute_control_dep_chain (cd_root, use_bb,
                             dep_chains, &num_chains,
                             &cur_chain);

  has_valid_pred
    = convert_control_dep_chain_into_preds (dep_chains,
                                            num_chains,
                                            preds,
                                            num_preds);
  /* Free individual chain  */
  cur_chain.release ();
  for (i = 0; i < num_chains; i++)
    dep_chains[i].release ();
  free (dep_chains);
  return has_valid_pred;
}
/* Computes the set of incoming edges of PHI that have non empty
   definitions of a phi chain.  The collection will be done
   recursively on operands that are defined by phis.  CD_ROOT
   is the control dependence root.  *EDGES holds the result, and
   VISITED_PHIS is a pointer set for detecting cycles.  */
static void
collect_phi_def_edges (gimple phi, basic_block cd_root,
                       vec<edge> *edges,
                       struct pointer_set_t *visited_phis)
{
  size_t i, n;
  edge opnd_edge;
  tree opnd;

  if (pointer_set_insert (visited_phis, phi))
    return;

  n = gimple_phi_num_args (phi);
  for (i = 0; i < n; i++)
    {
      opnd_edge = gimple_phi_arg_edge (phi, i);
      opnd = gimple_phi_arg_def (phi, i);

      if (TREE_CODE (opnd) != SSA_NAME)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
              print_gimple_stmt (dump_file, phi, 0, 0);
            }
          edges->safe_push (opnd_edge);
        }
      else
        {
          gimple def = SSA_NAME_DEF_STMT (opnd);

          if (gimple_code (def) == GIMPLE_PHI
              && dominated_by_p (CDI_DOMINATORS,
                                 gimple_bb (def), cd_root))
            collect_phi_def_edges (def, cd_root, edges,
                                   visited_phis);
          else if (!uninit_undefined_value_p (opnd))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
                  print_gimple_stmt (dump_file, phi, 0, 0);
                }
              edges->safe_push (opnd_edge);
            }
        }
    }
}
/* For each use edge of PHI, computes all control dependence chains.
   The control dependence chains are then converted to an array of
   composite predicates pointed to by PREDS.  */
static bool
find_def_preds (vec<use_pred_info_t> **preds,
                size_t *num_preds, gimple phi)
{
  size_t num_chains = 0, i, n;
  vec<edge> *dep_chains = 0;
  vec<edge> cur_chain = vNULL;
  vec<edge> def_edges = vNULL;
  bool has_valid_pred = false;
  basic_block phi_bb, cd_root = 0;
  struct pointer_set_t *visited_phis;

  typedef vec<edge> vec_edge_heap;
  dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);

  phi_bb = gimple_bb (phi);
  /* First find the closest dominating bb to be
     the control dependence root  */
  cd_root = find_dom (phi_bb);
  if (!cd_root)
    return false;

  visited_phis = pointer_set_create ();
  collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
  pointer_set_destroy (visited_phis);

  n = def_edges.length ();
  if (n == 0)
    return false;

  for (i = 0; i < n; i++)
    {
      size_t prev_nc, j;
      edge opnd_edge;

      opnd_edge = def_edges[i];
      prev_nc = num_chains;
      compute_control_dep_chain (cd_root, opnd_edge->src,
                                 dep_chains, &num_chains,
                                 &cur_chain);
      /* Free individual chain  */
      cur_chain.release ();

      /* Now update the newly added chains with
         the phi operand edge:  */
      if (EDGE_COUNT (opnd_edge->src->succs) > 1)
        {
          if (prev_nc == num_chains
              && num_chains < MAX_NUM_CHAINS)
            num_chains++;
          for (j = prev_nc; j < num_chains; j++)
            {
              dep_chains[j].safe_push (opnd_edge);
            }
        }
    }

  has_valid_pred
    = convert_control_dep_chain_into_preds (dep_chains,
                                            num_chains,
                                            preds,
                                            num_preds);
  for (i = 0; i < num_chains; i++)
    dep_chains[i].release ();
  free (dep_chains);
  return has_valid_pred;
}
/* Dumps the predicates (PREDS) for USESTMT.  */
static void
dump_predicates (gimple usestmt, size_t num_preds,
                 vec<use_pred_info_t> *preds,
                 const char *msg)
{
  size_t i, j;
  vec<use_pred_info_t> one_pred_chain;
  fprintf (dump_file, msg);
  print_gimple_stmt (dump_file, usestmt, 0, 0);
  fprintf (dump_file, "is guarded by :\n");
  /* do some dumping here:  */
  for (i = 0; i < num_preds; i++)
    {
      size_t np;

      one_pred_chain = preds[i];
      np = one_pred_chain.length ();

      for (j = 0; j < np; j++)
        {
          use_pred_info_t one_pred
            = one_pred_chain[j];
          if (one_pred->invert)
            fprintf (dump_file, " (.NOT.) ");
          print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
          if (j < np - 1)
            fprintf (dump_file, "(.AND.)\n");
        }
      if (i < num_preds - 1)
        fprintf (dump_file, "(.OR.)\n");
    }
}
/* Destroys the predicate set *PREDS.  */

static void
destroy_predicate_vecs (size_t n,
                        vec<use_pred_info_t> *preds)
{
  size_t i, j;
  for (i = 0; i < n; i++)
    {
      for (j = 0; j < preds[i].length (); j++)
        free (preds[i][j]);
      preds[i].release ();
    }
  free (preds);
}
/* Computes the 'normalized' conditional code with operand
   swapping and condition inversion.  */

static enum tree_code
get_cmp_code (enum tree_code orig_cmp_code,
              bool swap_cond, bool invert)
{
  enum tree_code tc = orig_cmp_code;

  if (swap_cond)
    tc = swap_tree_comparison (orig_cmp_code);
  if (invert)
    tc = invert_tree_comparison (tc, false);

  switch (tc)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return ERROR_MARK;
    }
  return tc;
}
/* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
   all values in the range satisfy (x CMPC BOUNDARY) == true.  */
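/* For instance (illustrative only): with BOUNDARY == 10 and CMPC == LT_EXPR
   the guarded range is x < 10, so VAL == 5 is included while VAL == 12 is
   not; with CMPC == NE_EXPR the range is everything except the boundary
   value itself.  */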
static bool
is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
{
  bool inverted = false;
  bool is_unsigned;
  bool result;

  /* Only handle integer constant here.  */
  if (TREE_CODE (val) != INTEGER_CST
      || TREE_CODE (boundary) != INTEGER_CST)
    return true;

  is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));

  if (cmpc == GE_EXPR || cmpc == GT_EXPR
      || cmpc == NE_EXPR)
    {
      cmpc = invert_tree_comparison (cmpc, false);
      inverted = true;
    }

  if (is_unsigned)
    {
      if (cmpc == EQ_EXPR)
        result = tree_int_cst_equal (val, boundary);
      else if (cmpc == LT_EXPR)
        result = INT_CST_LT_UNSIGNED (val, boundary);
      else
        {
          gcc_assert (cmpc == LE_EXPR);
          result = (tree_int_cst_equal (val, boundary)
                    || INT_CST_LT_UNSIGNED (val, boundary));
        }
    }
  else
    {
      if (cmpc == EQ_EXPR)
        result = tree_int_cst_equal (val, boundary);
      else if (cmpc == LT_EXPR)
        result = INT_CST_LT (val, boundary);
      else
        {
          gcc_assert (cmpc == LE_EXPR);
          result = (tree_int_cst_equal (val, boundary)
                    || INT_CST_LT (val, boundary));
        }
    }

  if (inverted)
    result ^= 1;

  return result;
}
/* Returns true if PRED is common among all the predicate
   chains (PREDS) (and therefore can be factored out).
   NUM_PRED_CHAIN is the size of array PREDS.  */
static bool
find_matching_predicate_in_rest_chains (use_pred_info_t pred,
                                        vec<use_pred_info_t> *preds,
                                        size_t num_pred_chains)
{
  size_t i, j, n;

  /* Trivial case.  */
  if (num_pred_chains == 1)
    return true;

  for (i = 1; i < num_pred_chains; i++)
    {
      bool found = false;
      vec<use_pred_info_t> one_chain = preds[i];
      n = one_chain.length ();
      for (j = 0; j < n; j++)
        {
          use_pred_info_t pred2
            = one_chain[j];
          /* can relax the condition comparison to not
             use address comparison.  However, the most common
             case is that multiple control dependent paths share
             a common path prefix, so address comparison should
             be ok.  */

          if (pred2->cond == pred->cond
              && pred2->invert == pred->invert)
            {
              found = true;
              break;
            }
        }
      if (!found)
        return false;
    }
  return true;
}
/* Forward declaration.  */
static bool
is_use_properly_guarded (gimple use_stmt,
                         basic_block use_bb,
                         gimple phi,
                         unsigned uninit_opnds,
                         struct pointer_set_t *visited_phis);
/* Returns true if all uninitialized opnds are pruned.  Returns false
   otherwise.  PHI is the phi node with uninitialized operands,
   UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
   FLAG_DEF is the statement defining the flag guarding the use of the
   PHI output, BOUNDARY_CST is the const value used in the predicate
   associated with the flag, CMP_CODE is the comparison code used in
   the predicate, VISITED_PHIS is the pointer set of phis visited, and
   VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
   that are also phis.

   Example scenario:

   BB1:
     flag_1 = phi <0, 1>                  // (1)
     var_1  = phi <undef, some_val>

   BB2:
     flag_2 = phi <0, flag_1, flag_1>     // (2)
     var_2  = phi <undef, var_1, var_1>
     ...
     use of var_2                         // (3)

   Because some flag arg in (1) is not constant, if we do not look into the
   flag phis recursively, it is conservatively treated as unknown and var_1
   is thought to flow into the use at (3).  Since var_1 is potentially
   uninitialized, a false warning will be emitted.  Checking recursively
   into (1), the compiler can find out that only some_val (which is defined)
   can flow into (3), which is OK.  */
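/* A C-level sketch (hypothetical) of source code that gives rise to the
   flag/value phi pairs above:

     int v, flag = 0;
     if (cond)
       {
         v = some_val;
         flag = 1;       // (1) flag_1/var_1 merge after this if
       }
     while (other ())
       ;                 // (2) flag_2/var_2 merge at the loop
     if (flag == 1)
       use (v);          // (3) guarded use; no warning is wanted
*/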
static bool
prune_uninit_phi_opnds_in_unrealizable_paths (
    gimple phi, unsigned uninit_opnds,
    gimple flag_def, tree boundary_cst,
    enum tree_code cmp_code,
    struct pointer_set_t *visited_phis,
    bitmap *visited_flag_phis)
{
  unsigned i;

  for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
    {
      tree flag_arg;

      if (!MASK_TEST_BIT (uninit_opnds, i))
        continue;

      flag_arg = gimple_phi_arg_def (flag_def, i);
      if (!is_gimple_constant (flag_arg))
        {
          gimple flag_arg_def, phi_arg_def;
          tree phi_arg;
          unsigned uninit_opnds_arg_phi;

          if (TREE_CODE (flag_arg) != SSA_NAME)
            return false;
          flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
          if (gimple_code (flag_arg_def) != GIMPLE_PHI)
            return false;

          phi_arg = gimple_phi_arg_def (phi, i);
          if (TREE_CODE (phi_arg) != SSA_NAME)
            return false;

          phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
          if (gimple_code (phi_arg_def) != GIMPLE_PHI)
            return false;

          if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
            return false;

          if (!*visited_flag_phis)
            *visited_flag_phis = BITMAP_ALLOC (NULL);

          if (bitmap_bit_p (*visited_flag_phis,
                            SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
            return false;

          bitmap_set_bit (*visited_flag_phis,
                          SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));

          /* Now recursively prune the uninitialized phi args.  */
          uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
          if (!prune_uninit_phi_opnds_in_unrealizable_paths (
                  phi_arg_def, uninit_opnds_arg_phi,
                  flag_arg_def, boundary_cst, cmp_code,
                  visited_phis, visited_flag_phis))
            return false;

          bitmap_clear_bit (*visited_flag_phis,
                            SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
          continue;
        }

      /* Now check if the constant is in the guarded range.  */
      if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
        {
          tree opnd;
          gimple opnd_def;
          edge opnd_edge;

          /* Now that we know that this undefined edge is not
             pruned.  If the operand is defined by another phi,
             we can further prune the incoming edges of that
             phi by checking the predicates of this operand.  */

          opnd = gimple_phi_arg_def (phi, i);
          opnd_def = SSA_NAME_DEF_STMT (opnd);
          if (gimple_code (opnd_def) == GIMPLE_PHI)
            {
              unsigned uninit_opnds2
                = compute_uninit_opnds_pos (opnd_def);
              gcc_assert (!MASK_EMPTY (uninit_opnds2));
              opnd_edge = gimple_phi_arg_edge (phi, i);
              if (!is_use_properly_guarded (phi,
                                            opnd_edge->src,
                                            opnd_def,
                                            uninit_opnds2,
                                            visited_phis))
                return false;
            }
          else
            return false;
        }
    }

  return true;
}
/* A helper function that determines if the predicate set
   of the use is not overlapping with that of the uninit paths.
   The most common scenario of guarded use is in Example 1:

     Example 1:
           if (some_cond)
             {
               x = ...;
               flag = true;
             }

            ... some code ...

           if (flag)
             use (x);

   The real world examples are usually more complicated, but similar
   and usually result from inlining:

     bool init_func (int * x)
     ...

   Another possible use scenario is in the following trivial example:
   ...

   Predicate analysis needs to compute the composite predicate:

     1) 'x' use predicate: (n > 0) .AND. (m < 2)
     2) 'x' default value (non-def) predicate: .NOT. (n > 0)
     (the predicate chain for phi operand defs can be computed
     starting from a bb that is control equivalent to the phi's
     bb and is dominating the operand def.)

     and check overlapping:
         (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
       <==> false

   This implementation provides a framework that can handle such
   scenarios.  (Note that many simple cases are handled properly
   without the predicate analysis -- this is due to the jump threading
   transformation, which eliminates the merge point and thus makes
   path sensitive analysis unnecessary.)

   NUM_PREDS is the number of predicate chains, PREDS is
   the array of chains, PHI is the phi node whose incoming (undefined)
   paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
   uninit operand positions.  VISITED_PHIS is the pointer set of phi
   stmts being checked.  */
static bool
use_pred_not_overlap_with_undef_path_pred (
    size_t num_preds,
    vec<use_pred_info_t> *preds,
    gimple phi, unsigned uninit_opnds,
    struct pointer_set_t *visited_phis)
{
  unsigned int i, n;
  gimple flag_def = 0;
  tree boundary_cst = 0;
  enum tree_code cmp_code;
  bool swap_cond = false;
  bool invert = false;
  vec<use_pred_info_t> the_pred_chain;
  bitmap visited_flag_phis = NULL;
  bool all_pruned = false;

  gcc_assert (num_preds > 0);
  /* Find within the common prefix of multiple predicate chains
     a predicate that is a comparison of a flag variable against
     a constant.  */
  the_pred_chain = preds[0];
  n = the_pred_chain.length ();
  for (i = 0; i < n; i++)
    {
      gimple cond;
      tree cond_lhs, cond_rhs, flag = 0;

      use_pred_info_t the_pred
        = the_pred_chain[i];

      cond = the_pred->cond;
      invert = the_pred->invert;
      cond_lhs = gimple_cond_lhs (cond);
      cond_rhs = gimple_cond_rhs (cond);
      cmp_code = gimple_cond_code (cond);

      if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
          && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
        {
          boundary_cst = cond_rhs;
          flag = cond_lhs;
        }
      else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
               && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
        {
          boundary_cst = cond_lhs;
          flag = cond_rhs;
          swap_cond = true;
        }
      else
        continue;

      flag_def = SSA_NAME_DEF_STMT (flag);

      if (!flag_def)
        continue;

      if ((gimple_code (flag_def) == GIMPLE_PHI)
          && (gimple_bb (flag_def) == gimple_bb (phi))
          && find_matching_predicate_in_rest_chains (
              the_pred, preds, num_preds))
        break;

      flag_def = 0;
    }

  if (!flag_def)
    return false;

  /* Now check that every uninit incoming edge has a constant flag value
     that is in conflict with the use guard/predicate.  */
  cmp_code = get_cmp_code (cmp_code, swap_cond, invert);

  if (cmp_code == ERROR_MARK)
    return false;

  all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
                                                             uninit_opnds,
                                                             flag_def,
                                                             boundary_cst,
                                                             cmp_code,
                                                             visited_phis,
                                                             &visited_flag_phis);

  if (visited_flag_phis)
    BITMAP_FREE (visited_flag_phis);

  return all_pruned;
}
/* Returns true if TC is AND or OR.  */

static inline bool
is_and_or_or (enum tree_code tc, tree typ)
{
  return (tc == BIT_IOR_EXPR
          || (tc == BIT_AND_EXPR
              && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
}
typedef struct norm_cond
{
  vec<gimple> conds;
  enum tree_code cond_code;
  bool invert;
} *norm_cond_t;

/* Normalizes gimple condition COND.  The normalization follows
   UD chains to form larger condition expression trees.  NORM_COND
   holds the normalized result.  COND_CODE is the logical opcode
   (AND or OR) of the normalized tree.  */
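/* Illustrative sketch (hypothetical GIMPLE): for

     t1 = a > 0;
     t2 = b < 10;
     t3 = t1 & t2;
     if (t3 != 0) ...

   normalizing the GIMPLE_COND (t3 != 0) follows the UD chain of t3 and
   produces conds == {a > 0, b < 10} with cond_code == BIT_AND_EXPR.  */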
static void
normalize_cond_1 (gimple cond,
                  norm_cond_t norm_cond,
                  enum tree_code cond_code)
{
  enum gimple_code gc;
  enum tree_code cur_cond_code;
  tree rhs1, rhs2;

  gc = gimple_code (cond);
  if (gc != GIMPLE_ASSIGN)
    {
      norm_cond->conds.safe_push (cond);
      return;
    }

  cur_cond_code = gimple_assign_rhs_code (cond);
  rhs1 = gimple_assign_rhs1 (cond);
  rhs2 = gimple_assign_rhs2 (cond);
  if (cur_cond_code == NE_EXPR)
    {
      if (integer_zerop (rhs2)
          && (TREE_CODE (rhs1) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (rhs1),
            norm_cond, cond_code);
      else if (integer_zerop (rhs1)
               && (TREE_CODE (rhs2) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (rhs2),
            norm_cond, cond_code);
      else
        norm_cond->conds.safe_push (cond);
      return;
    }

  if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
      && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
      && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
    {
      normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
                        norm_cond, cur_cond_code);
      normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
                        norm_cond, cur_cond_code);
      norm_cond->cond_code = cur_cond_code;
    }
  else
    norm_cond->conds.safe_push (cond);
}
/* See normalize_cond_1 for details.  INVERT is a flag to indicate
   if COND needs to be inverted or not.  */

static void
normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
{
  enum tree_code cond_code;

  norm_cond->cond_code = ERROR_MARK;
  norm_cond->invert = false;
  norm_cond->conds.create (0);
  gcc_assert (gimple_code (cond) == GIMPLE_COND);
  cond_code = gimple_cond_code (cond);
  if (invert)
    cond_code = invert_tree_comparison (cond_code, false);

  if (cond_code == NE_EXPR)
    {
      if (integer_zerop (gimple_cond_rhs (cond))
          && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
            norm_cond, ERROR_MARK);
      else if (integer_zerop (gimple_cond_lhs (cond))
               && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
            norm_cond, ERROR_MARK);
      else
        {
          norm_cond->conds.safe_push (cond);
          norm_cond->invert = invert;
        }
    }
  else
    {
      norm_cond->conds.safe_push (cond);
      norm_cond->invert = invert;
    }

  gcc_assert (norm_cond->conds.length () == 1
              || is_and_or_or (norm_cond->cond_code, NULL));
}
/* Returns true if the domain for condition COND1 is a subset of
   COND2.  REVERSE is a flag.  When it is true the function checks
   if COND1 is a superset of COND2.  INVERT1 and INVERT2 are flags
   to indicate if COND1 and COND2 need to be inverted or not.  */
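/* For instance (illustrative only): the domain of (x > 10) is a subset of
   the domain of (x > 0), and the domain of (x == 5) is a subset of the
   domain of (x < 10), so both queries would return true; (x > 0) is not a
   subset of (x > 10).  */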
static bool
is_gcond_subset_of (gimple cond1, bool invert1,
                    gimple cond2, bool invert2,
                    bool reverse)
{
  enum gimple_code gc1, gc2;
  enum tree_code cond1_code, cond2_code;
  tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;

  /* Take the short cut.  */
  if (cond1 == cond2)
    return true;

  if (reverse)
    {
      gimple tmp_cond = cond1;
      bool tmp_invert = invert1;
      cond1 = cond2;
      invert1 = invert2;
      cond2 = tmp_cond;
      invert2 = tmp_invert;
    }

  gc1 = gimple_code (cond1);
  gc2 = gimple_code (cond2);

  if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
      || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
    return cond1 == cond2;

  cond1_code = ((gc1 == GIMPLE_ASSIGN)
                ? gimple_assign_rhs_code (cond1)
                : gimple_cond_code (cond1));

  cond2_code = ((gc2 == GIMPLE_ASSIGN)
                ? gimple_assign_rhs_code (cond2)
                : gimple_cond_code (cond2));

  if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
      || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
    return false;

  if (invert1)
    cond1_code = invert_tree_comparison (cond1_code, false);
  if (invert2)
    cond2_code = invert_tree_comparison (cond2_code, false);

  cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs1 (cond1)
               : gimple_cond_lhs (cond1));
  cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs2 (cond1)
               : gimple_cond_rhs (cond1));
  cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs1 (cond2)
               : gimple_cond_lhs (cond2));
  cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs2 (cond2)
               : gimple_cond_rhs (cond2));

  /* Assuming const operands have been swapped to the
     rhs at this point of the analysis.  */

  if (cond1_lhs != cond2_lhs)
    return false;

  if (!is_gimple_constant (cond1_rhs)
      || TREE_CODE (cond1_rhs) != INTEGER_CST)
    return (cond1_rhs == cond2_rhs);

  if (!is_gimple_constant (cond2_rhs)
      || TREE_CODE (cond2_rhs) != INTEGER_CST)
    return (cond1_rhs == cond2_rhs);

  if (cond1_code == EQ_EXPR)
    return is_value_included_in (cond1_rhs,
                                 cond2_rhs, cond2_code);
  if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
    return ((cond2_code == cond1_code)
            && tree_int_cst_equal (cond1_rhs, cond2_rhs));

  if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
       && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
      || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
          && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
    return false;

  if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
      && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
    return false;

  if (cond1_code == GT_EXPR)
    {
      cond1_code = GE_EXPR;
      cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
                               cond1_rhs,
                               fold_convert (TREE_TYPE (cond1_rhs),
                                             integer_one_node));
    }
  else if (cond1_code == LT_EXPR)
    {
      cond1_code = LE_EXPR;
      cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
                               cond1_rhs,
                               fold_convert (TREE_TYPE (cond1_rhs),
                                             integer_one_node));
    }

  if (!cond1_rhs)
    return false;

  gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);

  if (cond2_code == GE_EXPR || cond2_code == GT_EXPR
      || cond2_code == LE_EXPR || cond2_code == LT_EXPR)
    return is_value_included_in (cond1_rhs,
                                 cond2_rhs, cond2_code);
  else if (cond2_code == NE_EXPR)
    return
      (is_value_included_in (cond1_rhs,
                             cond2_rhs, cond2_code)
       && !is_value_included_in (cond2_rhs,
                                 cond1_rhs, cond1_code));
  return false;
}
/* Returns true if the domain of the condition expression
   in COND is a subset of any of the sub-conditions
   of the normalized condition NORM_COND.  INVERT is a flag
   to indicate if COND needs to be inverted.
   REVERSE is a flag.  When it is true, the check is reversed --
   it returns true if COND is a superset of any of the subconditions
   of NORM_COND.  */

static bool
is_subset_of_any (gimple cond, bool invert,
                  norm_cond_t norm_cond, bool reverse)
{
  size_t i;
  size_t len = norm_cond->conds.length ();

  for (i = 0; i < len; i++)
    {
      if (is_gcond_subset_of (cond, invert,
                              norm_cond->conds[i], false,
                              reverse))
        return true;
    }
  return false;
}
/* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
   expressions (formed by following UD chains not control
   dependence chains).  The function returns true if the domain
   of expression NORM_COND1 is a subset of NORM_COND2's.
   The implementation is conservative, and it returns false if
   the inclusion relationship may not hold.  */

static bool
is_or_set_subset_of (norm_cond_t norm_cond1,
                     norm_cond_t norm_cond2)
{
  size_t i;
  size_t len = norm_cond1->conds.length ();

  for (i = 0; i < len; i++)
    {
      if (!is_subset_of_any (norm_cond1->conds[i],
                             false, norm_cond2, false))
        return false;
    }
  return true;
}
/* NORM_COND1 and NORM_COND2 are normalized logical AND
   expressions (formed by following UD chains not control
   dependence chains).  The function returns true if the domain
   of expression NORM_COND1 is a subset of NORM_COND2's.  */

static bool
is_and_set_subset_of (norm_cond_t norm_cond1,
                      norm_cond_t norm_cond2)
{
  size_t i;
  size_t len = norm_cond2->conds.length ();

  for (i = 0; i < len; i++)
    {
      if (!is_subset_of_any (norm_cond2->conds[i],
                             false, norm_cond1, true))
        return false;
    }
  return true;
}
/* Returns true if the domain of NORM_COND1 is a subset
   of that of NORM_COND2.  Returns false if it can not be
   proved to be so.  */

static bool
is_norm_cond_subset_of (norm_cond_t norm_cond1,
                        norm_cond_t norm_cond2)
{
  size_t i;
  enum tree_code code1, code2;

  code1 = norm_cond1->cond_code;
  code2 = norm_cond2->cond_code;

  if (code1 == BIT_AND_EXPR)
    {
      /* Both conditions are AND expressions.  */
      if (code2 == BIT_AND_EXPR)
        return is_and_set_subset_of (norm_cond1, norm_cond2);
      /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
         expression.  In this case, returns true if any subexpression
         of NORM_COND1 is a subset of any subexpression of NORM_COND2.  */
      else if (code2 == BIT_IOR_EXPR)
        {
          size_t len1;
          len1 = norm_cond1->conds.length ();
          for (i = 0; i < len1; i++)
            {
              gimple cond1 = norm_cond1->conds[i];
              if (is_subset_of_any (cond1, false, norm_cond2, false))
                return true;
            }
          return false;
        }
      else
        {
          gcc_assert (code2 == ERROR_MARK);
          gcc_assert (norm_cond2->conds.length () == 1);
          return is_subset_of_any (norm_cond2->conds[0],
                                   norm_cond2->invert, norm_cond1, true);
        }
    }
  /* NORM_COND1 is an OR expression  */
  else if (code1 == BIT_IOR_EXPR)
    {
      if (code2 != code1)
        return false;

      return is_or_set_subset_of (norm_cond1, norm_cond2);
    }
  else
    {
      gcc_assert (code1 == ERROR_MARK);
      gcc_assert (norm_cond1->conds.length () == 1);
      /* Conservatively returns false if NORM_COND1 is non-decomposable
         and NORM_COND2 is an AND expression.  */
      if (code2 == BIT_AND_EXPR)
        return false;

      if (code2 == BIT_IOR_EXPR)
        return is_subset_of_any (norm_cond1->conds[0],
                                 norm_cond1->invert, norm_cond2, false);

      gcc_assert (code2 == ERROR_MARK);
      gcc_assert (norm_cond2->conds.length () == 1);
      return is_gcond_subset_of (norm_cond1->conds[0],
                                 norm_cond1->invert,
                                 norm_cond2->conds[0],
                                 norm_cond2->invert, false);
    }
}
/* Returns true if the domain of single predicate expression
   EXPR1 is a subset of that of EXPR2.  Returns false if it
   can not be proved.  */

static bool
is_pred_expr_subset_of (use_pred_info_t expr1,
                        use_pred_info_t expr2)
{
  gimple cond1, cond2;
  enum tree_code code1, code2;
  struct norm_cond norm_cond1, norm_cond2;
  bool is_subset = false;

  cond1 = expr1->cond;
  cond2 = expr2->cond;
  code1 = gimple_cond_code (cond1);
  code2 = gimple_cond_code (cond2);

  if (expr1->invert)
    code1 = invert_tree_comparison (code1, false);
  if (expr2->invert)
    code2 = invert_tree_comparison (code2, false);

  /* Fast path -- match exactly  */
  if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
      && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
      && (code1 == code2))
    return true;

  /* Normalize conditions.  To keep NE_EXPR, do not invert
     when both need inversion.  */
  normalize_cond (cond1, &norm_cond1, (expr1->invert));
  normalize_cond (cond2, &norm_cond2, (expr2->invert));

  is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);

  norm_cond1.conds.release ();
  norm_cond2.conds.release ();
  return is_subset;
}
/* Returns true if the domain of PRED1 is a subset
   of that of PRED2.  Returns false if it can not be proved so.  */

static bool
is_pred_chain_subset_of (vec<use_pred_info_t> pred1,
                         vec<use_pred_info_t> pred2)
{
  size_t np1, np2, i1, i2;

  np1 = pred1.length ();
  np2 = pred2.length ();

  for (i2 = 0; i2 < np2; i2++)
    {
      bool found = false;
      use_pred_info_t info2
        = pred2[i2];
      for (i1 = 0; i1 < np1; i1++)
        {
          use_pred_info_t info1
            = pred1[i1];
          if (is_pred_expr_subset_of (info1, info2))
            {
              found = true;
              break;
            }
        }
      if (!found)
        return false;
    }
  return true;
}
/* Returns true if the domain defined by
   one pred chain ONE_PRED is a subset of the domain
   of *PREDS.  It returns false if ONE_PRED's domain is
   not a subset of any of the sub-domains of PREDS
   (corresponding to each individual chain in it), even
   though it may still be a subset of the whole domain
   of PREDS, which is the union (ORed) of all its subdomains.
   In other words, the result is conservative.  */
static bool
is_included_in (vec<use_pred_info_t> one_pred,
                vec<use_pred_info_t> *preds,
                size_t n)
{
  size_t i;

  for (i = 0; i < n; i++)
    {
      if (is_pred_chain_subset_of (one_pred, preds[i]))
        return true;
    }

  return false;
}
/* Compares two predicate sets PREDS1 and PREDS2 and returns
   true if the domain defined by PREDS1 is a superset
   of PREDS2's domain.  N1 and N2 are array sizes of PREDS1 and
   PREDS2 respectively.  The implementation chooses not to build
   generic trees (and rely on the folding capability of the
   compiler), but instead performs brute force comparison of
   individual predicate chains (won't be a compile time problem
   as the chains are pretty short).  When the function returns
   false, it does not necessarily mean *PREDS1 is not a superset
   of *PREDS2, but that it may not be so since the analysis can
   not prove it.  In such cases, false warnings may still be
   emitted.  */
static bool
is_superset_of (vec<use_pred_info_t> *preds1,
                size_t n1,
                vec<use_pred_info_t> *preds2,
                size_t n2)
{
  size_t i;
  vec<use_pred_info_t> one_pred_chain;

  for (i = 0; i < n2; i++)
    {
      one_pred_chain = preds2[i];
      if (!is_included_in (one_pred_chain, preds1, n1))
        return false;
    }

  return true;
}
/* Comparison function used by qsort.  It is used to
   sort predicate chains to allow predicate
   simplification.  */

static int
pred_chain_length_cmp (const void *p1, const void *p2)
{
  use_pred_info_t i1, i2;
  vec<use_pred_info_t> const *chain1
    = (vec<use_pred_info_t> const *)p1;
  vec<use_pred_info_t> const *chain2
    = (vec<use_pred_info_t> const *)p2;

  if (chain1->length () != chain2->length ())
    return (chain1->length () - chain2->length ());

  i1 = (*chain1)[0];
  i2 = (*chain2)[0];

  /* Allow predicates with similar prefix come together.  */
  if (!i1->invert && i2->invert)
    return -1;
  else if (i1->invert && !i2->invert)
    return 1;

  return gimple_uid (i1->cond) - gimple_uid (i2->cond);
}
/* x OR (!x AND y) is equivalent to x OR y.
   This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
   into x1 OR x2 OR x3.  PREDS is the predicate chains, and N is
   the number of chains.  Returns true if normalization happens.  */
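/* A small worked instance (illustrative only): with x1 = (a > 0) and
   x2 = (b != 0), the two chains

     (a > 0)
     (.NOT. (a > 0)) .AND. (b != 0)

   cover the same set of paths as the normalized chains

     (a > 0)
     (b != 0)

   since any path failing (a > 0) but reaching the use must satisfy
   (b != 0), and paths satisfying (a > 0) are already covered.  */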
static bool
normalize_preds (vec<use_pred_info_t> *preds, size_t *n)
{
  size_t i, j, ll;
  vec<use_pred_info_t> pred_chain;
  vec<use_pred_info_t> x = vNULL;
  use_pred_info_t xj = 0, nxj = 0;

  if (*n < 2)
    return false;

  /* First sort the chains in ascending order of lengths.  */
  qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
  pred_chain = preds[0];
  ll = pred_chain.length ();
  if (ll != 1)
    {
      if (ll == 2)
        {
          use_pred_info_t xx, yy, xx2, nyy;
          vec<use_pred_info_t> pred_chain2 = preds[1];
          if (pred_chain2.length () != 2)
            return false;

          /* See if simplification x AND y OR x AND !y is possible.  */
          xx = pred_chain[0];
          yy = pred_chain[1];
          xx2 = pred_chain2[0];
          nyy = pred_chain2[1];
          if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
              || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
              || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
              || (xx->invert != xx2->invert))
            return false;
          if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
              || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
              || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
              || (yy->invert == nyy->invert))
            return false;

          /* Now merge the first two chains.  */
          free (yy);
          free (nyy);
          free (xx2);
          pred_chain.release ();
          pred_chain2.release ();
          pred_chain.safe_push (xx);
          preds[0] = pred_chain;
          for (i = 1; i < *n - 1; i++)
            preds[i] = preds[i + 1];

          preds[*n - 1].create (0);
          *n = *n - 1;
        }
      else
        return false;
    }

  x.safe_push (pred_chain[0]);

  /* The loop extracts x1, x2, x3, etc from chains
     x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ...  */
  for (i = 1; i < *n; i++)
    {
      pred_chain = preds[i];
      if (pred_chain.length () != i + 1)
        return false;

      for (j = 0; j < i; j++)
        {
          xj = x[j];
          nxj = pred_chain[j];

          /* Check if nxj is !xj  */
          if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
              || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
              || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
              || (xj->invert == nxj->invert))
            return false;
        }

      x.safe_push (pred_chain[i]);
    }

  /* Now normalize the pred chains using the extracted x1, x2, x3 etc.  */
  for (j = 0; j < *n; j++)
    {
      use_pred_info_t t;
      xj = x[j];

      t = XNEW (struct use_pred_info);
      *t = *xj;

      x[j] = t;
    }

  for (i = 0; i < *n; i++)
    {
      pred_chain = preds[i];
      for (j = 0; j < pred_chain.length (); j++)
        free (pred_chain[j]);
      pred_chain.release ();
      /* A new chain.  */
      pred_chain.safe_push (x[i]);
      preds[i] = pred_chain;
    }

  x.release ();
  return true;
}
/* Computes the predicates that guard the use and checks
   if the incoming paths that have empty (or possibly
   empty) definitions can be pruned/filtered.  The function returns
   true if it can be determined that the use of PHI's def in
   USE_STMT is guarded by a predicate set not overlapping with
   the predicate sets of all runtime paths that do not have a definition.
   Returns false if it is not, or if it can not be determined.  USE_BB is
   the bb of the use (for phi operand use, the bb is not the bb of
   the phi stmt, but the src bb of the operand edge).  UNINIT_OPNDS
   is a bit vector.  If an operand of PHI is uninitialized, the
   corresponding bit in the vector is 1.  VISITED_PHIS is a pointer
   set of phis being visited.  */
static bool
is_use_properly_guarded (gimple use_stmt,
                         basic_block use_bb,
                         gimple phi,
                         unsigned uninit_opnds,
                         struct pointer_set_t *visited_phis)
{
  basic_block phi_bb;
  vec<use_pred_info_t> *preds = 0;
  vec<use_pred_info_t> *def_preds = 0;
  size_t num_preds = 0, num_def_preds = 0;
  bool has_valid_preds = false;
  bool is_properly_guarded = false;

  if (pointer_set_insert (visited_phis, phi))
    return false;

  phi_bb = gimple_bb (phi);

  if (is_non_loop_exit_postdominating (use_bb, phi_bb))
    return false;

  has_valid_preds = find_predicates (&preds, &num_preds,
                                     phi_bb, use_bb);

  if (!has_valid_preds)
    {
      destroy_predicate_vecs (num_preds, preds);
      return false;
    }

  if (dump_file)
    dump_predicates (use_stmt, num_preds, preds,
                     "Use in stmt ");

  has_valid_preds = find_def_preds (&def_preds,
                                    &num_def_preds, phi);

  if (has_valid_preds)
    {
      bool normed;
      if (dump_file)
        dump_predicates (phi, num_def_preds, def_preds,
                         "Operand defs of phi ");

      normed = normalize_preds (def_preds, &num_def_preds);
      if (normed && dump_file)
        {
          fprintf (dump_file, "\nNormalized to\n");
          dump_predicates (phi, num_def_preds, def_preds,
                           "Operand defs of phi ");
        }
      is_properly_guarded =
        is_superset_of (def_preds, num_def_preds,
                        preds, num_preds);
    }

  /* further prune the dead incoming phi edges.  */
  if (!is_properly_guarded)
    is_properly_guarded
      = use_pred_not_overlap_with_undef_path_pred (
          num_preds, preds, phi, uninit_opnds, visited_phis);

  destroy_predicate_vecs (num_preds, preds);
  destroy_predicate_vecs (num_def_preds, def_preds);
  return is_properly_guarded;
}
/* Searches through all uses of a potentially
   uninitialized variable defined by PHI and returns a use
   statement if the use is not properly guarded.  It returns
   NULL if all uses are guarded.  UNINIT_OPNDS is a bitvector
   holding the position(s) of uninit PHI operands.  WORKLIST
   is the vector of candidate phis that may be updated by this
   function.  ADDED_TO_WORKLIST is the pointer set tracking
   if the new phi is already in the worklist.  */
static gimple
find_uninit_use (gimple phi, unsigned uninit_opnds,
                 vec<gimple> *worklist,
                 struct pointer_set_t *added_to_worklist)
{
  tree phi_result;
  use_operand_p use_p;
  gimple use_stmt;
  imm_use_iterator iter;

  phi_result = gimple_phi_result (phi);

  FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
    {
      struct pointer_set_t *visited_phis;
      basic_block use_bb;

      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      visited_phis = pointer_set_create ();

      if (gimple_code (use_stmt) == GIMPLE_PHI)
        use_bb = gimple_phi_arg_edge (use_stmt,
                                      PHI_ARG_INDEX_FROM_USE (use_p))->src;
      else
        use_bb = gimple_bb (use_stmt);

      if (is_use_properly_guarded (use_stmt,
                                   use_bb,
                                   phi,
                                   uninit_opnds,
                                   visited_phis))
        {
          pointer_set_destroy (visited_phis);
          continue;
        }
      pointer_set_destroy (visited_phis);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "[CHECK]: Found unguarded use: ");
          print_gimple_stmt (dump_file, use_stmt, 0, 0);
        }
      /* Found one real use, return.  */
      if (gimple_code (use_stmt) != GIMPLE_PHI)
        return use_stmt;

      /* Found a phi use that is not guarded,
         add the phi to the worklist.  */
      if (!pointer_set_insert (added_to_worklist, use_stmt))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
              print_gimple_stmt (dump_file, use_stmt, 0, 0);
            }

          worklist->safe_push (use_stmt);
          pointer_set_insert (possibly_undefined_names, phi_result);
        }
    }

  return NULL;
}
/* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
   and gives warning if there exists a runtime path from the entry to a
   use of the PHI def that does not contain a definition.  In other words,
   the warning is on the real use.  The more dead paths that can be pruned
   by the compiler, the fewer false positives the warning has.  WORKLIST
   is a vector of candidate phis to be examined.  ADDED_TO_WORKLIST is
   a pointer set tracking if the new phi is added to the worklist or not.  */
static void
warn_uninitialized_phi (gimple phi, vec<gimple> *worklist,
                        struct pointer_set_t *added_to_worklist)
{
  unsigned uninit_opnds;
  gimple uninit_use_stmt = 0;
  tree uninit_op;

  /* Don't look at virtual operands.  */
  if (virtual_operand_p (gimple_phi_result (phi)))
    return;

  uninit_opnds = compute_uninit_opnds_pos (phi);

  if (MASK_EMPTY (uninit_opnds))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "[CHECK]: examining phi: ");
      print_gimple_stmt (dump_file, phi, 0, 0);
    }

  /* Now check if we have any use of the value without proper guard.  */
  uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
                                     worklist, added_to_worklist);

  /* All uses are properly guarded.  */
  if (!uninit_use_stmt)
    return;

  uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
  if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
    return;
  warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
               SSA_NAME_VAR (uninit_op),
               "%qD may be used uninitialized in this function",
               uninit_use_stmt);
}
/* Entry point to the late uninitialized warning pass.  */
static unsigned int
execute_late_warn_uninitialized (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  vec<gimple> worklist = vNULL;
  struct pointer_set_t *added_to_worklist;

  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  /* Re-do the plain uninitialized variable check, as optimization may have
     straightened control flow.  Do this first so that we don't accidentally
     get a "may be" warning when we'd have seen an "is" warning later.  */
  warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);

  timevar_push (TV_TREE_UNINIT);

  possibly_undefined_names = pointer_set_create ();
  added_to_worklist = pointer_set_create ();

  /* Initialize worklist  */
  FOR_EACH_BB (bb)
    for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple phi = gsi_stmt (gsi);
        size_t n, i;

        n = gimple_phi_num_args (phi);

        /* Don't look at virtual operands.  */
        if (virtual_operand_p (gimple_phi_result (phi)))
          continue;

        for (i = 0; i < n; ++i)
          {
            tree op = gimple_phi_arg_def (phi, i);
            if (TREE_CODE (op) == SSA_NAME
                && uninit_undefined_value_p (op))
              {
                worklist.safe_push (phi);
                pointer_set_insert (added_to_worklist, phi);
                if (dump_file && (dump_flags & TDF_DETAILS))
                  {
                    fprintf (dump_file, "[WORKLIST]: add to initial list: ");
                    print_gimple_stmt (dump_file, phi, 0, 0);
                  }
                break;
              }
          }
      }

  while (worklist.length () != 0)
    {
      gimple cur_phi = 0;
      cur_phi = worklist.pop ();
      warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
    }

  worklist.release ();
  pointer_set_destroy (added_to_worklist);
  pointer_set_destroy (possibly_undefined_names);
  possibly_undefined_names = NULL;
  free_dominance_info (CDI_POST_DOMINATORS);
  timevar_pop (TV_TREE_UNINIT);
  return 0;
}
static bool
gate_warn_uninitialized (void)
{
  return warn_uninitialized != 0;
}
const pass_data pass_data_late_warn_uninitialized =
{
  GIMPLE_PASS, /* type */
  "uninit", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_late_warn_uninitialized : public gimple_opt_pass
{
public:
  pass_late_warn_uninitialized (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_late_warn_uninitialized (ctxt_); }
  bool gate () { return gate_warn_uninitialized (); }
  unsigned int execute () { return execute_late_warn_uninitialized (); }

}; // class pass_late_warn_uninitialized
gimple_opt_pass *
make_pass_late_warn_uninitialized (gcc::context *ctxt)
{
  return new pass_late_warn_uninitialized (ctxt);
}
static unsigned int
execute_early_warn_uninitialized (void)
{
  /* Currently, this pass runs always but
     execute_late_warn_uninitialized only runs with optimization.  With
     optimization we want to warn about possible uninitialized as late
     as possible, thus don't do it here.  However, without
     optimization we need to warn here about "may be uninitialized".  */
  calculate_dominance_info (CDI_POST_DOMINATORS);

  warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);

  /* Post-dominator information can not be reliably updated.  Free it
     after the use.  */

  free_dominance_info (CDI_POST_DOMINATORS);
  return 0;
}
const pass_data pass_data_early_warn_uninitialized =
{
  GIMPLE_PASS, /* type */
  "*early_warn_uninitialized", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_UNINIT, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_early_warn_uninitialized : public gimple_opt_pass
{
public:
  pass_early_warn_uninitialized (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
  {}

  /* opt_pass methods: */
  bool gate () { return gate_warn_uninitialized (); }
  unsigned int execute () { return execute_early_warn_uninitialized (); }

}; // class pass_early_warn_uninitialized
gimple_opt_pass *
make_pass_early_warn_uninitialized (gcc::context *ctxt)
{
  return new pass_early_warn_uninitialized (ctxt);
}