/* Predicate aware uninitialized variable warning.
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Xinliang David Li <davidxl@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "pointer-set.h"
#include "tree-flow.h"
#include "tree-inline.h"
#include "tree-pass.h"
#include "diagnostic-core.h"
/* This implements the pass that does predicate aware warning on uses of
   possibly uninitialized variables.  The pass first collects the set of
   possibly uninitialized SSA names.  For each such name, it walks through
   all its immediate uses.  For each immediate use, it rebuilds the condition
   expression (the predicate) that guards the use.  The predicate is then
   examined to see if the variable is always defined under that same condition.
   This is done either by pruning the unrealizable paths that lead to the
   default definitions or by checking if the predicate set that guards the
   defining paths is a superset of the use predicate.  */
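/* An illustrative source-level sketch of the kind of code this pass is
   designed to handle without a false positive (it is not part of the
   implementation): 'v' is used only when 'init' is nonzero, and 'init'
   is nonzero only on paths where 'v' was assigned.  */
#if 0
extern int compute (int);
int
example_guarded_use (int n)
{
  int v;		/* v's default SSA definition is "undefined".  */
  int init = 0;
  if (n > 0)
    {
      v = compute (n);
      init = 1;
    }
  if (init)		/* Use predicate: init != 0.  */
    return v;		/* Guarded use: no -Wmaybe-uninitialized warning.  */
  return 0;
}
#endif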
/* Pointer set of potentially undefined ssa names, i.e.,
   ssa names that are defined by phi with operands that
   are not defined or potentially undefined.  */
static struct pointer_set_t *possibly_undefined_names = 0;
/* Bit mask handling macros.  */
#define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
#define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
#define MASK_EMPTY(mask) (mask == 0)
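/* A small illustrative sketch (hypothetical helper, not used by the pass)
   of how the mask is used below: each phi argument position whose value
   is (possibly) undefined gets its bit set.  */
#if 0
static void
example_mask_usage (void)
{
  unsigned mask = 0;
  MASK_SET_BIT (mask, 0);	/* Phi operand 0 has an undefined value.  */
  MASK_SET_BIT (mask, 2);	/* Phi operand 2 has an undefined value.  */
  gcc_assert (!MASK_EMPTY (mask));
  gcc_assert (MASK_TEST_BIT (mask, 2));	/* mask is now 0x5.  */
}
#endif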
/* Returns the first bit position (starting from LSB)
   in mask that is non zero.  Returns -1 if the mask is empty.  */
static int
get_mask_first_set_bit (unsigned mask)
{
  int pos = 0;
  if (mask == 0)
    return -1;
  while ((mask & (1 << pos)) == 0)
    pos++;
  return pos;
}
#define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
/* Return true if T, an SSA_NAME, has an undefined value.  */

bool
ssa_undefined_value_p (tree t)
{
  tree var = SSA_NAME_VAR (t);

  if (!var)
    ;
  /* Parameters get their initial value from the function entry.  */
  else if (TREE_CODE (var) == PARM_DECL)
    return false;
  /* When returning by reference the return address is actually a hidden
     parameter.  */
  else if (TREE_CODE (var) == RESULT_DECL && DECL_BY_REFERENCE (var))
    return false;
  /* Hard register variables get their initial value from the ether.  */
  else if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
    return false;

  /* The value is undefined iff its definition statement is empty.  */
  return (gimple_nop_p (SSA_NAME_DEF_STMT (t))
	  || (possibly_undefined_names
	      && pointer_set_contains (possibly_undefined_names, t)));
}
/* Like ssa_undefined_value_p, but don't return true if TREE_NO_WARNING
   is set on SSA_NAME_VAR.  */

static inline bool
uninit_undefined_value_p (tree t)
{
  if (!ssa_undefined_value_p (t))
    return false;
  if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
    return false;
  return true;
}
/* Checks if the operand OPND of PHI is defined by
   another phi with one operand defined by this PHI,
   but the rest of the operands are all defined.  If yes,
   returns true to skip this operand as being
   redundant.  Can be enhanced to be more general.  */
static bool
can_skip_redundant_opnd (tree opnd, gimple phi)
{
  unsigned i, n;
  tree phi_def;
  gimple op_def;

  phi_def = gimple_phi_result (phi);
  op_def = SSA_NAME_DEF_STMT (opnd);
  if (gimple_code (op_def) != GIMPLE_PHI)
    return false;
  n = gimple_phi_num_args (op_def);
  for (i = 0; i < n; ++i)
    {
      tree op = gimple_phi_arg_def (op_def, i);
      if (TREE_CODE (op) != SSA_NAME)
	continue;
      if (op != phi_def && uninit_undefined_value_p (op))
	return false;
    }

  return true;
}
/* Returns a bit mask holding the positions of arguments in PHI
   that have empty (or possibly empty) definitions.  */
static unsigned
compute_uninit_opnds_pos (gimple phi)
{
  size_t i, n;
  unsigned uninit_opnds = 0;

  n = gimple_phi_num_args (phi);
  /* Bail out for phi with too many args.  */
  if (n > 32)
    return 0;

  for (i = 0; i < n; ++i)
    {
      tree op = gimple_phi_arg_def (phi, i);
      if (TREE_CODE (op) == SSA_NAME
	  && uninit_undefined_value_p (op)
	  && !can_skip_redundant_opnd (op, phi))
	{
	  /* Ignore SSA_NAMEs on abnormal edges to setjmp
	     or nonlocal goto receiver.  */
	  if (cfun->has_nonlocal_label || cfun->calls_setjmp)
	    {
	      edge e = gimple_phi_arg_edge (phi, i);
	      if (e->flags & EDGE_ABNORMAL)
		{
		  gimple last = last_stmt (e->src);
		  if (last && stmt_can_make_abnormal_goto (last))
		    continue;
		}
	    }
	  MASK_SET_BIT (uninit_opnds, i);
	}
    }
  return uninit_opnds;
}
/* Find the immediate postdominator PDOM of the specified
   basic block BLOCK.  */

static inline basic_block
find_pdom (basic_block block)
{
  if (block == EXIT_BLOCK_PTR)
    return EXIT_BLOCK_PTR;
  else
    {
      basic_block bb
	= get_immediate_dominator (CDI_POST_DOMINATORS, block);
      if (!bb)
	return EXIT_BLOCK_PTR;
      return bb;
    }
}
/* Find the immediate DOM of the specified
   basic block BLOCK.  */

static inline basic_block
find_dom (basic_block block)
{
  if (block == ENTRY_BLOCK_PTR)
    return ENTRY_BLOCK_PTR;
  else
    {
      basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
      if (!bb)
	return ENTRY_BLOCK_PTR;
      return bb;
    }
}
/* Returns true if BB1 is postdominating BB2 and BB1 is
   not a loop exit bb.  The loop exit bb check is simple and does
   not cover all cases.  */

static bool
is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
{
  if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
    return false;

  if (single_pred_p (bb1) && !single_succ_p (bb2))
    return false;

  return true;
}
/* Find the closest postdominator of a specified BB, which is control
   equivalent to BB.  */

static inline basic_block
find_control_equiv_block (basic_block bb)
{
  basic_block pdom;

  pdom = find_pdom (bb);

  /* Skip the postdominating bb that is also loop exit.  */
  if (!is_non_loop_exit_postdominating (pdom, bb))
    return NULL;

  if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
    return pdom;

  return NULL;
}
#define MAX_NUM_CHAINS 8
#define MAX_CHAIN_LEN 5
#define MAX_POSTDOM_CHECK 8
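/* An illustrative source-level sketch (not part of the pass) of control
   dependence chains: the block containing use (v) below is control
   dependent on the true edge of "if (a)" and, transitively, on the true
   edge of "if (b)", so one control dependence chain for it is
   { a->then, b->then }, i.e. the predicate (a != 0) .AND. (b != 0).  */
#if 0
extern void use (int);
void
example_cd_chain (int a, int b, int v)
{
  if (a)
    {
      if (b)
	use (v);
    }
}
#endif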
/* Computes the control dependence chains (paths of edges)
   for DEP_BB up to the dominating basic block BB (the head node of a
   chain should be dominated by it).  CD_CHAINS is a pointer to a
   dynamic array holding the result chains.  CUR_CD_CHAIN is the current
   chain being computed.  *NUM_CHAINS is the total number of chains.  The
   function returns true if the information is successfully computed,
   and false if there is no control dependence or it is not computed.  */
270 compute_control_dep_chain (basic_block bb
, basic_block dep_bb
,
271 vec
<edge
> *cd_chains
,
273 vec
<edge
> *cur_cd_chain
)
278 bool found_cd_chain
= false;
279 size_t cur_chain_len
= 0;
281 if (EDGE_COUNT (bb
->succs
) < 2)
284 /* Could use a set instead. */
285 cur_chain_len
= cur_cd_chain
->length ();
286 if (cur_chain_len
> MAX_CHAIN_LEN
)
289 for (i
= 0; i
< cur_chain_len
; i
++)
291 edge e
= (*cur_cd_chain
)[i
];
292 /* cycle detected. */
297 FOR_EACH_EDGE (e
, ei
, bb
->succs
)
300 int post_dom_check
= 0;
301 if (e
->flags
& (EDGE_FAKE
| EDGE_ABNORMAL
))
305 cur_cd_chain
->safe_push (e
);
306 while (!is_non_loop_exit_postdominating (cd_bb
, bb
))
310 /* Found a direct control dependence. */
311 if (*num_chains
< MAX_NUM_CHAINS
)
313 cd_chains
[*num_chains
] = cur_cd_chain
->copy ();
316 found_cd_chain
= true;
317 /* check path from next edge. */
321 /* Now check if DEP_BB is indirectly control dependent on BB. */
322 if (compute_control_dep_chain (cd_bb
, dep_bb
, cd_chains
,
323 num_chains
, cur_cd_chain
))
325 found_cd_chain
= true;
329 cd_bb
= find_pdom (cd_bb
);
331 if (cd_bb
== EXIT_BLOCK_PTR
|| post_dom_check
> MAX_POSTDOM_CHECK
)
334 cur_cd_chain
->pop ();
335 gcc_assert (cur_cd_chain
->length () == cur_chain_len
);
337 gcc_assert (cur_cd_chain
->length () == cur_chain_len
);
339 return found_cd_chain
;
342 typedef struct use_pred_info
/* Converts the chains of control dependence edges into a set of
   predicates.  A control dependence chain is represented by a vector
   of edges.  DEP_CHAINS points to an array of dependence chains.
   NUM_CHAINS is the size of the chain array.  One edge in a dependence
   chain is mapped to a predicate expression represented by the
   use_pred_info_t type.  One dependence chain is converted to a composite
   predicate that is the result of an AND operation on the use_pred_info_t
   predicates mapped to each edge.  A composite predicate is represented
   by a vector of use_pred_info_t.  On return, *PREDS points to the
   resulting array of composite predicates.  *NUM_PREDS is the number
   of composite predicates.  */
362 convert_control_dep_chain_into_preds (vec
<edge
> *dep_chains
,
364 vec
<use_pred_info_t
> **preds
,
367 bool has_valid_pred
= false;
369 if (num_chains
== 0 || num_chains
>= MAX_NUM_CHAINS
)
  /* Now convert the control dep chain into a set
     of predicates.  */
374 typedef vec
<use_pred_info_t
> vec_use_pred_info_t_heap
;
375 *preds
= XCNEWVEC (vec_use_pred_info_t_heap
, num_chains
);
376 *num_preds
= num_chains
;
378 for (i
= 0; i
< num_chains
; i
++)
380 vec
<edge
> one_cd_chain
= dep_chains
[i
];
382 has_valid_pred
= false;
383 for (j
= 0; j
< one_cd_chain
.length (); j
++)
386 gimple_stmt_iterator gsi
;
387 basic_block guard_bb
;
388 use_pred_info_t one_pred
;
393 gsi
= gsi_last_bb (guard_bb
);
396 has_valid_pred
= false;
399 cond_stmt
= gsi_stmt (gsi
);
400 if (gimple_code (cond_stmt
) == GIMPLE_CALL
401 && EDGE_COUNT (e
->src
->succs
) >= 2)
403 /* Ignore EH edge. Can add assertion
404 on the other edge's flag. */
      /* Skip if there is essentially one successor.  */
408 if (EDGE_COUNT (e
->src
->succs
) == 2)
414 FOR_EACH_EDGE (e1
, ei1
, e
->src
->succs
)
416 if (EDGE_COUNT (e1
->dest
->succs
) == 0)
425 if (gimple_code (cond_stmt
) != GIMPLE_COND
)
427 has_valid_pred
= false;
430 one_pred
= XNEW (struct use_pred_info
);
431 one_pred
->cond
= cond_stmt
;
432 one_pred
->invert
= !!(e
->flags
& EDGE_FALSE_VALUE
);
433 (*preds
)[i
].safe_push (one_pred
);
434 has_valid_pred
= true;
440 return has_valid_pred
;
/* Computes all control dependence chains for USE_BB.  The control
   dependence chains are then converted to an array of composite
   predicates pointed to by PREDS.  PHI_BB is the basic block of
   the phi whose result is used in USE_BB.  */
449 find_predicates (vec
<use_pred_info_t
> **preds
,
454 size_t num_chains
= 0, i
;
455 vec
<edge
> *dep_chains
= 0;
456 vec
<edge
> cur_chain
= vNULL
;
457 bool has_valid_pred
= false;
458 basic_block cd_root
= 0;
460 typedef vec
<edge
> vec_edge_heap
;
461 dep_chains
= XCNEWVEC (vec_edge_heap
, MAX_NUM_CHAINS
);
463 /* First find the closest bb that is control equivalent to PHI_BB
464 that also dominates USE_BB. */
466 while (dominated_by_p (CDI_DOMINATORS
, use_bb
, cd_root
))
468 basic_block ctrl_eq_bb
= find_control_equiv_block (cd_root
);
469 if (ctrl_eq_bb
&& dominated_by_p (CDI_DOMINATORS
, use_bb
, ctrl_eq_bb
))
470 cd_root
= ctrl_eq_bb
;
475 compute_control_dep_chain (cd_root
, use_bb
,
476 dep_chains
, &num_chains
,
480 = convert_control_dep_chain_into_preds (dep_chains
,
484 /* Free individual chain */
485 cur_chain
.release ();
486 for (i
= 0; i
< num_chains
; i
++)
487 dep_chains
[i
].release ();
489 return has_valid_pred
;
/* Computes the set of incoming edges of PHI that have non-empty
   definitions of a phi chain.  The collection will be done
   recursively on operands that are defined by phis.  CD_ROOT
   is the control dependence root.  *EDGES holds the result, and
   VISITED_PHIS is a pointer set for detecting cycles.  */
499 collect_phi_def_edges (gimple phi
, basic_block cd_root
,
501 struct pointer_set_t
*visited_phis
)
507 if (pointer_set_insert (visited_phis
, phi
))
510 n
= gimple_phi_num_args (phi
);
511 for (i
= 0; i
< n
; i
++)
513 opnd_edge
= gimple_phi_arg_edge (phi
, i
);
514 opnd
= gimple_phi_arg_def (phi
, i
);
516 if (TREE_CODE (opnd
) != SSA_NAME
)
518 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
520 fprintf (dump_file
, "\n[CHECK] Found def edge %d in ", (int)i
);
521 print_gimple_stmt (dump_file
, phi
, 0, 0);
523 edges
->safe_push (opnd_edge
);
527 gimple def
= SSA_NAME_DEF_STMT (opnd
);
529 if (gimple_code (def
) == GIMPLE_PHI
530 && dominated_by_p (CDI_DOMINATORS
,
531 gimple_bb (def
), cd_root
))
532 collect_phi_def_edges (def
, cd_root
, edges
,
534 else if (!uninit_undefined_value_p (opnd
))
536 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
538 fprintf (dump_file
, "\n[CHECK] Found def edge %d in ", (int)i
);
539 print_gimple_stmt (dump_file
, phi
, 0, 0);
541 edges
->safe_push (opnd_edge
);
/* For each use edge of PHI, computes all control dependence chains.
   The control dependence chains are then converted to an array of
   composite predicates pointed to by PREDS.  */
552 find_def_preds (vec
<use_pred_info_t
> **preds
,
553 size_t *num_preds
, gimple phi
)
555 size_t num_chains
= 0, i
, n
;
556 vec
<edge
> *dep_chains
= 0;
557 vec
<edge
> cur_chain
= vNULL
;
558 vec
<edge
> def_edges
= vNULL
;
559 bool has_valid_pred
= false;
560 basic_block phi_bb
, cd_root
= 0;
561 struct pointer_set_t
*visited_phis
;
563 typedef vec
<edge
> vec_edge_heap
;
564 dep_chains
= XCNEWVEC (vec_edge_heap
, MAX_NUM_CHAINS
);
566 phi_bb
= gimple_bb (phi
);
567 /* First find the closest dominating bb to be
568 the control dependence root */
569 cd_root
= find_dom (phi_bb
);
573 visited_phis
= pointer_set_create ();
574 collect_phi_def_edges (phi
, cd_root
, &def_edges
, visited_phis
);
575 pointer_set_destroy (visited_phis
);
577 n
= def_edges
.length ();
581 for (i
= 0; i
< n
; i
++)
586 opnd_edge
= def_edges
[i
];
587 prev_nc
= num_chains
;
588 compute_control_dep_chain (cd_root
, opnd_edge
->src
,
589 dep_chains
, &num_chains
,
591 /* Free individual chain */
592 cur_chain
.release ();
594 /* Now update the newly added chains with
595 the phi operand edge: */
596 if (EDGE_COUNT (opnd_edge
->src
->succs
) > 1)
598 if (prev_nc
== num_chains
599 && num_chains
< MAX_NUM_CHAINS
)
601 for (j
= prev_nc
; j
< num_chains
; j
++)
603 dep_chains
[j
].safe_push (opnd_edge
);
609 = convert_control_dep_chain_into_preds (dep_chains
,
613 for (i
= 0; i
< num_chains
; i
++)
614 dep_chains
[i
].release ();
616 return has_valid_pred
;
619 /* Dumps the predicates (PREDS) for USESTMT. */
622 dump_predicates (gimple usestmt
, size_t num_preds
,
623 vec
<use_pred_info_t
> *preds
,
627 vec
<use_pred_info_t
> one_pred_chain
;
628 fprintf (dump_file
, msg
);
629 print_gimple_stmt (dump_file
, usestmt
, 0, 0);
630 fprintf (dump_file
, "is guarded by :\n");
631 /* do some dumping here: */
632 for (i
= 0; i
< num_preds
; i
++)
636 one_pred_chain
= preds
[i
];
637 np
= one_pred_chain
.length ();
639 for (j
= 0; j
< np
; j
++)
641 use_pred_info_t one_pred
643 if (one_pred
->invert
)
644 fprintf (dump_file
, " (.NOT.) ");
645 print_gimple_stmt (dump_file
, one_pred
->cond
, 0, 0);
647 fprintf (dump_file
, "(.AND.)\n");
649 if (i
< num_preds
- 1)
650 fprintf (dump_file
, "(.OR.)\n");
654 /* Destroys the predicate set *PREDS. */
657 destroy_predicate_vecs (size_t n
,
658 vec
<use_pred_info_t
> * preds
)
661 for (i
= 0; i
< n
; i
++)
663 for (j
= 0; j
< preds
[i
].length (); j
++)
671 /* Computes the 'normalized' conditional code with operand
672 swapping and condition inversion. */
674 static enum tree_code
675 get_cmp_code (enum tree_code orig_cmp_code
,
676 bool swap_cond
, bool invert
)
678 enum tree_code tc
= orig_cmp_code
;
681 tc
= swap_tree_comparison (orig_cmp_code
);
683 tc
= invert_tree_comparison (tc
, false);
/* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
   all values in the range satisfy (x CMPC BOUNDARY) == true.  */
704 is_value_included_in (tree val
, tree boundary
, enum tree_code cmpc
)
706 bool inverted
= false;
710 /* Only handle integer constant here. */
711 if (TREE_CODE (val
) != INTEGER_CST
712 || TREE_CODE (boundary
) != INTEGER_CST
)
715 is_unsigned
= TYPE_UNSIGNED (TREE_TYPE (val
));
717 if (cmpc
== GE_EXPR
|| cmpc
== GT_EXPR
720 cmpc
= invert_tree_comparison (cmpc
, false);
727 result
= tree_int_cst_equal (val
, boundary
);
728 else if (cmpc
== LT_EXPR
)
729 result
= INT_CST_LT_UNSIGNED (val
, boundary
);
732 gcc_assert (cmpc
== LE_EXPR
);
733 result
= (tree_int_cst_equal (val
, boundary
)
734 || INT_CST_LT_UNSIGNED (val
, boundary
));
740 result
= tree_int_cst_equal (val
, boundary
);
741 else if (cmpc
== LT_EXPR
)
742 result
= INT_CST_LT (val
, boundary
);
745 gcc_assert (cmpc
== LE_EXPR
);
746 result
= (tree_int_cst_equal (val
, boundary
)
747 || INT_CST_LT (val
, boundary
));
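/* An illustrative sketch of the range check above (hypothetical helper,
   assuming a GIMPLE context where integer_type_node is available):
   the value 3 is included in the domain of (x <= 10).  */
#if 0
static bool
example_value_in_range (void)
{
  tree val = build_int_cst (integer_type_node, 3);
  tree boundary = build_int_cst (integer_type_node, 10);
  /* 3 satisfies (x <= 10), so this returns true.  */
  return is_value_included_in (val, boundary, LE_EXPR);
}
#endif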
757 /* Returns true if PRED is common among all the predicate
758 chains (PREDS) (and therefore can be factored out).
759 NUM_PRED_CHAIN is the size of array PREDS. */
762 find_matching_predicate_in_rest_chains (use_pred_info_t pred
,
763 vec
<use_pred_info_t
> *preds
,
764 size_t num_pred_chains
)
769 if (num_pred_chains
== 1)
772 for (i
= 1; i
< num_pred_chains
; i
++)
775 vec
<use_pred_info_t
> one_chain
= preds
[i
];
776 n
= one_chain
.length ();
777 for (j
= 0; j
< n
; j
++)
779 use_pred_info_t pred2
	  /* Can relax the condition comparison to not
	     use address comparison.  However, the most common
	     case is that multiple control dependent paths share
	     a common path prefix, so address comparison should
	     be good enough.  */
787 if (pred2
->cond
== pred
->cond
788 && pred2
->invert
== pred
->invert
)
800 /* Forward declaration. */
802 is_use_properly_guarded (gimple use_stmt
,
805 unsigned uninit_opnds
,
806 struct pointer_set_t
*visited_phis
);
/* Returns true if all uninitialized opnds are pruned.  Returns false
   otherwise.  PHI is the phi node with uninitialized operands,
   UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
   FLAG_DEF is the statement defining the flag guarding the use of the
   PHI output, BOUNDARY_CST is the const value used in the predicate
   associated with the flag, CMP_CODE is the comparison code used in
   the predicate, VISITED_PHIS is the pointer set of phis visited, and
   VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
   that are also phis.  For example:

     flag_1 = phi <0, 1>			// (1)
     var_1  = phi <undef, some_val>

     flag_2 = phi <0, flag_1, flag_1>		// (2)
     var_2  = phi <undef, var_1, var_1>

     ...
     if (flag_2 == 1)
       use (var_2)				// (3)

   Because some flag arg in (1) is not constant, if we do not look into
   the flag phis recursively, it is conservatively treated as unknown and
   var_1 is thought to flow into the use at (3).  Since var_1 is potentially
   uninitialized, a false warning will be emitted.  Checking recursively
   into (1), the compiler can find out that only some_val (which is defined)
   can flow into (3), which is OK.  */
843 prune_uninit_phi_opnds_in_unrealizable_paths (
844 gimple phi
, unsigned uninit_opnds
,
845 gimple flag_def
, tree boundary_cst
,
846 enum tree_code cmp_code
,
847 struct pointer_set_t
*visited_phis
,
848 bitmap
*visited_flag_phis
)
852 for (i
= 0; i
< MIN (32, gimple_phi_num_args (flag_def
)); i
++)
856 if (!MASK_TEST_BIT (uninit_opnds
, i
))
859 flag_arg
= gimple_phi_arg_def (flag_def
, i
);
860 if (!is_gimple_constant (flag_arg
))
862 gimple flag_arg_def
, phi_arg_def
;
864 unsigned uninit_opnds_arg_phi
;
866 if (TREE_CODE (flag_arg
) != SSA_NAME
)
868 flag_arg_def
= SSA_NAME_DEF_STMT (flag_arg
);
869 if (gimple_code (flag_arg_def
) != GIMPLE_PHI
)
872 phi_arg
= gimple_phi_arg_def (phi
, i
);
873 if (TREE_CODE (phi_arg
) != SSA_NAME
)
876 phi_arg_def
= SSA_NAME_DEF_STMT (phi_arg
);
877 if (gimple_code (phi_arg_def
) != GIMPLE_PHI
)
880 if (gimple_bb (phi_arg_def
) != gimple_bb (flag_arg_def
))
883 if (!*visited_flag_phis
)
884 *visited_flag_phis
= BITMAP_ALLOC (NULL
);
886 if (bitmap_bit_p (*visited_flag_phis
,
887 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def
))))
890 bitmap_set_bit (*visited_flag_phis
,
891 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def
)));
893 /* Now recursively prune the uninitialized phi args. */
894 uninit_opnds_arg_phi
= compute_uninit_opnds_pos (phi_arg_def
);
895 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
896 phi_arg_def
, uninit_opnds_arg_phi
,
897 flag_arg_def
, boundary_cst
, cmp_code
,
898 visited_phis
, visited_flag_phis
))
901 bitmap_clear_bit (*visited_flag_phis
,
902 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def
)));
906 /* Now check if the constant is in the guarded range. */
907 if (is_value_included_in (flag_arg
, boundary_cst
, cmp_code
))
      /* Now that we know that this undefined edge is not
	 pruned, if the operand is defined by another phi,
	 we can further prune the incoming edges of that
	 phi by checking the predicates of this operand.  */
917 opnd
= gimple_phi_arg_def (phi
, i
);
918 opnd_def
= SSA_NAME_DEF_STMT (opnd
);
919 if (gimple_code (opnd_def
) == GIMPLE_PHI
)
922 unsigned uninit_opnds2
923 = compute_uninit_opnds_pos (opnd_def
);
924 gcc_assert (!MASK_EMPTY (uninit_opnds2
));
925 opnd_edge
= gimple_phi_arg_edge (phi
, i
);
926 if (!is_use_properly_guarded (phi
,
/* A helper function that determines if the predicate set
   of the use is not overlapping with that of the uninit paths.
   The most common scenario of a guarded use is one where the value
   is assigned only under some condition and is used only under a
   condition that implies the defining one.  The real world examples
   are usually more complicated, but similar, and usually result from
   inlining, e.g. a bool init_func (int * x) helper that conditionally
   initializes *x and whose return value guards the later use.

   A trivial example of this shape is sketched right after this comment:
   a use of 'x' guarded by (n > 0) && (m < 2), where 'x' is assigned
   only when (n > 0).  Predicate analysis needs to compute the composite
   predicate:

     1) 'x' use predicate: (n > 0) .AND. (m < 2)
     2) 'x' default value (non-def) predicate: .NOT. (n > 0)
	(the predicate chain for phi operand defs can be computed
	starting from a bb that is control equivalent to the phi's
	bb and is dominating the operand def.)

   and check overlapping:

     (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))

   which is unsatisfiable, so the use is properly guarded.

   This implementation provides a framework that can handle such
   scenarios.  (Note that many simple cases are handled properly
   without the predicate analysis -- this is due to the jump threading
   transformation which eliminates the merge point and thus makes
   path sensitive analysis unnecessary.)

   NUM_PREDS is the number of predicate chains, PREDS is
   the array of chains, PHI is the phi node whose incoming (undefined)
   paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
   uninit operand positions.  VISITED_PHIS is the pointer set of phi
   stmts being checked.  */
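/* An illustrative source-level sketch (not from the pass itself) of the
   trivial example referenced above: the use predicate (n > 0) .AND. (m < 2)
   does not overlap with the default-definition predicate .NOT. (n > 0), so
   no warning should be issued for the use of 'x'.  */
#if 0
extern int some_val (void);
int
example_no_overlap (int n, int m)
{
  int x;
  if (n > 0)
    x = some_val ();
  if (n > 0 && m < 2)
    return x;	/* Guarded: reaching here implies x was assigned.  */
  return 0;
}
#endif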
1016 use_pred_not_overlap_with_undef_path_pred (
1018 vec
<use_pred_info_t
> *preds
,
1019 gimple phi
, unsigned uninit_opnds
,
1020 struct pointer_set_t
*visited_phis
)
1023 gimple flag_def
= 0;
1024 tree boundary_cst
= 0;
1025 enum tree_code cmp_code
;
1026 bool swap_cond
= false;
1027 bool invert
= false;
1028 vec
<use_pred_info_t
> the_pred_chain
;
1029 bitmap visited_flag_phis
= NULL
;
1030 bool all_pruned
= false;
1032 gcc_assert (num_preds
> 0);
  /* Find within the common prefix of multiple predicate chains
     a predicate that is a comparison of a flag variable against
     a constant.  */
1036 the_pred_chain
= preds
[0];
1037 n
= the_pred_chain
.length ();
1038 for (i
= 0; i
< n
; i
++)
1041 tree cond_lhs
, cond_rhs
, flag
= 0;
1043 use_pred_info_t the_pred
1044 = the_pred_chain
[i
];
1046 cond
= the_pred
->cond
;
1047 invert
= the_pred
->invert
;
1048 cond_lhs
= gimple_cond_lhs (cond
);
1049 cond_rhs
= gimple_cond_rhs (cond
);
1050 cmp_code
= gimple_cond_code (cond
);
1052 if (cond_lhs
!= NULL_TREE
&& TREE_CODE (cond_lhs
) == SSA_NAME
1053 && cond_rhs
!= NULL_TREE
&& is_gimple_constant (cond_rhs
))
1055 boundary_cst
= cond_rhs
;
1058 else if (cond_rhs
!= NULL_TREE
&& TREE_CODE (cond_rhs
) == SSA_NAME
1059 && cond_lhs
!= NULL_TREE
&& is_gimple_constant (cond_lhs
))
1061 boundary_cst
= cond_lhs
;
1069 flag_def
= SSA_NAME_DEF_STMT (flag
);
1074 if ((gimple_code (flag_def
) == GIMPLE_PHI
)
1075 && (gimple_bb (flag_def
) == gimple_bb (phi
))
1076 && find_matching_predicate_in_rest_chains (
1077 the_pred
, preds
, num_preds
))
  /* Now check that each uninit incoming edge has a constant flag value
     that is in conflict with the use guard/predicate.  */
1088 cmp_code
= get_cmp_code (cmp_code
, swap_cond
, invert
);
1090 if (cmp_code
== ERROR_MARK
)
1093 all_pruned
= prune_uninit_phi_opnds_in_unrealizable_paths (phi
,
1099 &visited_flag_phis
);
1101 if (visited_flag_phis
)
1102 BITMAP_FREE (visited_flag_phis
);
1107 /* Returns true if TC is AND or OR */
1110 is_and_or_or (enum tree_code tc
, tree typ
)
1112 return (tc
== BIT_IOR_EXPR
1113 || (tc
== BIT_AND_EXPR
1114 && (typ
== 0 || TREE_CODE (typ
) == BOOLEAN_TYPE
)));
typedef struct norm_cond
{
  vec<gimple> conds;
  enum tree_code cond_code;
  bool invert;
} *norm_cond_t;
/* Normalizes gimple condition COND.  The normalization follows
   UD chains to form larger condition expression trees.  NORM_COND
   holds the normalized result.  COND_CODE is the logical opcode
   (AND or OR) of the normalized tree.  */
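/* An illustrative sketch of the kind of IR this normalization handles
   (hypothetical SSA names; the exact shape depends on earlier passes
   such as if-combining):

     _1 = a_5 > 0;
     _2 = b_6 < 10;
     _3 = _1 & _2;
     if (_3 != 0) ...

   Starting from the GIMPLE_COND, normalize_cond_1 follows the UD chain
   of _3 and collects the leaf conditions {a_5 > 0, b_6 < 10} into
   NORM_COND->conds with NORM_COND->cond_code set to BIT_AND_EXPR.  */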
1131 normalize_cond_1 (gimple cond
,
1132 norm_cond_t norm_cond
,
1133 enum tree_code cond_code
)
1135 enum gimple_code gc
;
1136 enum tree_code cur_cond_code
;
1139 gc
= gimple_code (cond
);
1140 if (gc
!= GIMPLE_ASSIGN
)
1142 norm_cond
->conds
.safe_push (cond
);
1146 cur_cond_code
= gimple_assign_rhs_code (cond
);
1147 rhs1
= gimple_assign_rhs1 (cond
);
1148 rhs2
= gimple_assign_rhs2 (cond
);
1149 if (cur_cond_code
== NE_EXPR
)
1151 if (integer_zerop (rhs2
)
1152 && (TREE_CODE (rhs1
) == SSA_NAME
))
1154 SSA_NAME_DEF_STMT (rhs1
),
1155 norm_cond
, cond_code
);
1156 else if (integer_zerop (rhs1
)
1157 && (TREE_CODE (rhs2
) == SSA_NAME
))
1159 SSA_NAME_DEF_STMT (rhs2
),
1160 norm_cond
, cond_code
);
1162 norm_cond
->conds
.safe_push (cond
);
1167 if (is_and_or_or (cur_cond_code
, TREE_TYPE (rhs1
))
1168 && (cond_code
== cur_cond_code
|| cond_code
== ERROR_MARK
)
1169 && (TREE_CODE (rhs1
) == SSA_NAME
&& TREE_CODE (rhs2
) == SSA_NAME
))
1171 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1
),
1172 norm_cond
, cur_cond_code
);
1173 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2
),
1174 norm_cond
, cur_cond_code
);
1175 norm_cond
->cond_code
= cur_cond_code
;
1178 norm_cond
->conds
.safe_push (cond
);
1181 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1182 if COND needs to be inverted or not. */
1185 normalize_cond (gimple cond
, norm_cond_t norm_cond
, bool invert
)
1187 enum tree_code cond_code
;
1189 norm_cond
->cond_code
= ERROR_MARK
;
1190 norm_cond
->invert
= false;
1191 norm_cond
->conds
.create (0);
1192 gcc_assert (gimple_code (cond
) == GIMPLE_COND
);
1193 cond_code
= gimple_cond_code (cond
);
1195 cond_code
= invert_tree_comparison (cond_code
, false);
1197 if (cond_code
== NE_EXPR
)
1199 if (integer_zerop (gimple_cond_rhs (cond
))
1200 && (TREE_CODE (gimple_cond_lhs (cond
)) == SSA_NAME
))
1202 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond
)),
1203 norm_cond
, ERROR_MARK
);
1204 else if (integer_zerop (gimple_cond_lhs (cond
))
1205 && (TREE_CODE (gimple_cond_rhs (cond
)) == SSA_NAME
))
1207 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond
)),
1208 norm_cond
, ERROR_MARK
);
1211 norm_cond
->conds
.safe_push (cond
);
1212 norm_cond
->invert
= invert
;
1217 norm_cond
->conds
.safe_push (cond
);
1218 norm_cond
->invert
= invert
;
1221 gcc_assert (norm_cond
->conds
.length () == 1
1222 || is_and_or_or (norm_cond
->cond_code
, NULL
));
/* Returns true if the domain for condition COND1 is a subset of
   COND2.  REVERSE is a flag.  When it is true the function checks
   if COND1 is a superset of COND2.  INVERT1 and INVERT2 are flags
   to indicate if COND1 and COND2 need to be inverted or not.  */
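/* An illustrative sketch (hypothetical helper, assuming a GIMPLE context;
   the REVERSE flag is false): the domain of (x == 5) is included in the
   domain of (x < 10), so the call below would return true.  */
#if 0
static bool
example_gcond_subset (void)
{
  tree x = create_tmp_var (integer_type_node, "x");
  gimple c1 = gimple_build_cond (EQ_EXPR, x,
				 build_int_cst (integer_type_node, 5),
				 NULL_TREE, NULL_TREE);
  gimple c2 = gimple_build_cond (LT_EXPR, x,
				 build_int_cst (integer_type_node, 10),
				 NULL_TREE, NULL_TREE);
  return is_gcond_subset_of (c1, false, c2, false, false);
}
#endif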
1231 is_gcond_subset_of (gimple cond1
, bool invert1
,
1232 gimple cond2
, bool invert2
,
1235 enum gimple_code gc1
, gc2
;
1236 enum tree_code cond1_code
, cond2_code
;
1238 tree cond1_lhs
, cond1_rhs
, cond2_lhs
, cond2_rhs
;
1240 /* Take the short cut. */
1251 gc1
= gimple_code (cond1
);
1252 gc2
= gimple_code (cond2
);
1254 if ((gc1
!= GIMPLE_ASSIGN
&& gc1
!= GIMPLE_COND
)
1255 || (gc2
!= GIMPLE_ASSIGN
&& gc2
!= GIMPLE_COND
))
1256 return cond1
== cond2
;
1258 cond1_code
= ((gc1
== GIMPLE_ASSIGN
)
1259 ? gimple_assign_rhs_code (cond1
)
1260 : gimple_cond_code (cond1
));
1262 cond2_code
= ((gc2
== GIMPLE_ASSIGN
)
1263 ? gimple_assign_rhs_code (cond2
)
1264 : gimple_cond_code (cond2
));
1266 if (TREE_CODE_CLASS (cond1_code
) != tcc_comparison
1267 || TREE_CODE_CLASS (cond2_code
) != tcc_comparison
)
1271 cond1_code
= invert_tree_comparison (cond1_code
, false);
1273 cond2_code
= invert_tree_comparison (cond2_code
, false);
1275 cond1_lhs
= ((gc1
== GIMPLE_ASSIGN
)
1276 ? gimple_assign_rhs1 (cond1
)
1277 : gimple_cond_lhs (cond1
));
1278 cond1_rhs
= ((gc1
== GIMPLE_ASSIGN
)
1279 ? gimple_assign_rhs2 (cond1
)
1280 : gimple_cond_rhs (cond1
));
1281 cond2_lhs
= ((gc2
== GIMPLE_ASSIGN
)
1282 ? gimple_assign_rhs1 (cond2
)
1283 : gimple_cond_lhs (cond2
));
1284 cond2_rhs
= ((gc2
== GIMPLE_ASSIGN
)
1285 ? gimple_assign_rhs2 (cond2
)
1286 : gimple_cond_rhs (cond2
));
1288 /* Assuming const operands have been swapped to the
1289 rhs at this point of the analysis. */
1291 if (cond1_lhs
!= cond2_lhs
)
1294 if (!is_gimple_constant (cond1_rhs
)
1295 || TREE_CODE (cond1_rhs
) != INTEGER_CST
)
1296 return (cond1_rhs
== cond2_rhs
);
1298 if (!is_gimple_constant (cond2_rhs
)
1299 || TREE_CODE (cond2_rhs
) != INTEGER_CST
)
1300 return (cond1_rhs
== cond2_rhs
);
1302 if (cond1_code
== EQ_EXPR
)
1303 return is_value_included_in (cond1_rhs
,
1304 cond2_rhs
, cond2_code
);
1305 if (cond1_code
== NE_EXPR
|| cond2_code
== EQ_EXPR
)
1306 return ((cond2_code
== cond1_code
)
1307 && tree_int_cst_equal (cond1_rhs
, cond2_rhs
));
1309 if (((cond1_code
== GE_EXPR
|| cond1_code
== GT_EXPR
)
1310 && (cond2_code
== LE_EXPR
|| cond2_code
== LT_EXPR
))
1311 || ((cond1_code
== LE_EXPR
|| cond1_code
== LT_EXPR
)
1312 && (cond2_code
== GE_EXPR
|| cond2_code
== GT_EXPR
)))
1315 if (cond1_code
!= GE_EXPR
&& cond1_code
!= GT_EXPR
1316 && cond1_code
!= LE_EXPR
&& cond1_code
!= LT_EXPR
)
1319 if (cond1_code
== GT_EXPR
)
1321 cond1_code
= GE_EXPR
;
1322 cond1_rhs
= fold_binary (PLUS_EXPR
, TREE_TYPE (cond1_rhs
),
1324 fold_convert (TREE_TYPE (cond1_rhs
),
1327 else if (cond1_code
== LT_EXPR
)
1329 cond1_code
= LE_EXPR
;
1330 cond1_rhs
= fold_binary (MINUS_EXPR
, TREE_TYPE (cond1_rhs
),
1332 fold_convert (TREE_TYPE (cond1_rhs
),
1339 gcc_assert (cond1_code
== GE_EXPR
|| cond1_code
== LE_EXPR
);
1341 if (cond2_code
== GE_EXPR
|| cond2_code
== GT_EXPR
||
1342 cond2_code
== LE_EXPR
|| cond2_code
== LT_EXPR
)
1343 return is_value_included_in (cond1_rhs
,
1344 cond2_rhs
, cond2_code
);
1345 else if (cond2_code
== NE_EXPR
)
1347 (is_value_included_in (cond1_rhs
,
1348 cond2_rhs
, cond2_code
)
1349 && !is_value_included_in (cond2_rhs
,
1350 cond1_rhs
, cond1_code
));
/* Returns true if the domain of the condition expression
   in COND is a subset of any of the sub-conditions
   of the normalized condition NORM_COND.  INVERT is a flag
   to indicate if COND needs to be inverted.
   REVERSE is a flag.  When it is true, the check is reversed --
   it returns true if COND is a superset of any of the subconditions
   of NORM_COND.  */
1363 is_subset_of_any (gimple cond
, bool invert
,
1364 norm_cond_t norm_cond
, bool reverse
)
1367 size_t len
= norm_cond
->conds
.length ();
1369 for (i
= 0; i
< len
; i
++)
1371 if (is_gcond_subset_of (cond
, invert
,
1372 norm_cond
->conds
[i
],
/* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
   expressions (formed by following UD chains, not control
   dependence chains).  The function returns true if the domain of
   the OR expression NORM_COND1 is a subset of NORM_COND2's.
   The implementation is conservative, and it returns false if
   the inclusion relationship may not hold.  */
1387 is_or_set_subset_of (norm_cond_t norm_cond1
,
1388 norm_cond_t norm_cond2
)
1391 size_t len
= norm_cond1
->conds
.length ();
1393 for (i
= 0; i
< len
; i
++)
1395 if (!is_subset_of_any (norm_cond1
->conds
[i
],
1396 false, norm_cond2
, false))
/* NORM_COND1 and NORM_COND2 are normalized logical AND
   expressions (formed by following UD chains, not control
   dependence chains).  The function returns true if the domain of
   the AND expression NORM_COND1 is a subset of NORM_COND2's.  */
1408 is_and_set_subset_of (norm_cond_t norm_cond1
,
1409 norm_cond_t norm_cond2
)
1412 size_t len
= norm_cond2
->conds
.length ();
1414 for (i
= 0; i
< len
; i
++)
1416 if (!is_subset_of_any (norm_cond2
->conds
[i
],
1417 false, norm_cond1
, true))
/* Returns true if the domain of NORM_COND1 is a subset
   of that of NORM_COND2.  Returns false if it can not be
   proved.  */
1428 is_norm_cond_subset_of (norm_cond_t norm_cond1
,
1429 norm_cond_t norm_cond2
)
1432 enum tree_code code1
, code2
;
1434 code1
= norm_cond1
->cond_code
;
1435 code2
= norm_cond2
->cond_code
;
1437 if (code1
== BIT_AND_EXPR
)
1439 /* Both conditions are AND expressions. */
1440 if (code2
== BIT_AND_EXPR
)
1441 return is_and_set_subset_of (norm_cond1
, norm_cond2
);
1442 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1443 expression. In this case, returns true if any subexpression
1444 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1445 else if (code2
== BIT_IOR_EXPR
)
1448 len1
= norm_cond1
->conds
.length ();
1449 for (i
= 0; i
< len1
; i
++)
1451 gimple cond1
= norm_cond1
->conds
[i
];
1452 if (is_subset_of_any (cond1
, false, norm_cond2
, false))
1459 gcc_assert (code2
== ERROR_MARK
);
1460 gcc_assert (norm_cond2
->conds
.length () == 1);
1461 return is_subset_of_any (norm_cond2
->conds
[0],
1462 norm_cond2
->invert
, norm_cond1
, true);
1465 /* NORM_COND1 is an OR expression */
1466 else if (code1
== BIT_IOR_EXPR
)
1471 return is_or_set_subset_of (norm_cond1
, norm_cond2
);
1475 gcc_assert (code1
== ERROR_MARK
);
1476 gcc_assert (norm_cond1
->conds
.length () == 1);
  /* Conservatively returns false if NORM_COND1 is non-decomposable
     and NORM_COND2 is an AND expression.  */
1479 if (code2
== BIT_AND_EXPR
)
1482 if (code2
== BIT_IOR_EXPR
)
1483 return is_subset_of_any (norm_cond1
->conds
[0],
1484 norm_cond1
->invert
, norm_cond2
, false);
1486 gcc_assert (code2
== ERROR_MARK
);
1487 gcc_assert (norm_cond2
->conds
.length () == 1);
1488 return is_gcond_subset_of (norm_cond1
->conds
[0],
1490 norm_cond2
->conds
[0],
1491 norm_cond2
->invert
, false);
/* Returns true if the domain of the single predicate expression
   EXPR1 is a subset of that of EXPR2.  Returns false if it
   can not be proved.  */
1500 is_pred_expr_subset_of (use_pred_info_t expr1
,
1501 use_pred_info_t expr2
)
1503 gimple cond1
, cond2
;
1504 enum tree_code code1
, code2
;
1505 struct norm_cond norm_cond1
, norm_cond2
;
1506 bool is_subset
= false;
1508 cond1
= expr1
->cond
;
1509 cond2
= expr2
->cond
;
1510 code1
= gimple_cond_code (cond1
);
1511 code2
= gimple_cond_code (cond2
);
1514 code1
= invert_tree_comparison (code1
, false);
1516 code2
= invert_tree_comparison (code2
, false);
1518 /* Fast path -- match exactly */
1519 if ((gimple_cond_lhs (cond1
) == gimple_cond_lhs (cond2
))
1520 && (gimple_cond_rhs (cond1
) == gimple_cond_rhs (cond2
))
1521 && (code1
== code2
))
  /* Normalize conditions.  To keep NE_EXPR, do not invert
     when both need inversion.  */
1526 normalize_cond (cond1
, &norm_cond1
, (expr1
->invert
));
1527 normalize_cond (cond2
, &norm_cond2
, (expr2
->invert
));
1529 is_subset
= is_norm_cond_subset_of (&norm_cond1
, &norm_cond2
);
1532 norm_cond1
.conds
.release ();
1533 norm_cond2
.conds
.release ();
1537 /* Returns true if the domain of PRED1 is a subset
1538 of that of PRED2. Returns false if it can not be proved so. */
1541 is_pred_chain_subset_of (vec
<use_pred_info_t
> pred1
,
1542 vec
<use_pred_info_t
> pred2
)
1544 size_t np1
, np2
, i1
, i2
;
1546 np1
= pred1
.length ();
1547 np2
= pred2
.length ();
1549 for (i2
= 0; i2
< np2
; i2
++)
1552 use_pred_info_t info2
1554 for (i1
= 0; i1
< np1
; i1
++)
1556 use_pred_info_t info1
1558 if (is_pred_expr_subset_of (info1
, info2
))
/* Returns true if the domain defined by
   one pred chain ONE_PRED is a subset of the domain
   of *PREDS.  It returns false if ONE_PRED's domain is
   not a subset of any of the sub-domains of PREDS
   (corresponding to each individual chain in it), even
   though it may still be a subset of the whole domain
   of PREDS which is the union (ORed) of all its subdomains.
   In other words, the result is conservative.  */
1580 is_included_in (vec
<use_pred_info_t
> one_pred
,
1581 vec
<use_pred_info_t
> *preds
,
1586 for (i
= 0; i
< n
; i
++)
1588 if (is_pred_chain_subset_of (one_pred
, preds
[i
]))
/* Compares two predicate sets PREDS1 and PREDS2 and returns
   true if the domain defined by PREDS1 is a superset
   of PREDS2's domain.  N1 and N2 are array sizes of PREDS1 and
   PREDS2 respectively.  The implementation chooses not to build
   generic trees (and rely on the folding capability of the
   compiler), but instead performs brute force comparison of
   individual predicate chains (this won't be a compile time problem
   as the chains are pretty short).  When the function returns
   false, it does not necessarily mean *PREDS1 is not a superset
   of *PREDS2, but only that it may not be so, since the analysis
   can not prove it.  In such cases, false warnings may still be
   emitted.  */
1609 is_superset_of (vec
<use_pred_info_t
> *preds1
,
1611 vec
<use_pred_info_t
> *preds2
,
1615 vec
<use_pred_info_t
> one_pred_chain
;
1617 for (i
= 0; i
< n2
; i
++)
1619 one_pred_chain
= preds2
[i
];
1620 if (!is_included_in (one_pred_chain
, preds1
, n1
))
/* Comparison function used by qsort.  It is used to
   sort predicate chains to allow predicate
   simplification -- shorter chains are placed first.  */
1632 pred_chain_length_cmp (const void *p1
, const void *p2
)
1634 use_pred_info_t i1
, i2
;
1635 vec
<use_pred_info_t
> const *chain1
1636 = (vec
<use_pred_info_t
> const *)p1
;
1637 vec
<use_pred_info_t
> const *chain2
1638 = (vec
<use_pred_info_t
> const *)p2
;
1640 if (chain1
->length () != chain2
->length ())
1641 return (chain1
->length () - chain2
->length ());
  /* Allow predicates with a similar prefix to come together.  */
1647 if (!i1
->invert
&& i2
->invert
)
1649 else if (i1
->invert
&& !i2
->invert
)
1652 return gimple_uid (i1
->cond
) - gimple_uid (i2
->cond
);
/* x OR (!x AND y) is equivalent to x OR y.
   This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
   into x1 OR x2 OR x3.  PREDS is the predicate chains, and N is
   the number of chains.  Returns true if normalization happens.  */
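/* An illustrative sketch of the identity being exploited, stated in plain
   C terms (the pass applies it to predicate chains, not to source
   expressions): x || (!x && y) is equivalent to x || y, and more generally
   x1 || (!x1 && x2) || (!x1 && !x2 && x3) simplifies to x1 || x2 || x3.  */
#if 0
static int
example_normalization (int x1, int x2, int x3)
{
  int a = x1 || (!x1 && x2) || (!x1 && !x2 && x3);
  int b = x1 || x2 || x3;
  return a == b;	/* Always 1, for any inputs.  */
}
#endif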
1661 normalize_preds (vec
<use_pred_info_t
> *preds
, size_t *n
)
1664 vec
<use_pred_info_t
> pred_chain
;
1665 vec
<use_pred_info_t
> x
= vNULL
;
1666 use_pred_info_t xj
= 0, nxj
= 0;
1671 /* First sort the chains in ascending order of lengths. */
1672 qsort (preds
, *n
, sizeof (void *), pred_chain_length_cmp
);
1673 pred_chain
= preds
[0];
1674 ll
= pred_chain
.length ();
1679 use_pred_info_t xx
, yy
, xx2
, nyy
;
1680 vec
<use_pred_info_t
> pred_chain2
= preds
[1];
1681 if (pred_chain2
.length () != 2)
1684 /* See if simplification x AND y OR x AND !y is possible. */
1687 xx2
= pred_chain2
[0];
1688 nyy
= pred_chain2
[1];
1689 if (gimple_cond_lhs (xx
->cond
) != gimple_cond_lhs (xx2
->cond
)
1690 || gimple_cond_rhs (xx
->cond
) != gimple_cond_rhs (xx2
->cond
)
1691 || gimple_cond_code (xx
->cond
) != gimple_cond_code (xx2
->cond
)
1692 || (xx
->invert
!= xx2
->invert
))
1694 if (gimple_cond_lhs (yy
->cond
) != gimple_cond_lhs (nyy
->cond
)
1695 || gimple_cond_rhs (yy
->cond
) != gimple_cond_rhs (nyy
->cond
)
1696 || gimple_cond_code (yy
->cond
) != gimple_cond_code (nyy
->cond
)
1697 || (yy
->invert
== nyy
->invert
))
1700 /* Now merge the first two chains. */
1704 pred_chain
.release ();
1705 pred_chain2
.release ();
1706 pred_chain
.safe_push (xx
);
1707 preds
[0] = pred_chain
;
1708 for (i
= 1; i
< *n
- 1; i
++)
1709 preds
[i
] = preds
[i
+ 1];
1711 preds
[*n
- 1].create (0);
1718 x
.safe_push (pred_chain
[0]);
1720 /* The loop extracts x1, x2, x3, etc from chains
1721 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1722 for (i
= 1; i
< *n
; i
++)
1724 pred_chain
= preds
[i
];
1725 if (pred_chain
.length () != i
+ 1)
1728 for (j
= 0; j
< i
; j
++)
1731 nxj
= pred_chain
[j
];
1733 /* Check if nxj is !xj */
1734 if (gimple_cond_lhs (xj
->cond
) != gimple_cond_lhs (nxj
->cond
)
1735 || gimple_cond_rhs (xj
->cond
) != gimple_cond_rhs (nxj
->cond
)
1736 || gimple_cond_code (xj
->cond
) != gimple_cond_code (nxj
->cond
)
1737 || (xj
->invert
== nxj
->invert
))
1741 x
.safe_push (pred_chain
[i
]);
  /* Now normalize the pred chains using the extracted x1, x2, x3 etc.  */
1745 for (j
= 0; j
< *n
; j
++)
1750 t
= XNEW (struct use_pred_info
);
1756 for (i
= 0; i
< *n
; i
++)
1758 pred_chain
= preds
[i
];
1759 for (j
= 0; j
< pred_chain
.length (); j
++)
1760 free (pred_chain
[j
]);
1761 pred_chain
.release ();
1763 pred_chain
.safe_push (x
[i
]);
1764 preds
[i
] = pred_chain
;
/* Computes the predicates that guard the use and checks
   if the incoming paths that have empty (or possibly
   empty) definitions can be pruned/filtered.  The function returns
   true if it can be determined that the use of PHI's def in
   USE_STMT is guarded by a predicate set not overlapping with
   the predicate sets of all runtime paths that do not have a definition.
   Returns false if it is not, or if it can not be determined.  USE_BB is
   the bb of the use (for a phi operand use, the bb is not the bb of
   the phi stmt, but the src bb of the operand edge).  UNINIT_OPNDS
   is a bit vector.  If an operand of PHI is uninitialized, the
   corresponding bit in the vector is 1.  VISITED_PHIS is a pointer
   set of phis being visited.  */
1785 is_use_properly_guarded (gimple use_stmt
,
1788 unsigned uninit_opnds
,
1789 struct pointer_set_t
*visited_phis
)
1792 vec
<use_pred_info_t
> *preds
= 0;
1793 vec
<use_pred_info_t
> *def_preds
= 0;
1794 size_t num_preds
= 0, num_def_preds
= 0;
1795 bool has_valid_preds
= false;
1796 bool is_properly_guarded
= false;
1798 if (pointer_set_insert (visited_phis
, phi
))
1801 phi_bb
= gimple_bb (phi
);
1803 if (is_non_loop_exit_postdominating (use_bb
, phi_bb
))
1806 has_valid_preds
= find_predicates (&preds
, &num_preds
,
1809 if (!has_valid_preds
)
1811 destroy_predicate_vecs (num_preds
, preds
);
1816 dump_predicates (use_stmt
, num_preds
, preds
,
1819 has_valid_preds
= find_def_preds (&def_preds
,
1820 &num_def_preds
, phi
);
1822 if (has_valid_preds
)
1826 dump_predicates (phi
, num_def_preds
, def_preds
,
1827 "Operand defs of phi ");
1829 normed
= normalize_preds (def_preds
, &num_def_preds
);
1830 if (normed
&& dump_file
)
1832 fprintf (dump_file
, "\nNormalized to\n");
1833 dump_predicates (phi
, num_def_preds
, def_preds
,
1834 "Operand defs of phi ");
1836 is_properly_guarded
=
1837 is_superset_of (def_preds
, num_def_preds
,
1841 /* further prune the dead incoming phi edges. */
1842 if (!is_properly_guarded
)
1844 = use_pred_not_overlap_with_undef_path_pred (
1845 num_preds
, preds
, phi
, uninit_opnds
, visited_phis
);
1847 destroy_predicate_vecs (num_preds
, preds
);
1848 destroy_predicate_vecs (num_def_preds
, def_preds
);
1849 return is_properly_guarded
;
1852 /* Searches through all uses of a potentially
1853 uninitialized variable defined by PHI and returns a use
1854 statement if the use is not properly guarded. It returns
1855 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1856 holding the position(s) of uninit PHI operands. WORKLIST
1857 is the vector of candidate phis that may be updated by this
1858 function. ADDED_TO_WORKLIST is the pointer set tracking
1859 if the new phi is already in the worklist. */
1862 find_uninit_use (gimple phi
, unsigned uninit_opnds
,
1863 vec
<gimple
> *worklist
,
1864 struct pointer_set_t
*added_to_worklist
)
1867 use_operand_p use_p
;
1869 imm_use_iterator iter
;
1871 phi_result
= gimple_phi_result (phi
);
1873 FOR_EACH_IMM_USE_FAST (use_p
, iter
, phi_result
)
1875 struct pointer_set_t
*visited_phis
;
1878 use_stmt
= USE_STMT (use_p
);
1879 if (is_gimple_debug (use_stmt
))
1882 visited_phis
= pointer_set_create ();
1884 if (gimple_code (use_stmt
) == GIMPLE_PHI
)
1885 use_bb
= gimple_phi_arg_edge (use_stmt
,
1886 PHI_ARG_INDEX_FROM_USE (use_p
))->src
;
1888 use_bb
= gimple_bb (use_stmt
);
1890 if (is_use_properly_guarded (use_stmt
,
1896 pointer_set_destroy (visited_phis
);
1899 pointer_set_destroy (visited_phis
);
1901 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1903 fprintf (dump_file
, "[CHECK]: Found unguarded use: ");
1904 print_gimple_stmt (dump_file
, use_stmt
, 0, 0);
1906 /* Found one real use, return. */
1907 if (gimple_code (use_stmt
) != GIMPLE_PHI
)
1910 /* Found a phi use that is not guarded,
1911 add the phi to the worklist. */
1912 if (!pointer_set_insert (added_to_worklist
,
1915 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1917 fprintf (dump_file
, "[WORKLIST]: Update worklist with phi: ");
1918 print_gimple_stmt (dump_file
, use_stmt
, 0, 0);
1921 worklist
->safe_push (use_stmt
);
1922 pointer_set_insert (possibly_undefined_names
, phi_result
);
/* Looks for inputs to PHI that are SSA_NAMEs that have empty definitions
   and gives a warning if there exists a runtime path from the entry to a
   use of the PHI def that does not contain a definition.  In other words,
   the warning is on the real use.  The more dead paths that can be pruned
   by the compiler, the fewer false positives the warning produces.  WORKLIST
   is a vector of candidate phis to be examined.  ADDED_TO_WORKLIST is
   a pointer set tracking if the new phi is added to the worklist or not.  */
1938 warn_uninitialized_phi (gimple phi
, vec
<gimple
> *worklist
,
1939 struct pointer_set_t
*added_to_worklist
)
1941 unsigned uninit_opnds
;
1942 gimple uninit_use_stmt
= 0;
1945 /* Don't look at virtual operands. */
1946 if (virtual_operand_p (gimple_phi_result (phi
)))
1949 uninit_opnds
= compute_uninit_opnds_pos (phi
);
1951 if (MASK_EMPTY (uninit_opnds
))
1954 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
1956 fprintf (dump_file
, "[CHECK]: examining phi: ");
1957 print_gimple_stmt (dump_file
, phi
, 0, 0);
1960 /* Now check if we have any use of the value without proper guard. */
1961 uninit_use_stmt
= find_uninit_use (phi
, uninit_opnds
,
1962 worklist
, added_to_worklist
);
1964 /* All uses are properly guarded. */
1965 if (!uninit_use_stmt
)
1968 uninit_op
= gimple_phi_arg_def (phi
, MASK_FIRST_SET_BIT (uninit_opnds
));
1969 if (SSA_NAME_VAR (uninit_op
) == NULL_TREE
)
1971 warn_uninit (OPT_Wmaybe_uninitialized
, uninit_op
, SSA_NAME_VAR (uninit_op
),
1972 SSA_NAME_VAR (uninit_op
),
1973 "%qD may be used uninitialized in this function",
1979 /* Entry point to the late uninitialized warning pass. */
1982 execute_late_warn_uninitialized (void)
1985 gimple_stmt_iterator gsi
;
1986 vec
<gimple
> worklist
= vNULL
;
1987 struct pointer_set_t
*added_to_worklist
;
1989 calculate_dominance_info (CDI_DOMINATORS
);
1990 calculate_dominance_info (CDI_POST_DOMINATORS
);
1991 /* Re-do the plain uninitialized variable check, as optimization may have
1992 straightened control flow. Do this first so that we don't accidentally
1993 get a "may be" warning when we'd have seen an "is" warning later. */
1994 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
1996 timevar_push (TV_TREE_UNINIT
);
1998 possibly_undefined_names
= pointer_set_create ();
1999 added_to_worklist
= pointer_set_create ();
2001 /* Initialize worklist */
2003 for (gsi
= gsi_start_phis (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2005 gimple phi
= gsi_stmt (gsi
);
2008 n
= gimple_phi_num_args (phi
);
2010 /* Don't look at virtual operands. */
2011 if (virtual_operand_p (gimple_phi_result (phi
)))
2014 for (i
= 0; i
< n
; ++i
)
2016 tree op
= gimple_phi_arg_def (phi
, i
);
2017 if (TREE_CODE (op
) == SSA_NAME
2018 && uninit_undefined_value_p (op
))
2020 worklist
.safe_push (phi
);
2021 pointer_set_insert (added_to_worklist
, phi
);
2022 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
2024 fprintf (dump_file
, "[WORKLIST]: add to initial list: ");
2025 print_gimple_stmt (dump_file
, phi
, 0, 0);
2032 while (worklist
.length () != 0)
2035 cur_phi
= worklist
.pop ();
2036 warn_uninitialized_phi (cur_phi
, &worklist
, added_to_worklist
);
2039 worklist
.release ();
2040 pointer_set_destroy (added_to_worklist
);
2041 pointer_set_destroy (possibly_undefined_names
);
2042 possibly_undefined_names
= NULL
;
2043 free_dominance_info (CDI_POST_DOMINATORS
);
2044 timevar_pop (TV_TREE_UNINIT
);
2049 gate_warn_uninitialized (void)
2051 return warn_uninitialized
!= 0;
2054 struct gimple_opt_pass pass_late_warn_uninitialized
=
2058 "uninit", /* name */
2059 OPTGROUP_NONE
, /* optinfo_flags */
2060 gate_warn_uninitialized
, /* gate */
2061 execute_late_warn_uninitialized
, /* execute */
2064 0, /* static_pass_number */
2065 TV_NONE
, /* tv_id */
2066 PROP_ssa
, /* properties_required */
2067 0, /* properties_provided */
2068 0, /* properties_destroyed */
2069 0, /* todo_flags_start */
2070 0 /* todo_flags_finish */