/* Predicate aware uninitialized variable warning.
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2010 Free Software
   Foundation, Inc.
   Contributed by Xinliang David Li <davidxl@google.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "langhooks.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "pointer-set.h"
#include "tree-flow.h"
#include "tree-inline.h"
#include "tree-dump.h"
#include "tree-pass.h"
#include "diagnostic-core.h"

/* This implements the pass that does predicate aware warning on uses of
   possibly uninitialized variables.  The pass first collects the set of
   possibly uninitialized SSA names.  For each such name, it walks through
   all its immediate uses.  For each immediate use, it rebuilds the condition
   expression (the predicate) that guards the use.  The predicate is then
   examined to see if the variable is always defined under that same
   condition.  This is done either by pruning the unrealizable paths that
   lead to the default definitions or by checking if the predicate set that
   guards the defining paths is a superset of the use predicate.  */
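
/* As an illustrative sketch (not code from this file), the pass is meant to
   stay quiet on guarded uses such as the following, where the predicate
   (n > 0) guarding the use is implied by the predicate guarding the
   definition:

       int v;
       if (n > 0)
         v = compute ();
       ...
       if (n > 0)
         use (v);     // no warning: every path reaching this use defines v

   Removing the second guard would leave a runtime path from function entry
   to use (v) with no definition of v, and the pass is expected to warn.  */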

/* Pointer set of potentially undefined ssa names, i.e.,
   ssa names that are defined by phi with operands that
   are not defined or potentially undefined.  */
static struct pointer_set_t *possibly_undefined_names = 0;

/* Bit mask handling macros.  */
#define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
#define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
#define MASK_EMPTY(mask) (mask == 0)

/* Returns the first bit position (starting from LSB)
   in MASK that is non-zero.  Returns -1 if the mask is empty.  */
static int
get_mask_first_set_bit (unsigned mask)
{
  int pos = 0;

  if (mask == 0)
    return -1;

  while ((mask & (1 << pos)) == 0)
    pos++;

  return pos;
}
#define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)

/* Return true if T, an SSA_NAME, has an undefined value.  */

bool
ssa_undefined_value_p (tree t)
{
  tree var = SSA_NAME_VAR (t);

  /* Parameters get their initial value from the function entry.  */
  if (TREE_CODE (var) == PARM_DECL)
    return false;

  /* When returning by reference the return address is actually a hidden
     parameter.  */
  if (TREE_CODE (SSA_NAME_VAR (t)) == RESULT_DECL
      && DECL_BY_REFERENCE (SSA_NAME_VAR (t)))
    return false;

  /* Hard register variables get their initial value from the ether.  */
  if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
    return false;

  /* The value is undefined iff its definition statement is empty.  */
  return (gimple_nop_p (SSA_NAME_DEF_STMT (t))
          || (possibly_undefined_names
              && pointer_set_contains (possibly_undefined_names, t)));
}

/* Checks if the operand OPND of PHI is defined by another phi with one
   operand defined by this PHI, but whose remaining operands are all
   defined.  If so, returns true to skip this operand as being redundant.
   Can be enhanced to be more general.  */

static bool
can_skip_redundant_opnd (tree opnd, gimple phi)
{
  gimple op_def;
  tree phi_def;
  size_t i, n;

  phi_def = gimple_phi_result (phi);
  op_def = SSA_NAME_DEF_STMT (opnd);
  if (gimple_code (op_def) != GIMPLE_PHI)
    return false;
  n = gimple_phi_num_args (op_def);
  for (i = 0; i < n; ++i)
    {
      tree op = gimple_phi_arg_def (op_def, i);
      if (TREE_CODE (op) != SSA_NAME)
        continue;
      if (op != phi_def && ssa_undefined_value_p (op))
        return false;
    }

  return true;
}

/* Returns a bit mask holding the positions of arguments in PHI
   that have empty (or possibly empty) definitions.  */

static unsigned
compute_uninit_opnds_pos (gimple phi)
{
  size_t i, n;
  unsigned uninit_opnds = 0;

  n = gimple_phi_num_args (phi);
  /* Bail out for phi with too many args.  */
  if (n > 32)
    return 0;

  for (i = 0; i < n; ++i)
    {
      tree op = gimple_phi_arg_def (phi, i);
      if (TREE_CODE (op) == SSA_NAME
          && ssa_undefined_value_p (op)
          && !can_skip_redundant_opnd (op, phi))
        MASK_SET_BIT (uninit_opnds, i);
    }
  return uninit_opnds;
}
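
/* Illustrative note (not part of the original sources): for a phi such as
   x_3 = PHI <x_1(D), 5>, operand 0 has an empty (default) definition, so
   the mask returned above is 1 (bit 0 set) and MASK_FIRST_SET_BIT on it
   yields 0.  */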

/* Find the immediate postdominator PDOM of the specified
   basic block BLOCK.  */

static inline basic_block
find_pdom (basic_block block)
{
  if (block == EXIT_BLOCK_PTR)
    return EXIT_BLOCK_PTR;
  else
    {
      basic_block bb
          = get_immediate_dominator (CDI_POST_DOMINATORS, block);
      if (!bb)
        return EXIT_BLOCK_PTR;
      return bb;
    }
}

/* Find the immediate DOM of the specified
   basic block BLOCK.  */

static inline basic_block
find_dom (basic_block block)
{
  if (block == ENTRY_BLOCK_PTR)
    return ENTRY_BLOCK_PTR;
  else
    {
      basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
      if (!bb)
        return ENTRY_BLOCK_PTR;
      return bb;
    }
}

/* Returns true if BB1 is postdominating BB2 and BB1 is
   not a loop exit bb.  The loop exit bb check is simple and does
   not cover all cases.  */

static bool
is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
{
  if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
    return false;

  if (single_pred_p (bb1) && !single_succ_p (bb2))
    return false;

  return true;
}

/* Find the closest postdominator of a specified BB, which is control
   equivalent to BB.  */

static inline basic_block
find_control_equiv_block (basic_block bb)
{
  basic_block pdom;

  pdom = find_pdom (bb);

  /* Skip the postdominating bb that is also a loop exit.  */
  if (!is_non_loop_exit_postdominating (pdom, bb))
    return NULL;

  if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
    return pdom;

  return NULL;
}

#define MAX_NUM_CHAINS 8
#define MAX_CHAIN_LEN 5

/* Computes the control dependence chains (paths of edges)
   for DEP_BB up to the dominating basic block BB (the head node of a
   chain should be dominated by it).  CD_CHAINS is a pointer to a
   dynamic array holding the result chains.  CUR_CD_CHAIN is the current
   chain being computed.  *NUM_CHAINS is the total number of chains.  The
   function returns true if the information is successfully computed,
   and false if there is no control dependence or it is not computed.  */

static bool
compute_control_dep_chain (basic_block bb, basic_block dep_bb,
                           VEC(edge, heap) **cd_chains,
                           size_t *num_chains,
                           VEC(edge, heap) **cur_cd_chain)
{
  edge_iterator ei;
  edge e;
  size_t i;
  bool found_cd_chain = false;
  size_t cur_chain_len = 0;

  if (EDGE_COUNT (bb->succs) < 2)
    return false;

  /* Could use a set instead.  */
  cur_chain_len = VEC_length (edge, *cur_cd_chain);
  if (cur_chain_len > MAX_CHAIN_LEN)
    return false;

  for (i = 0; i < cur_chain_len; i++)
    {
      edge e = VEC_index (edge, *cur_cd_chain, i);
      /* Cycle detected.  */
      if (e->src == bb)
        return false;
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      basic_block cd_bb;

      if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
        continue;

      cd_bb = e->dest;
      VEC_safe_push (edge, heap, *cur_cd_chain, e);
      while (!is_non_loop_exit_postdominating (cd_bb, bb))
        {
          if (cd_bb == dep_bb)
            {
              /* Found a direct control dependence.  */
              if (*num_chains < MAX_NUM_CHAINS)
                {
                  cd_chains[*num_chains]
                      = VEC_copy (edge, heap, *cur_cd_chain);
                  (*num_chains)++;
                }
              found_cd_chain = true;
              /* Check path from next edge.  */
              break;
            }

          /* Now check if DEP_BB is indirectly control dependent on BB.  */
          if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
                                         num_chains, cur_cd_chain))
            {
              found_cd_chain = true;
              break;
            }

          cd_bb = find_pdom (cd_bb);
          if (cd_bb == EXIT_BLOCK_PTR)
            break;
        }
      VEC_pop (edge, *cur_cd_chain);
      gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);
    }
  gcc_assert (VEC_length (edge, *cur_cd_chain) == cur_chain_len);

  return found_cd_chain;
}
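
/* Illustrative note (not from the original sources): for a CFG like

       bb1:  if (a) goto bb2; else goto bb4;
       bb2:  if (b) goto bb3; else goto bb4;
       bb3:  ...                               <-- DEP_BB
       bb4:  ...

   the control dependence chain of bb3 up to bb1 is the edge sequence
   { bb1->bb2 (a is true), bb2->bb3 (b is true) }, which the routine above
   records as a single chain in CD_CHAINS.  */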

typedef struct use_pred_info
{
  gimple cond;
  bool invert;
} *use_pred_info_t;

DEF_VEC_P(use_pred_info_t);
DEF_VEC_ALLOC_P(use_pred_info_t, heap);

/* Converts the chains of control dependence edges into a set of
   predicates.  A control dependence chain is represented by a vector of
   edges.  DEP_CHAINS points to an array of dependence chains.
   NUM_CHAINS is the size of the chain array.  One edge in a dependence
   chain is mapped to a predicate expression represented by the
   use_pred_info_t type.  One dependence chain is converted to a composite
   predicate that is the result of an AND operation on the use_pred_info_t
   mapped to each edge.  A composite predicate is represented by a vector
   of use_pred_info_t.  On return, *PREDS points to the resulting array of
   composite predicates.  *NUM_PREDS is the number of composite
   predicates.  */

static bool
convert_control_dep_chain_into_preds (VEC(edge, heap) **dep_chains,
                                      size_t num_chains,
                                      VEC(use_pred_info_t, heap) ***preds,
                                      size_t *num_preds)
{
  bool has_valid_pred = false;
  size_t i, j;

  if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
    return false;

  /* Now convert the control dep chain into a set
     of predicates.  */
  *preds = XCNEWVEC (VEC(use_pred_info_t, heap) *, num_chains);
  *num_preds = num_chains;

  for (i = 0; i < num_chains; i++)
    {
      VEC(edge, heap) *one_cd_chain = dep_chains[i];

      has_valid_pred = false;
      for (j = 0; j < VEC_length (edge, one_cd_chain); j++)
        {
          gimple cond_stmt;
          gimple_stmt_iterator gsi;
          basic_block guard_bb;
          use_pred_info_t one_pred;
          edge e;

          e = VEC_index (edge, one_cd_chain, j);
          guard_bb = e->src;
          gsi = gsi_last_bb (guard_bb);
          if (gsi_end_p (gsi))
            {
              has_valid_pred = false;
              break;
            }
          cond_stmt = gsi_stmt (gsi);
          if (gimple_code (cond_stmt) == GIMPLE_CALL
              && EDGE_COUNT (e->src->succs) >= 2)
            {
              /* Ignore EH edge.  Can add assertion
                 on the other edge's flag.  */
              continue;
            }
          /* Skip if there is essentially one successor.  */
          if (EDGE_COUNT (e->src->succs) == 2)
            {
              edge e1;
              edge_iterator ei1;
              bool skip = false;

              FOR_EACH_EDGE (e1, ei1, e->src->succs)
                {
                  if (EDGE_COUNT (e1->dest->succs) == 0)
                    {
                      skip = true;
                      break;
                    }
                }
              if (skip)
                continue;
            }
          if (gimple_code (cond_stmt) != GIMPLE_COND)
            {
              has_valid_pred = false;
              break;
            }
          one_pred = XNEW (struct use_pred_info);
          one_pred->cond = cond_stmt;
          one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
          VEC_safe_push (use_pred_info_t, heap, (*preds)[i], one_pred);
          has_valid_pred = true;
        }

      if (!has_valid_pred)
        break;
    }
  return has_valid_pred;
}
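
/* Illustrative note (not from the original sources): continuing the CFG
   sketched after compute_control_dep_chain, the single chain
   { bb1->bb2, bb2->bb3 } is converted above into the composite predicate
   (a != 0) .AND. (b != 0), one use_pred_info_t per edge; multiple chains
   yield multiple composite predicates that are implicitly ORed together.  */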

/* Computes all control dependence chains for USE_BB.  The control
   dependence chains are then converted to an array of composite
   predicates pointed to by PREDS.  PHI_BB is the basic block of
   the phi whose result is used in USE_BB.  */

static bool
find_predicates (VEC(use_pred_info_t, heap) ***preds,
                 size_t *num_preds,
                 basic_block phi_bb,
                 basic_block use_bb)
{
  size_t num_chains = 0, i;
  VEC(edge, heap) **dep_chains = 0;
  VEC(edge, heap) *cur_chain = 0;
  bool has_valid_pred = false;
  basic_block cd_root = 0;

  dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);

  /* First find the closest bb that is control equivalent to PHI_BB
     that also dominates USE_BB.  */
  cd_root = phi_bb;
  while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
    {
      basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
      if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
        cd_root = ctrl_eq_bb;
      else
        break;
    }

  compute_control_dep_chain (cd_root, use_bb,
                             dep_chains, &num_chains,
                             &cur_chain);

  has_valid_pred
      = convert_control_dep_chain_into_preds (dep_chains,
                                              num_chains,
                                              preds,
                                              num_preds);
  /* Free individual chain  */
  VEC_free (edge, heap, cur_chain);
  for (i = 0; i < num_chains; i++)
    VEC_free (edge, heap, dep_chains[i]);
  free (dep_chains);
  return has_valid_pred;
}

/* Computes the set of incoming edges of PHI that have non-empty
   definitions of a phi chain.  The collection will be done
   recursively on operands that are defined by phis.  CD_ROOT
   is the control dependence root.  *EDGES holds the result, and
   VISITED_PHIS is a pointer set for detecting cycles.  */

static void
collect_phi_def_edges (gimple phi, basic_block cd_root,
                       VEC(edge, heap) **edges,
                       struct pointer_set_t *visited_phis)
{
  size_t i, n;
  edge opnd_edge;
  tree opnd;

  if (pointer_set_insert (visited_phis, phi))
    return;

  n = gimple_phi_num_args (phi);
  for (i = 0; i < n; i++)
    {
      opnd_edge = gimple_phi_arg_edge (phi, i);
      opnd = gimple_phi_arg_def (phi, i);

      if (TREE_CODE (opnd) != SSA_NAME)
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
              print_gimple_stmt (dump_file, phi, 0, 0);
            }
          VEC_safe_push (edge, heap, *edges, opnd_edge);
        }
      else
        {
          gimple def = SSA_NAME_DEF_STMT (opnd);

          if (gimple_code (def) == GIMPLE_PHI
              && dominated_by_p (CDI_DOMINATORS,
                                 gimple_bb (def), cd_root))
            collect_phi_def_edges (def, cd_root, edges,
                                   visited_phis);
          else if (!ssa_undefined_value_p (opnd))
            {
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "\n[CHECK] Found def edge %d in ",
                           (int)i);
                  print_gimple_stmt (dump_file, phi, 0, 0);
                }
              VEC_safe_push (edge, heap, *edges, opnd_edge);
            }
        }
    }
}

/* For each def edge of PHI, computes all control dependence chains.
   The control dependence chains are then converted to an array of
   composite predicates pointed to by PREDS.  */

static bool
find_def_preds (VEC(use_pred_info_t, heap) ***preds,
                size_t *num_preds, gimple phi)
{
  size_t num_chains = 0, i, n;
  VEC(edge, heap) **dep_chains = 0;
  VEC(edge, heap) *cur_chain = 0;
  VEC(edge, heap) *def_edges = 0;
  bool has_valid_pred = false;
  basic_block phi_bb, cd_root = 0;
  struct pointer_set_t *visited_phis;

  dep_chains = XCNEWVEC (VEC(edge, heap) *, MAX_NUM_CHAINS);

  phi_bb = gimple_bb (phi);
  /* First find the closest dominating bb to be
     the control dependence root.  */
  cd_root = find_dom (phi_bb);

  visited_phis = pointer_set_create ();
  collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
  pointer_set_destroy (visited_phis);

  n = VEC_length (edge, def_edges);

  for (i = 0; i < n; i++)
    {
      size_t prev_nc, j;
      edge opnd_edge;

      opnd_edge = VEC_index (edge, def_edges, i);
      prev_nc = num_chains;
      compute_control_dep_chain (cd_root, opnd_edge->src,
                                 dep_chains, &num_chains,
                                 &cur_chain);
      /* Free individual chain  */
      VEC_free (edge, heap, cur_chain);

      /* Now update the newly added chains with
         the phi operand edge:  */
      if (EDGE_COUNT (opnd_edge->src->succs) > 1)
        {
          if (prev_nc == num_chains
              && num_chains < MAX_NUM_CHAINS)
            num_chains++;
          for (j = prev_nc; j < num_chains; j++)
            {
              VEC_safe_push (edge, heap, dep_chains[j], opnd_edge);
            }
        }
    }

  has_valid_pred
      = convert_control_dep_chain_into_preds (dep_chains,
                                              num_chains,
                                              preds,
                                              num_preds);
  for (i = 0; i < num_chains; i++)
    VEC_free (edge, heap, dep_chains[i]);
  free (dep_chains);
  return has_valid_pred;
}

/* Dumps the predicates (PREDS) for USESTMT.  */

static void
dump_predicates (gimple usestmt, size_t num_preds,
                 VEC(use_pred_info_t, heap) **preds,
                 const char *msg)
{
  size_t i, j;
  VEC(use_pred_info_t, heap) *one_pred_chain;

  fprintf (dump_file, "%s", msg);
  print_gimple_stmt (dump_file, usestmt, 0, 0);
  fprintf (dump_file, "is guarded by :\n");
  /* Do some dumping here:  */
  for (i = 0; i < num_preds; i++)
    {
      size_t np;

      one_pred_chain = preds[i];
      np = VEC_length (use_pred_info_t, one_pred_chain);

      for (j = 0; j < np; j++)
        {
          use_pred_info_t one_pred
              = VEC_index (use_pred_info_t, one_pred_chain, j);
          if (one_pred->invert)
            fprintf (dump_file, " (.NOT.) ");
          print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
          if (j < np - 1)
            fprintf (dump_file, "(.AND.)\n");
        }
      if (i < num_preds - 1)
        fprintf (dump_file, "(.OR.)\n");
    }
}

/* Destroys the predicate set *PREDS.  */

static void
destroy_predicate_vecs (size_t n,
                        VEC(use_pred_info_t, heap) **preds)
{
  size_t i, j;

  for (i = 0; i < n; i++)
    {
      for (j = 0; j < VEC_length (use_pred_info_t, preds[i]); j++)
        free (VEC_index (use_pred_info_t, preds[i], j));
      VEC_free (use_pred_info_t, heap, preds[i]);
    }
  free (preds);
}

/* Computes the 'normalized' conditional code with operand
   swapping and condition inversion.  */

static enum tree_code
get_cmp_code (enum tree_code orig_cmp_code,
              bool swap_cond, bool invert)
{
  enum tree_code tc = orig_cmp_code;

  if (swap_cond)
    tc = swap_tree_comparison (orig_cmp_code);
  if (invert)
    tc = invert_tree_comparison (tc, false);

  switch (tc)
    {
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
    case EQ_EXPR:
    case NE_EXPR:
      break;
    default:
      return ERROR_MARK;
    }
  return tc;
}

/* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
   all values in the range satisfy (x CMPC BOUNDARY) == true.  */

static bool
is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
{
  bool inverted = false;
  bool is_unsigned;
  bool result;

  /* Only handle integer constant here.  */
  if (TREE_CODE (val) != INTEGER_CST
      || TREE_CODE (boundary) != INTEGER_CST)
    return true;

  is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));

  if (cmpc == GE_EXPR || cmpc == GT_EXPR
      || cmpc == NE_EXPR)
    {
      cmpc = invert_tree_comparison (cmpc, false);
      inverted = true;
    }

  if (is_unsigned)
    {
      if (cmpc == EQ_EXPR)
        result = tree_int_cst_equal (val, boundary);
      else if (cmpc == LT_EXPR)
        result = INT_CST_LT_UNSIGNED (val, boundary);
      else
        {
          gcc_assert (cmpc == LE_EXPR);
          result = (tree_int_cst_equal (val, boundary)
                    || INT_CST_LT_UNSIGNED (val, boundary));
        }
    }
  else
    {
      if (cmpc == EQ_EXPR)
        result = tree_int_cst_equal (val, boundary);
      else if (cmpc == LT_EXPR)
        result = INT_CST_LT (val, boundary);
      else
        {
          gcc_assert (cmpc == LE_EXPR);
          result = (tree_int_cst_equal (val, boundary)
                    || INT_CST_LT (val, boundary));
        }
    }

  if (inverted)
    result = !result;

  return result;
}
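
/* Illustrative note (not from the original sources): by the contract above,
   is_value_included_in (3, 5, LT_EXPR) is true because 3 < 5, while
   is_value_included_in (7, 5, LT_EXPR) is false; the constants are compared
   with the signedness of VAL's type.  */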

/* Returns true if PRED is common among all the predicate
   chains (PREDS) (and therefore can be factored out).
   NUM_PRED_CHAINS is the size of array PREDS.  */

static bool
find_matching_predicate_in_rest_chains (use_pred_info_t pred,
                                        VEC(use_pred_info_t, heap) **preds,
                                        size_t num_pred_chains)
{
  size_t i, j, n;

  /* Trivial case.  */
  if (num_pred_chains == 1)
    return true;

  for (i = 1; i < num_pred_chains; i++)
    {
      bool found = false;
      VEC(use_pred_info_t, heap) *one_chain = preds[i];
      n = VEC_length (use_pred_info_t, one_chain);
      for (j = 0; j < n; j++)
        {
          use_pred_info_t pred2
              = VEC_index (use_pred_info_t, one_chain, j);
          /* Can relax the condition comparison to not
             use address comparison.  However, the most common
             case is that multiple control dependent paths share
             a common path prefix, so address comparison should
             be ok.  */

          if (pred2->cond == pred->cond
              && pred2->invert == pred->invert)
            {
              found = true;
              break;
            }
        }
      if (!found)
        return false;
    }
  return true;
}

/* Forward declaration.  */
static bool
is_use_properly_guarded (gimple use_stmt,
                         basic_block use_bb,
                         gimple phi,
                         unsigned uninit_opnds,
                         struct pointer_set_t *visited_phis);

/* Returns true if all uninitialized opnds are pruned.  Returns false
   otherwise.  PHI is the phi node with uninitialized operands,
   UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
   FLAG_DEF is the statement defining the flag guarding the use of the
   PHI output, BOUNDARY_CST is the const value used in the predicate
   associated with the flag, CMP_CODE is the comparison code used in
   the predicate, VISITED_PHIS is the pointer set of phis visited, and
   VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
   that are also phis.

     flag_1 = phi <0, 1>			// (1)
     var_1  = phi <undef, some_val>

     flag_2 = phi <0, flag_1, flag_1>		// (2)
     var_2  = phi <undef, var_1, var_1>

   Because some flag arg in (1) is not constant, if we do not look into
   the flag phis recursively, it is conservatively treated as unknown and
   var_1 is thought to flow into the use at (3).  Since var_1 is
   potentially uninitialized, a false warning will be emitted.  Checking
   recursively into (1), the compiler can find out that only some_val
   (which is defined) can flow into (3), which is OK.  */

static bool
prune_uninit_phi_opnds_in_unrealizable_paths (
    gimple phi, unsigned uninit_opnds,
    gimple flag_def, tree boundary_cst,
    enum tree_code cmp_code,
    struct pointer_set_t *visited_phis,
    bitmap *visited_flag_phis)
{
  unsigned i;

  for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
    {
      tree flag_arg;

      if (!MASK_TEST_BIT (uninit_opnds, i))
        continue;

      flag_arg = gimple_phi_arg_def (flag_def, i);
      if (!is_gimple_constant (flag_arg))
        {
          gimple flag_arg_def, phi_arg_def;
          tree phi_arg;
          unsigned uninit_opnds_arg_phi;

          if (TREE_CODE (flag_arg) != SSA_NAME)
            return false;
          flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
          if (gimple_code (flag_arg_def) != GIMPLE_PHI)
            return false;

          phi_arg = gimple_phi_arg_def (phi, i);
          if (TREE_CODE (phi_arg) != SSA_NAME)
            return false;

          phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
          if (gimple_code (phi_arg_def) != GIMPLE_PHI)
            return false;

          if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
            return false;

          if (!*visited_flag_phis)
            *visited_flag_phis = BITMAP_ALLOC (NULL);

          if (bitmap_bit_p (*visited_flag_phis,
                            SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
            return false;

          bitmap_set_bit (*visited_flag_phis,
                          SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));

          /* Now recursively prune the uninitialized phi args.  */
          uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
          if (!prune_uninit_phi_opnds_in_unrealizable_paths (
                  phi_arg_def, uninit_opnds_arg_phi,
                  flag_arg_def, boundary_cst, cmp_code,
                  visited_phis, visited_flag_phis))
            return false;

          bitmap_clear_bit (*visited_flag_phis,
                            SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
          continue;
        }

      /* Now check if the constant is in the guarded range.  */
      if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
        {
          tree opnd;
          gimple opnd_def;

          /* Now we know that this undefined edge is not
             pruned.  If the operand is defined by another phi,
             we can further prune the incoming edges of that
             phi by checking the predicates of this operand.  */

          opnd = gimple_phi_arg_def (phi, i);
          opnd_def = SSA_NAME_DEF_STMT (opnd);
          if (gimple_code (opnd_def) == GIMPLE_PHI)
            {
              edge opnd_edge;
              unsigned uninit_opnds2
                  = compute_uninit_opnds_pos (opnd_def);
              gcc_assert (!MASK_EMPTY (uninit_opnds2));
              opnd_edge = gimple_phi_arg_edge (phi, i);
              if (!is_use_properly_guarded (phi,
                                            opnd_edge->src,
                                            opnd_def,
                                            uninit_opnds2,
                                            visited_phis))
                return false;
            }
          else
            return false;
        }
    }

  return true;
}

/* A helper function that determines if the predicate set
   of the use is not overlapping with that of the uninit paths.
   The most common scenario of guarded use is in Example 1:

   The real world examples are usually more complicated, but similar
   and usually result from inlining:

     bool init_func (int * x)

   Another possible use scenario is in the following trivial example:

   Predicate analysis needs to compute the composite predicate:

     1) 'x' use predicate: (n > 0) .AND. (m < 2)
     2) 'x' default value (non-def) predicate: .NOT. (n > 0)
     (the predicate chain for phi operand defs can be computed
     starting from a bb that is control equivalent to the phi's
     bb and is dominating the operand def.)

   and check overlapping:
     (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))

   This implementation provides a framework that can handle such
   scenarios.  (Note that many simple cases are handled properly
   without the predicate analysis -- this is due to the jump threading
   transformation which eliminates the merge point, thus making
   path sensitive analysis unnecessary.)

   NUM_PREDS is the number of predicate chains, PREDS is
   the array of chains, PHI is the phi node whose incoming (undefined)
   paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
   uninit operand positions.  VISITED_PHIS is the pointer set of phi
   stmts being checked.  */

static bool
use_pred_not_overlap_with_undef_path_pred (
    size_t num_preds,
    VEC(use_pred_info_t, heap) **preds,
    gimple phi, unsigned uninit_opnds,
    struct pointer_set_t *visited_phis)
{
  unsigned int i, n;
  gimple flag_def = 0;
  tree boundary_cst = 0;
  enum tree_code cmp_code;
  bool swap_cond = false;
  bool invert = false;
  VEC(use_pred_info_t, heap) *the_pred_chain;
  bitmap visited_flag_phis = NULL;
  bool all_pruned = false;

  gcc_assert (num_preds > 0);
  /* Find within the common prefix of multiple predicate chains
     a predicate that is a comparison of a flag variable against
     a constant.  */
  the_pred_chain = preds[0];
  n = VEC_length (use_pred_info_t, the_pred_chain);
  for (i = 0; i < n; i++)
    {
      gimple cond;
      tree cond_lhs, cond_rhs, flag = 0;
      use_pred_info_t the_pred
          = VEC_index (use_pred_info_t, the_pred_chain, i);

      cond = the_pred->cond;
      invert = the_pred->invert;
      cond_lhs = gimple_cond_lhs (cond);
      cond_rhs = gimple_cond_rhs (cond);
      cmp_code = gimple_cond_code (cond);

      if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
          && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
        {
          boundary_cst = cond_rhs;
          flag = cond_lhs;
        }
      else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
               && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
        {
          boundary_cst = cond_lhs;
          flag = cond_rhs;
          swap_cond = true;
        }

      if (!flag)
        continue;

      flag_def = SSA_NAME_DEF_STMT (flag);

      if (!flag_def)
        continue;

      if ((gimple_code (flag_def) == GIMPLE_PHI)
          && (gimple_bb (flag_def) == gimple_bb (phi))
          && find_matching_predicate_in_rest_chains (
              the_pred, preds, num_preds))
        break;

      flag_def = 0;
    }

  if (!flag_def)
    return false;

  /* Now check that all the uninit incoming edges have a constant flag
     value that is in conflict with the use guard/predicate.  */
  cmp_code = get_cmp_code (cmp_code, swap_cond, invert);

  if (cmp_code == ERROR_MARK)
    return false;

  all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
                                                             uninit_opnds,
                                                             flag_def,
                                                             boundary_cst,
                                                             cmp_code,
                                                             visited_phis,
                                                             &visited_flag_phis);

  if (visited_flag_phis)
    BITMAP_FREE (visited_flag_phis);

  return all_pruned;
}

/* Returns true if TC is AND or OR.  */

static bool
is_and_or_or (enum tree_code tc, tree typ)
{
  return (tc == TRUTH_AND_EXPR
          || tc == TRUTH_OR_EXPR
          || tc == BIT_IOR_EXPR
          || (tc == BIT_AND_EXPR
              && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
}

typedef struct norm_cond
{
  VEC(gimple, heap) *conds;
  enum tree_code cond_code;
  bool invert;
} *norm_cond_t;

/* Normalizes gimple condition COND.  The normalization follows
   UD chains to form larger condition expression trees.  NORM_COND
   holds the normalized result.  COND_CODE is the logical opcode
   (AND or OR) of the normalized tree.  */

static void
normalize_cond_1 (gimple cond,
                  norm_cond_t norm_cond,
                  enum tree_code cond_code)
{
  enum gimple_code gc;
  enum tree_code cur_cond_code;
  tree rhs1, rhs2;

  gc = gimple_code (cond);
  if (gc != GIMPLE_ASSIGN)
    {
      VEC_safe_push (gimple, heap, norm_cond->conds, cond);
      return;
    }

  cur_cond_code = gimple_assign_rhs_code (cond);
  rhs1 = gimple_assign_rhs1 (cond);
  rhs2 = gimple_assign_rhs2 (cond);
  if (cur_cond_code == NE_EXPR)
    {
      if (integer_zerop (rhs2)
          && (TREE_CODE (rhs1) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (rhs1),
            norm_cond, cond_code);
      else if (integer_zerop (rhs1)
               && (TREE_CODE (rhs2) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (rhs2),
            norm_cond, cond_code);
      else
        VEC_safe_push (gimple, heap, norm_cond->conds, cond);

      return;
    }

  if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
      && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
      && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
    {
      normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
                        norm_cond, cur_cond_code);
      normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
                        norm_cond, cur_cond_code);
      norm_cond->cond_code = cur_cond_code;
    }
  else
    VEC_safe_push (gimple, heap, norm_cond->conds, cond);
}

/* See normalize_cond_1 for details.  INVERT is a flag to indicate
   if COND needs to be inverted or not.  */

static void
normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
{
  enum tree_code cond_code;

  norm_cond->cond_code = ERROR_MARK;
  norm_cond->invert = false;
  norm_cond->conds = NULL;
  gcc_assert (gimple_code (cond) == GIMPLE_COND);
  cond_code = gimple_cond_code (cond);
  if (invert)
    cond_code = invert_tree_comparison (cond_code, false);

  if (cond_code == NE_EXPR)
    {
      if (integer_zerop (gimple_cond_rhs (cond))
          && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
            norm_cond, ERROR_MARK);
      else if (integer_zerop (gimple_cond_lhs (cond))
               && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
        normalize_cond_1 (
            SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
            norm_cond, ERROR_MARK);
      else
        {
          VEC_safe_push (gimple, heap, norm_cond->conds, cond);
          norm_cond->invert = invert;
        }
    }
  else
    {
      VEC_safe_push (gimple, heap, norm_cond->conds, cond);
      norm_cond->invert = invert;
    }

  gcc_assert (VEC_length (gimple, norm_cond->conds) == 1
              || is_and_or_or (norm_cond->cond_code, NULL));
}

/* Returns true if the domain for condition COND1 is a subset of
   COND2.  REVERSE is a flag.  When it is true the function checks
   if COND1 is a superset of COND2.  INVERT1 and INVERT2 are flags
   to indicate if COND1 and COND2 need to be inverted or not.  */

static bool
is_gcond_subset_of (gimple cond1, bool invert1,
                    gimple cond2, bool invert2,
                    bool reverse)
{
  enum gimple_code gc1, gc2;
  enum tree_code cond1_code, cond2_code;
  tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;

  /* Take the short cut.  */
  if (cond1 == cond2 && invert1 == invert2)
    return true;

  gc1 = gimple_code (cond1);
  gc2 = gimple_code (cond2);

  if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
      || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
    return cond1 == cond2;

  cond1_code = ((gc1 == GIMPLE_ASSIGN)
                ? gimple_assign_rhs_code (cond1)
                : gimple_cond_code (cond1));

  cond2_code = ((gc2 == GIMPLE_ASSIGN)
                ? gimple_assign_rhs_code (cond2)
                : gimple_cond_code (cond2));

  if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
      || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
    return false;

  if (invert1)
    cond1_code = invert_tree_comparison (cond1_code, false);
  if (invert2)
    cond2_code = invert_tree_comparison (cond2_code, false);

  cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs1 (cond1)
               : gimple_cond_lhs (cond1));
  cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs2 (cond1)
               : gimple_cond_rhs (cond1));
  cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs1 (cond2)
               : gimple_cond_lhs (cond2));
  cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
               ? gimple_assign_rhs2 (cond2)
               : gimple_cond_rhs (cond2));

  /* Assuming const operands have been swapped to the
     rhs at this point of the analysis.  */

  if (cond1_lhs != cond2_lhs)
    return false;

  if (!is_gimple_constant (cond1_rhs)
      || TREE_CODE (cond1_rhs) != INTEGER_CST)
    return (cond1_rhs == cond2_rhs);

  if (!is_gimple_constant (cond2_rhs)
      || TREE_CODE (cond2_rhs) != INTEGER_CST)
    return (cond1_rhs == cond2_rhs);

  if (cond1_code == EQ_EXPR)
    return is_value_included_in (cond1_rhs,
                                 cond2_rhs, cond2_code);
  if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
    return ((cond2_code == cond1_code)
            && tree_int_cst_equal (cond1_rhs, cond2_rhs));

  if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
       && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
      || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
          && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
    return false;

  if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
      && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
    return false;

  if (cond1_code == GT_EXPR)
    {
      cond1_code = GE_EXPR;
      cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
                               cond1_rhs,
                               fold_convert (TREE_TYPE (cond1_rhs),
                                             integer_one_node));
    }
  else if (cond1_code == LT_EXPR)
    {
      cond1_code = LE_EXPR;
      cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
                               cond1_rhs,
                               fold_convert (TREE_TYPE (cond1_rhs),
                                             integer_one_node));
    }

  if (!cond1_rhs)
    return false;

  gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);

  if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
      cond2_code == LE_EXPR || cond2_code == LT_EXPR)
    return is_value_included_in (cond1_rhs,
                                 cond2_rhs, cond2_code);
  else if (cond2_code == NE_EXPR)
    return
        (is_value_included_in (cond1_rhs,
                               cond2_rhs, cond2_code)
         && !is_value_included_in (cond2_rhs,
                                   cond1_rhs, cond1_code));
  return false;
}

/* Returns true if the domain of the condition expression
   in COND is a subset of any of the sub-conditions
   of the normalized condition NORM_COND.  INVERT is a flag
   to indicate whether COND needs to be inverted.
   REVERSE is a flag.  When it is true, the check is reversed --
   it returns true if COND is a superset of any of the subconditions
   of NORM_COND.  */

static bool
is_subset_of_any (gimple cond, bool invert,
                  norm_cond_t norm_cond, bool reverse)
{
  size_t i;
  size_t len = VEC_length (gimple, norm_cond->conds);

  for (i = 0; i < len; i++)
    {
      if (is_gcond_subset_of (cond, invert,
                              VEC_index (gimple, norm_cond->conds, i),
                              false, reverse))
        return true;
    }
  return false;
}

/* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
   expressions (formed by following UD chains not control
   dependence chains).  The function returns true if the domain of
   the OR expression NORM_COND1 is a subset of NORM_COND2's.
   The implementation is conservative, and it returns false if
   the inclusion relationship may not hold.  */

static bool
is_or_set_subset_of (norm_cond_t norm_cond1,
                     norm_cond_t norm_cond2)
{
  size_t i;
  size_t len = VEC_length (gimple, norm_cond1->conds);

  for (i = 0; i < len; i++)
    {
      if (!is_subset_of_any (VEC_index (gimple, norm_cond1->conds, i),
                             false, norm_cond2, false))
        return false;
    }
  return true;
}

/* NORM_COND1 and NORM_COND2 are normalized logical AND
   expressions (formed by following UD chains not control
   dependence chains).  The function returns true if the domain of
   the AND expression NORM_COND1 is a subset of NORM_COND2's.  */

static bool
is_and_set_subset_of (norm_cond_t norm_cond1,
                      norm_cond_t norm_cond2)
{
  size_t i;
  size_t len = VEC_length (gimple, norm_cond2->conds);

  for (i = 0; i < len; i++)
    {
      if (!is_subset_of_any (VEC_index (gimple, norm_cond2->conds, i),
                             false, norm_cond1, true))
        return false;
    }
  return true;
}

/* Returns true if the domain of NORM_COND1 is a subset
   of that of NORM_COND2.  Returns false if it can not be
   proved.  */

static bool
is_norm_cond_subset_of (norm_cond_t norm_cond1,
                        norm_cond_t norm_cond2)
{
  size_t i;
  enum tree_code code1, code2;

  code1 = norm_cond1->cond_code;
  code2 = norm_cond2->cond_code;

  if (code1 == TRUTH_AND_EXPR || code1 == BIT_AND_EXPR)
    {
      /* Both conditions are AND expressions.  */
      if (code2 == TRUTH_AND_EXPR || code2 == BIT_AND_EXPR)
        return is_and_set_subset_of (norm_cond1, norm_cond2);
      /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
         expression.  In this case, returns true if any subexpression
         of NORM_COND1 is a subset of any subexpression of NORM_COND2.  */
      else if (code2 == TRUTH_OR_EXPR || code2 == BIT_IOR_EXPR)
        {
          size_t len1;
          len1 = VEC_length (gimple, norm_cond1->conds);
          for (i = 0; i < len1; i++)
            {
              gimple cond1 = VEC_index (gimple, norm_cond1->conds, i);
              if (is_subset_of_any (cond1, false, norm_cond2, false))
                return true;
            }
          return false;
        }
      else
        {
          gcc_assert (code2 == ERROR_MARK);
          gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
          return is_subset_of_any (VEC_index (gimple, norm_cond2->conds, 0),
                                   norm_cond2->invert, norm_cond1, true);
        }
    }
  /* NORM_COND1 is an OR expression  */
  else if (code1 == TRUTH_OR_EXPR || code1 == BIT_IOR_EXPR)
    {
      if (code2 != TRUTH_OR_EXPR && code2 != BIT_IOR_EXPR)
        return false;

      return is_or_set_subset_of (norm_cond1, norm_cond2);
    }
  else
    {
      gcc_assert (code1 == ERROR_MARK);
      gcc_assert (VEC_length (gimple, norm_cond1->conds) == 1);
      /* Conservatively returns false if NORM_COND1 is non-decomposable
         and NORM_COND2 is an AND expression.  */
      if (code2 == TRUTH_AND_EXPR || code2 == BIT_AND_EXPR)
        return false;

      if (code2 == TRUTH_OR_EXPR || code2 == BIT_IOR_EXPR)
        return is_subset_of_any (VEC_index (gimple, norm_cond1->conds, 0),
                                 norm_cond1->invert, norm_cond2, false);

      gcc_assert (code2 == ERROR_MARK);
      gcc_assert (VEC_length (gimple, norm_cond2->conds) == 1);
      return is_gcond_subset_of (VEC_index (gimple, norm_cond1->conds, 0),
                                 norm_cond1->invert,
                                 VEC_index (gimple, norm_cond2->conds, 0),
                                 norm_cond2->invert, false);
    }
}

/* Returns true if the domain of a single predicate expression
   EXPR1 is a subset of that of EXPR2.  Returns false if it
   can not be proved.  */

static bool
is_pred_expr_subset_of (use_pred_info_t expr1,
                        use_pred_info_t expr2)
{
  gimple cond1, cond2;
  enum tree_code code1, code2;
  struct norm_cond norm_cond1, norm_cond2;
  bool is_subset = false;

  cond1 = expr1->cond;
  cond2 = expr2->cond;
  code1 = gimple_cond_code (cond1);
  code2 = gimple_cond_code (cond2);

  if (expr1->invert)
    code1 = invert_tree_comparison (code1, false);
  if (expr2->invert)
    code2 = invert_tree_comparison (code2, false);

  /* Fast path -- match exactly.  */
  if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
      && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
      && (code1 == code2))
    return true;

  /* Normalize conditions.  To keep NE_EXPR, do not invert
     when both need inversion.  */
  normalize_cond (cond1, &norm_cond1, (expr1->invert));
  normalize_cond (cond2, &norm_cond2, (expr2->invert));

  is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);

  /* Free memory.  */
  VEC_free (gimple, heap, norm_cond1.conds);
  VEC_free (gimple, heap, norm_cond2.conds);
  return is_subset;
}

/* Returns true if the domain of PRED1 is a subset
   of that of PRED2.  Returns false if it can not be proved so.  */

static bool
is_pred_chain_subset_of (VEC(use_pred_info_t, heap) *pred1,
                         VEC(use_pred_info_t, heap) *pred2)
{
  size_t np1, np2, i1, i2;

  np1 = VEC_length (use_pred_info_t, pred1);
  np2 = VEC_length (use_pred_info_t, pred2);

  for (i2 = 0; i2 < np2; i2++)
    {
      bool found = false;
      use_pred_info_t info2
          = VEC_index (use_pred_info_t, pred2, i2);
      for (i1 = 0; i1 < np1; i1++)
        {
          use_pred_info_t info1
              = VEC_index (use_pred_info_t, pred1, i1);
          if (is_pred_expr_subset_of (info1, info2))
            {
              found = true;
              break;
            }
        }
      if (!found)
        return false;
    }
  return true;
}

/* Returns true if the domain defined by
   one pred chain ONE_PRED is a subset of the domain
   of *PREDS.  It returns false if ONE_PRED's domain is
   not a subset of any of the sub-domains of PREDS
   (corresponding to each individual chain in it), even
   though it may still be a subset of the whole domain
   of PREDS, which is the union (ORed) of all its subdomains.
   In other words, the result is conservative.  */

static bool
is_included_in (VEC(use_pred_info_t, heap) *one_pred,
                VEC(use_pred_info_t, heap) **preds,
                size_t n)
{
  size_t i;

  for (i = 0; i < n; i++)
    {
      if (is_pred_chain_subset_of (one_pred, preds[i]))
        return true;
    }

  return false;
}

/* Compares two predicate sets PREDS1 and PREDS2 and returns
   true if the domain defined by PREDS1 is a superset
   of PREDS2's domain.  N1 and N2 are array sizes of PREDS1 and
   PREDS2 respectively.  The implementation chooses not to build
   generic trees (relying on the folding capability of the
   compiler), but instead performs brute force comparison of
   individual predicate chains (this won't be a compile time problem
   as the chains are pretty short).  When the function returns
   false, it does not necessarily mean *PREDS1 is not a superset
   of *PREDS2, only that it may not be so, since the analysis can
   not prove it.  In such cases, false warnings may still be
   emitted.  */

static bool
is_superset_of (VEC(use_pred_info_t, heap) **preds1,
                size_t n1,
                VEC(use_pred_info_t, heap) **preds2,
                size_t n2)
{
  size_t i;
  VEC(use_pred_info_t, heap) *one_pred_chain;

  for (i = 0; i < n2; i++)
    {
      one_pred_chain = preds2[i];
      if (!is_included_in (one_pred_chain, preds1, n1))
        return false;
    }

  return true;
}

/* Comparison function used by qsort.  It is used to
   sort predicate chains to allow predicate
   normalization.  */

static int
pred_chain_length_cmp (const void *p1, const void *p2)
{
  use_pred_info_t i1, i2;
  VEC(use_pred_info_t, heap) * const *chain1
      = (VEC(use_pred_info_t, heap) * const *)p1;
  VEC(use_pred_info_t, heap) * const *chain2
      = (VEC(use_pred_info_t, heap) * const *)p2;

  if (VEC_length (use_pred_info_t, *chain1)
      != VEC_length (use_pred_info_t, *chain2))
    return (VEC_length (use_pred_info_t, *chain1)
            - VEC_length (use_pred_info_t, *chain2));

  i1 = VEC_index (use_pred_info_t, *chain1, 0);
  i2 = VEC_index (use_pred_info_t, *chain2, 0);

  /* Allow predicates with a similar prefix to come together.  */
  if (!i1->invert && i2->invert)
    return -1;
  else if (i1->invert && !i2->invert)
    return 1;

  return gimple_uid (i1->cond) - gimple_uid (i2->cond);
}

/* x OR (!x AND y) is equivalent to x OR y.
   This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
   into x1 OR x2 OR x3.  PREDS is the predicate chains, and N is
   the number of chains.  Returns true if normalization happens.  */
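
/* A quick justification of the identity used above (not from the original
   sources):  x | (~x & y) == (x | ~x) & (x | y) == x | y  by distributing
   OR over AND; applying it repeatedly strips the !x1 ... !x_k prefix from
   every chain.  */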

static bool
normalize_preds (VEC(use_pred_info_t, heap) **preds, size_t *n)
{
  size_t i, j, ll;
  VEC(use_pred_info_t, heap) *pred_chain;
  VEC(use_pred_info_t, heap) *x = 0;
  use_pred_info_t xj = 0, nxj = 0;

  if (*n < 2)
    return false;

  /* First sort the chains in ascending order of lengths.  */
  qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
  pred_chain = preds[0];
  ll = VEC_length (use_pred_info_t, pred_chain);
  if (ll != 1)
    {
      if (ll == 2)
        {
          use_pred_info_t xx, yy, xx2, nyy;
          VEC(use_pred_info_t, heap) *pred_chain2 = preds[1];
          if (VEC_length (use_pred_info_t, pred_chain2) != 2)
            return false;

          /* See if simplification x AND y OR x AND !y is possible.  */
          xx = VEC_index (use_pred_info_t, pred_chain, 0);
          yy = VEC_index (use_pred_info_t, pred_chain, 1);
          xx2 = VEC_index (use_pred_info_t, pred_chain2, 0);
          nyy = VEC_index (use_pred_info_t, pred_chain2, 1);
          if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
              || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
              || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
              || (xx->invert != xx2->invert))
            return false;
          if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
              || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
              || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
              || (yy->invert == nyy->invert))
            return false;

          /* Now merge the first two chains.  */
          free (yy);
          free (nyy);
          free (xx2);
          VEC_free (use_pred_info_t, heap, pred_chain);
          VEC_free (use_pred_info_t, heap, pred_chain2);

          VEC_safe_push (use_pred_info_t, heap, pred_chain, xx);
          preds[0] = pred_chain;
          for (i = 1; i < *n - 1; i++)
            preds[i] = preds[i + 1];

          preds[*n - 1] = 0;
          *n = *n - 1;
        }
      else
        return false;
    }

  VEC_safe_push (use_pred_info_t, heap, x,
                 VEC_index (use_pred_info_t, pred_chain, 0));

  /* The loop extracts x1, x2, x3, etc from chains
     x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ...  */
  for (i = 1; i < *n; i++)
    {
      pred_chain = preds[i];
      if (VEC_length (use_pred_info_t, pred_chain) != i + 1)
        return false;

      for (j = 0; j < i; j++)
        {
          xj = VEC_index (use_pred_info_t, x, j);
          nxj = VEC_index (use_pred_info_t, pred_chain, j);

          /* Check if nxj is !xj  */
          if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
              || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
              || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
              || (xj->invert == nxj->invert))
            return false;
        }

      VEC_safe_push (use_pred_info_t, heap, x,
                     VEC_index (use_pred_info_t, pred_chain, i));
    }

  /* Now normalize the pred chains using the extracted x1, x2, x3 etc.  */
  for (j = 0; j < *n; j++)
    {
      use_pred_info_t t;
      xj = VEC_index (use_pred_info_t, x, j);

      t = XNEW (struct use_pred_info);
      *t = *xj;

      VEC_replace (use_pred_info_t, x, j, t);
    }

  for (i = 0; i < *n; i++)
    {
      pred_chain = preds[i];
      for (j = 0; j < VEC_length (use_pred_info_t, pred_chain); j++)
        free (VEC_index (use_pred_info_t, pred_chain, j));
      VEC_free (use_pred_info_t, heap, pred_chain);

      /* A new chain with a single predicate.  */
      VEC_safe_push (use_pred_info_t, heap, pred_chain,
                     VEC_index (use_pred_info_t, x, i));
      preds[i] = pred_chain;
    }

  VEC_free (use_pred_info_t, heap, x);
  return true;
}

/* Computes the predicates that guard the use and checks
   if the incoming paths that have an empty (or possibly
   empty) definition can be pruned/filtered.  The function returns
   true if it can be determined that the use of PHI's def in
   USE_STMT is guarded by a predicate set not overlapping with
   the predicate sets of all runtime paths that do not have a definition.
   Returns false if it is not, or if it can not be determined.  USE_BB is
   the bb of the use (for a phi operand use, the bb is not the bb of
   the phi stmt, but the src bb of the operand edge).  UNINIT_OPNDS
   is a bit vector.  If an operand of PHI is uninitialized, the
   corresponding bit in the vector is 1.  VISITED_PHIS is a pointer
   set of phis being visited.  */

static bool
is_use_properly_guarded (gimple use_stmt,
                         basic_block use_bb,
                         gimple phi,
                         unsigned uninit_opnds,
                         struct pointer_set_t *visited_phis)
{
  basic_block phi_bb;
  VEC(use_pred_info_t, heap) **preds = 0;
  VEC(use_pred_info_t, heap) **def_preds = 0;
  size_t num_preds = 0, num_def_preds = 0;
  bool has_valid_preds = false;
  bool is_properly_guarded = false;

  if (pointer_set_insert (visited_phis, phi))
    return false;

  phi_bb = gimple_bb (phi);

  if (is_non_loop_exit_postdominating (use_bb, phi_bb))
    return false;

  has_valid_preds = find_predicates (&preds, &num_preds,
                                     phi_bb, use_bb);

  if (!has_valid_preds)
    {
      destroy_predicate_vecs (num_preds, preds);
      return false;
    }

  if (dump_file)
    dump_predicates (use_stmt, num_preds, preds,
                     "Use in stmt ");

  has_valid_preds = find_def_preds (&def_preds,
                                    &num_def_preds, phi);

  if (has_valid_preds)
    {
      bool normed;
      if (dump_file)
        dump_predicates (phi, num_def_preds, def_preds,
                         "Operand defs of phi ");

      normed = normalize_preds (def_preds, &num_def_preds);
      if (normed && dump_file)
        {
          fprintf (dump_file, "\nNormalized to\n");
          dump_predicates (phi, num_def_preds, def_preds,
                           "Operand defs of phi ");
        }
      is_properly_guarded =
          is_superset_of (def_preds, num_def_preds,
                          preds, num_preds);
    }

  /* Further prune the dead incoming phi edges.  */
  if (!is_properly_guarded)
    is_properly_guarded
        = use_pred_not_overlap_with_undef_path_pred (
            num_preds, preds, phi, uninit_opnds, visited_phis);

  destroy_predicate_vecs (num_preds, preds);
  destroy_predicate_vecs (num_def_preds, def_preds);
  return is_properly_guarded;
}

/* Searches through all uses of a potentially
   uninitialized variable defined by PHI and returns a use
   statement if the use is not properly guarded.  It returns
   NULL if all uses are guarded.  UNINIT_OPNDS is a bitvector
   holding the position(s) of uninit PHI operands.  WORKLIST
   is the vector of candidate phis that may be updated by this
   function.  ADDED_TO_WORKLIST is the pointer set tracking
   if the new phi is already in the worklist.  */

static gimple
find_uninit_use (gimple phi, unsigned uninit_opnds,
                 VEC(gimple, heap) **worklist,
                 struct pointer_set_t *added_to_worklist)
{
  tree phi_result;
  use_operand_p use_p;
  gimple use_stmt;
  imm_use_iterator iter;

  phi_result = gimple_phi_result (phi);

  FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
    {
      struct pointer_set_t *visited_phis;
      basic_block use_bb;

      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      visited_phis = pointer_set_create ();

      if (gimple_code (use_stmt) == GIMPLE_PHI)
        use_bb = gimple_phi_arg_edge (use_stmt,
                                      PHI_ARG_INDEX_FROM_USE (use_p))->src;
      else
        use_bb = gimple_bb (use_stmt);

      if (is_use_properly_guarded (use_stmt,
                                   use_bb,
                                   phi,
                                   uninit_opnds,
                                   visited_phis))
        {
          pointer_set_destroy (visited_phis);
          continue;
        }
      pointer_set_destroy (visited_phis);

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "[CHECK]: Found unguarded use: ");
          print_gimple_stmt (dump_file, use_stmt, 0, 0);
        }
      /* Found one real use, return.  */
      if (gimple_code (use_stmt) != GIMPLE_PHI)
        return use_stmt;

      /* Found a phi use that is not guarded,
         add the phi to the worklist.  */
      if (!pointer_set_insert (added_to_worklist, use_stmt))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
              print_gimple_stmt (dump_file, use_stmt, 0, 0);
            }

          VEC_safe_push (gimple, heap, *worklist, use_stmt);
          pointer_set_insert (possibly_undefined_names,
                              phi_result);
        }
    }

  return NULL;
}

/* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
   and gives a warning if there exists a runtime path from the entry to a
   use of the PHI def that does not contain a definition.  In other words,
   the warning is on the real use.  The more dead paths that can be pruned
   by the compiler, the fewer false positives the warning has.  WORKLIST
   is a vector of candidate phis to be examined.  ADDED_TO_WORKLIST is
   a pointer set tracking if the new phi is added to the worklist or not.  */

static void
warn_uninitialized_phi (gimple phi, VEC(gimple, heap) **worklist,
                        struct pointer_set_t *added_to_worklist)
{
  unsigned uninit_opnds;
  gimple uninit_use_stmt = 0;
  tree uninit_op;

  /* Don't look at memory tags.  */
  if (!is_gimple_reg (gimple_phi_result (phi)))
    return;

  uninit_opnds = compute_uninit_opnds_pos (phi);

  if (MASK_EMPTY (uninit_opnds))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "[CHECK]: examining phi: ");
      print_gimple_stmt (dump_file, phi, 0, 0);
    }

  /* Now check if we have any use of the value without proper guard.  */
  uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
                                     worklist, added_to_worklist);

  /* All uses are properly guarded.  */
  if (!uninit_use_stmt)
    return;

  uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
  warn_uninit (uninit_op,
               "%qD may be used uninitialized in this function",
               uninit_use_stmt);
}

/* Entry point to the late uninitialized warning pass.  */

static unsigned int
execute_late_warn_uninitialized (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  VEC(gimple, heap) *worklist = 0;
  struct pointer_set_t *added_to_worklist;

  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);
  /* Re-do the plain uninitialized variable check, as optimization may have
     straightened control flow.  Do this first so that we don't accidentally
     get a "may be" warning when we'd have seen an "is" warning later.  */
  warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);

  timevar_push (TV_TREE_UNINIT);

  possibly_undefined_names = pointer_set_create ();
  added_to_worklist = pointer_set_create ();

  /* Initialize worklist  */
  FOR_EACH_BB (bb)
    for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple phi = gsi_stmt (gsi);
        size_t n, i;

        n = gimple_phi_num_args (phi);

        /* Don't look at memory tags.  */
        if (!is_gimple_reg (gimple_phi_result (phi)))
          continue;

        for (i = 0; i < n; ++i)
          {
            tree op = gimple_phi_arg_def (phi, i);
            if (TREE_CODE (op) == SSA_NAME
                && ssa_undefined_value_p (op))
              {
                VEC_safe_push (gimple, heap, worklist, phi);
                pointer_set_insert (added_to_worklist, phi);
                if (dump_file && (dump_flags & TDF_DETAILS))
                  {
                    fprintf (dump_file, "[WORKLIST]: add to initial list: ");
                    print_gimple_stmt (dump_file, phi, 0, 0);
                  }
                break;
              }
          }
      }

  while (VEC_length (gimple, worklist) != 0)
    {
      gimple cur_phi = 0;
      cur_phi = VEC_pop (gimple, worklist);
      warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
    }

  VEC_free (gimple, heap, worklist);
  pointer_set_destroy (added_to_worklist);
  pointer_set_destroy (possibly_undefined_names);
  possibly_undefined_names = NULL;
  free_dominance_info (CDI_POST_DOMINATORS);
  timevar_pop (TV_TREE_UNINIT);
  return 0;
}

static bool
gate_warn_uninitialized (void)
{
  return warn_uninitialized != 0;
}

struct gimple_opt_pass pass_late_warn_uninitialized =
{
 {
  GIMPLE_PASS,
  "uninit",				/* name */
  gate_warn_uninitialized,		/* gate */
  execute_late_warn_uninitialized,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};