gcc/tree-ssa-uninit.c
1 /* Predicate aware uninitialized variable warning.
2 Copyright (C) 2001-2013 Free Software Foundation, Inc.
3 Contributed by Xinliang David Li <davidxl@google.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "flags.h"
27 #include "tm_p.h"
28 #include "basic-block.h"
29 #include "function.h"
30 #include "gimple-pretty-print.h"
31 #include "bitmap.h"
32 #include "pointer-set.h"
33 #include "gimple.h"
34 #include "gimple-ssa.h"
35 #include "tree-phinodes.h"
36 #include "ssa-iterators.h"
37 #include "tree-ssa.h"
38 #include "tree-inline.h"
39 #include "hashtab.h"
40 #include "tree-pass.h"
41 #include "diagnostic-core.h"
43 /* This implements the pass that does predicate aware warning on uses of
44 possibly uninitialized variables. The pass first collects the set of
45 possibly uninitialized SSA names. For each such name, it walks through
46 all its immediate uses. For each immediate use, it rebuilds the condition
47 expression (the predicate) that guards the use. The predicate is then
48 examined to see if the variable is always defined under that same condition.
49 This is done either by pruning the unrealizable paths that lead to the
50 default definitions or by checking if the predicate set that guards the
51 defining paths is a superset of the use predicate. */
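/* For illustration only -- a minimal sketch of the kind of user code
   (hypothetical, not part of GCC) that this pass must reason about.
   The use of X is guarded by the same condition that guards its
   definition, so no warning should be issued:

     int
     guarded_use (int some_cond, int v)
     {
       int x;
       if (some_cond)
         x = v;
       if (some_cond)
         return x;
       return 0;
     }

   The default (undefined) definition of X reaches the use only on paths
   where SOME_COND is false, while the use itself is control dependent on
   SOME_COND being true, so the two predicate sets do not overlap.  */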
54 /* Pointer set of potentially undefined ssa names, i.e.,
55 ssa names that are defined by phi with operands that
56 are not defined or potentially undefined. */
57 static struct pointer_set_t *possibly_undefined_names = 0;
59 /* Bit mask handling macros. */
60 #define MASK_SET_BIT(mask, pos) mask |= (1 << pos)
61 #define MASK_TEST_BIT(mask, pos) (mask & (1 << pos))
62 #define MASK_EMPTY(mask) (mask == 0)
64 /* Returns the first bit position (starting from LSB)
65 in mask that is non-zero. Returns -1 if the mask is empty. */
66 static int
67 get_mask_first_set_bit (unsigned mask)
69 int pos = 0;
70 if (mask == 0)
71 return -1;
73 while ((mask & (1 << pos)) == 0)
74 pos++;
76 return pos;
78 #define MASK_FIRST_SET_BIT(mask) get_mask_first_set_bit (mask)
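/* Illustration only: for a phi with three operands where operands 0 and 2
   are possibly undefined, repeated MASK_SET_BIT calls build the mask
   binary 101 (i.e. 5).  Then MASK_TEST_BIT (5, 1) is 0, MASK_EMPTY (5)
   is false, and MASK_FIRST_SET_BIT (5) is 0.  */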
80 /* Return true if T, an SSA_NAME, has an undefined value. */
81 static bool
82 has_undefined_value_p (tree t)
84 return (ssa_undefined_value_p (t)
85 || (possibly_undefined_names
86 && pointer_set_contains (possibly_undefined_names, t)));
91 /* Like has_undefined_value_p, but don't return true if TREE_NO_WARNING
92 is set on SSA_NAME_VAR. */
94 static inline bool
95 uninit_undefined_value_p (tree t) {
96 if (!has_undefined_value_p (t))
97 return false;
98 if (SSA_NAME_VAR (t) && TREE_NO_WARNING (SSA_NAME_VAR (t)))
99 return false;
100 return true;
103 /* Emit warnings for uninitialized variables. This is done in two passes.
105 The first pass notices real uses of SSA names with undefined values.
106 Such uses are unconditionally uninitialized, and we can be certain that
107 such a use is a mistake. This pass is run before most optimizations,
108 so that we catch as many as we can.
110 The second pass follows PHI nodes to find uses that are potentially
111 uninitialized. In this case we can't necessarily prove that the use
112 is really uninitialized. This pass is run after most optimizations,
113 so that we thread as many jumps as possible, and delete as much dead
114 code as possible, in order to reduce false positives. We also look
115 again for plain uninitialized variables, since optimization may have
116 changed conditionally uninitialized to unconditionally uninitialized. */
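/* Illustration only (hypothetical user code): the first pass warns for
   uses that are uninitialized on every path, e.g.

     int f (void) { int x; return x; }

   which is diagnosed as "is used uninitialized", while the second pass
   warns for uses that are uninitialized only on some path that
   optimization could not prove dead, e.g.

     int g (int c) { int x; if (c) x = 1; return x; }

   which is diagnosed as "may be used uninitialized".  */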
118 /* Emit a warning for EXPR based on variable VAR at the point in the
119 program where T, an SSA_NAME, is used uninitialized. The warning
120 text is in GMSGID, WC is the warning code, and DATA is the gimple
121 statement context of the use (may be null). */
123 static void
124 warn_uninit (enum opt_code wc, tree t,
125 tree expr, tree var, const char *gmsgid, void *data)
127 gimple context = (gimple) data;
128 location_t location, cfun_loc;
129 expanded_location xloc, floc;
131 if (!has_undefined_value_p (t))
132 return;
134 /* TREE_NO_WARNING either means we already warned, or the front end
135 wishes to suppress the warning. */
136 if ((context
137 && (gimple_no_warning_p (context)
138 || (gimple_assign_single_p (context)
139 && TREE_NO_WARNING (gimple_assign_rhs1 (context)))))
140 || TREE_NO_WARNING (expr))
141 return;
143 location = (context != NULL && gimple_has_location (context))
144 ? gimple_location (context)
145 : DECL_SOURCE_LOCATION (var);
146 location = linemap_resolve_location (line_table, location,
147 LRK_SPELLING_LOCATION,
148 NULL);
149 cfun_loc = DECL_SOURCE_LOCATION (cfun->decl);
150 xloc = expand_location (location);
151 floc = expand_location (cfun_loc);
152 if (warning_at (location, wc, gmsgid, expr))
154 TREE_NO_WARNING (expr) = 1;
156 if (location == DECL_SOURCE_LOCATION (var))
157 return;
158 if (xloc.file != floc.file
159 || linemap_location_before_p (line_table,
160 location, cfun_loc)
161 || linemap_location_before_p (line_table,
162 cfun->function_end_locus,
163 location))
164 inform (DECL_SOURCE_LOCATION (var), "%qD was declared here", var);
168 static unsigned int
169 warn_uninitialized_vars (bool warn_possibly_uninitialized)
171 gimple_stmt_iterator gsi;
172 basic_block bb;
174 FOR_EACH_BB (bb)
176 bool always_executed = dominated_by_p (CDI_POST_DOMINATORS,
177 single_succ (ENTRY_BLOCK_PTR), bb);
178 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
180 gimple stmt = gsi_stmt (gsi);
181 use_operand_p use_p;
182 ssa_op_iter op_iter;
183 tree use;
185 if (is_gimple_debug (stmt))
186 continue;
188 /* We only do data flow with SSA_NAMEs, so that's all we
189 can warn about. */
190 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, op_iter, SSA_OP_USE)
192 use = USE_FROM_PTR (use_p);
193 if (always_executed)
194 warn_uninit (OPT_Wuninitialized, use,
195 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
196 "%qD is used uninitialized in this function",
197 stmt);
198 else if (warn_possibly_uninitialized)
199 warn_uninit (OPT_Wmaybe_uninitialized, use,
200 SSA_NAME_VAR (use), SSA_NAME_VAR (use),
201 "%qD may be used uninitialized in this function",
202 stmt);
205 /* For memory the only cheap thing we can do is see if we
206 have a use of the default def of the virtual operand.
207 ??? Note that at -O0 we do not have virtual operands.
208 ??? Not so cheap would be to use the alias oracle via
209 walk_aliased_vdefs, if we don't find any aliasing vdef
210 warn as is-used-uninitialized, if we don't find an aliasing
211 vdef that kills our use (stmt_kills_ref_p), warn as
212 may-be-used-uninitialized. But this walk is quadratic and
213 so must be limited which means we would miss warning
214 opportunities. */
215 use = gimple_vuse (stmt);
216 if (use
217 && gimple_assign_single_p (stmt)
218 && !gimple_vdef (stmt)
219 && SSA_NAME_IS_DEFAULT_DEF (use))
221 tree rhs = gimple_assign_rhs1 (stmt);
222 tree base = get_base_address (rhs);
224 /* Do not warn if it can be initialized outside this function. */
225 if (TREE_CODE (base) != VAR_DECL
226 || DECL_HARD_REGISTER (base)
227 || is_global_var (base))
228 continue;
230 if (always_executed)
231 warn_uninit (OPT_Wuninitialized, use,
232 gimple_assign_rhs1 (stmt), base,
233 "%qE is used uninitialized in this function",
234 stmt);
235 else if (warn_possibly_uninitialized)
236 warn_uninit (OPT_Wmaybe_uninitialized, use,
237 gimple_assign_rhs1 (stmt), base,
238 "%qE may be used uninitialized in this function",
239 stmt);
244 return 0;
247 /* Checks if the operand OPND of PHI is defined by
248 another phi with one operand defined by this PHI,
249 but the rest of the operands are all defined. If yes,
250 returns true to skip this operand as being
251 redundant. Can be enhanced to be more general. */
253 static bool
254 can_skip_redundant_opnd (tree opnd, gimple phi)
256 gimple op_def;
257 tree phi_def;
258 int i, n;
260 phi_def = gimple_phi_result (phi);
261 op_def = SSA_NAME_DEF_STMT (opnd);
262 if (gimple_code (op_def) != GIMPLE_PHI)
263 return false;
264 n = gimple_phi_num_args (op_def);
265 for (i = 0; i < n; ++i)
267 tree op = gimple_phi_arg_def (op_def, i);
268 if (TREE_CODE (op) != SSA_NAME)
269 continue;
270 if (op != phi_def && uninit_undefined_value_p (op))
271 return false;
274 return true;
277 /* Returns a bit mask holding the positions of arguments in PHI
278 that have empty (or possibly empty) definitions. */
280 static unsigned
281 compute_uninit_opnds_pos (gimple phi)
283 size_t i, n;
284 unsigned uninit_opnds = 0;
286 n = gimple_phi_num_args (phi);
287 /* Bail out for phi with too many args. */
288 if (n > 32)
289 return 0;
291 for (i = 0; i < n; ++i)
293 tree op = gimple_phi_arg_def (phi, i);
294 if (TREE_CODE (op) == SSA_NAME
295 && uninit_undefined_value_p (op)
296 && !can_skip_redundant_opnd (op, phi))
298 if (cfun->has_nonlocal_label || cfun->calls_setjmp)
300 /* Ignore SSA_NAMEs that appear on abnormal edges
301 somewhere. */
302 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op))
303 continue;
305 MASK_SET_BIT (uninit_opnds, i);
308 return uninit_opnds;
311 /* Find the immediate postdominator PDOM of the specified
312 basic block BLOCK. */
314 static inline basic_block
315 find_pdom (basic_block block)
317 if (block == EXIT_BLOCK_PTR)
318 return EXIT_BLOCK_PTR;
319 else
321 basic_block bb
322 = get_immediate_dominator (CDI_POST_DOMINATORS, block);
323 if (! bb)
324 return EXIT_BLOCK_PTR;
325 return bb;
329 /* Find the immediate DOM of the specified
330 basic block BLOCK. */
332 static inline basic_block
333 find_dom (basic_block block)
335 if (block == ENTRY_BLOCK_PTR)
336 return ENTRY_BLOCK_PTR;
337 else
339 basic_block bb = get_immediate_dominator (CDI_DOMINATORS, block);
340 if (! bb)
341 return ENTRY_BLOCK_PTR;
342 return bb;
346 /* Returns true if BB1 postdominates BB2 and BB1 is
347 not a loop exit bb. The loop exit bb check is simple and does
348 not cover all cases. */
350 static bool
351 is_non_loop_exit_postdominating (basic_block bb1, basic_block bb2)
353 if (!dominated_by_p (CDI_POST_DOMINATORS, bb2, bb1))
354 return false;
356 if (single_pred_p (bb1) && !single_succ_p (bb2))
357 return false;
359 return true;
362 /* Find the closest postdominator of a specified BB, which is control
363 equivalent to BB. */
365 static inline basic_block
366 find_control_equiv_block (basic_block bb)
368 basic_block pdom;
370 pdom = find_pdom (bb);
372 /* Skip the postdominating bb that is also a loop exit. */
373 if (!is_non_loop_exit_postdominating (pdom, bb))
374 return NULL;
376 if (dominated_by_p (CDI_DOMINATORS, pdom, bb))
377 return pdom;
379 return NULL;
382 #define MAX_NUM_CHAINS 8
383 #define MAX_CHAIN_LEN 5
384 #define MAX_POSTDOM_CHECK 8
386 /* Computes the control dependence chains (paths of edges)
387 for DEP_BB up to the dominating basic block BB (the head node of a
388 chain should be dominated by it). CD_CHAINS is a pointer to a
389 dynamic array holding the result chains. CUR_CD_CHAIN is the current
390 chain being computed. *NUM_CHAINS is the total number of chains. The
391 function returns true if the information is successfully computed,
392 and false if there is no control dependence or it is not computed. */
394 static bool
395 compute_control_dep_chain (basic_block bb, basic_block dep_bb,
396 vec<edge> *cd_chains,
397 size_t *num_chains,
398 vec<edge> *cur_cd_chain)
400 edge_iterator ei;
401 edge e;
402 size_t i;
403 bool found_cd_chain = false;
404 size_t cur_chain_len = 0;
406 if (EDGE_COUNT (bb->succs) < 2)
407 return false;
409 /* Could use a set instead. */
410 cur_chain_len = cur_cd_chain->length ();
411 if (cur_chain_len > MAX_CHAIN_LEN)
412 return false;
414 for (i = 0; i < cur_chain_len; i++)
416 edge e = (*cur_cd_chain)[i];
417 /* cycle detected. */
418 if (e->src == bb)
419 return false;
422 FOR_EACH_EDGE (e, ei, bb->succs)
424 basic_block cd_bb;
425 int post_dom_check = 0;
426 if (e->flags & (EDGE_FAKE | EDGE_ABNORMAL))
427 continue;
429 cd_bb = e->dest;
430 cur_cd_chain->safe_push (e);
431 while (!is_non_loop_exit_postdominating (cd_bb, bb))
433 if (cd_bb == dep_bb)
435 /* Found a direct control dependence. */
436 if (*num_chains < MAX_NUM_CHAINS)
438 cd_chains[*num_chains] = cur_cd_chain->copy ();
439 (*num_chains)++;
441 found_cd_chain = true;
442 /* check path from next edge. */
443 break;
446 /* Now check if DEP_BB is indirectly control dependent on BB. */
447 if (compute_control_dep_chain (cd_bb, dep_bb, cd_chains,
448 num_chains, cur_cd_chain))
450 found_cd_chain = true;
451 break;
454 cd_bb = find_pdom (cd_bb);
455 post_dom_check++;
456 if (cd_bb == EXIT_BLOCK_PTR || post_dom_check > MAX_POSTDOM_CHECK)
457 break;
459 cur_cd_chain->pop ();
460 gcc_assert (cur_cd_chain->length () == cur_chain_len);
462 gcc_assert (cur_cd_chain->length () == cur_chain_len);
464 return found_cd_chain;
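/* Illustration only: for a simple hypothetical CFG

       bb1: if (a > 0) goto bb2; else goto bb4;
       bb2: if (b != 0) goto bb4; else goto bb3;
       bb3: ...                (DEP_BB)
       bb4: join

   compute_control_dep_chain (bb1, bb3, ...) records the single chain
   { bb1->bb2, bb2->bb3 }: bb3 is reached only when the true edge out of
   bb1 and then the false edge out of bb2 are taken.  A block guarded by
   a single condition yields a chain of length one.  */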
467 typedef struct use_pred_info
469 gimple cond;
470 bool invert;
471 } *use_pred_info_t;
475 /* Converts the chains of control dependence edges into a set of
476 predicates. A control dependence chain is represented by a vector of
477 edges. DEP_CHAINS points to an array of dependence chains.
478 NUM_CHAINS is the size of the chain array. One edge in a dependence
479 chain is mapped to a predicate expression represented by the use_pred_info_t
480 type. One dependence chain is converted to a composite predicate that
481 is the result of an AND operation over the use_pred_info_t mapped to each edge.
482 A composite predicate is represented by a vector of use_pred_info_t. On
483 return, *PREDS points to the resulting array of composite predicates.
484 *NUM_PREDS is the number of composite predicates. */
486 static bool
487 convert_control_dep_chain_into_preds (vec<edge> *dep_chains,
488 size_t num_chains,
489 vec<use_pred_info_t> **preds,
490 size_t *num_preds)
492 bool has_valid_pred = false;
493 size_t i, j;
494 if (num_chains == 0 || num_chains >= MAX_NUM_CHAINS)
495 return false;
497 /* Now convert the control dep chain into a set
498 of predicates. */
499 typedef vec<use_pred_info_t> vec_use_pred_info_t_heap;
500 *preds = XCNEWVEC (vec_use_pred_info_t_heap, num_chains);
501 *num_preds = num_chains;
503 for (i = 0; i < num_chains; i++)
505 vec<edge> one_cd_chain = dep_chains[i];
507 has_valid_pred = false;
508 for (j = 0; j < one_cd_chain.length (); j++)
510 gimple cond_stmt;
511 gimple_stmt_iterator gsi;
512 basic_block guard_bb;
513 use_pred_info_t one_pred;
514 edge e;
516 e = one_cd_chain[j];
517 guard_bb = e->src;
518 gsi = gsi_last_bb (guard_bb);
519 if (gsi_end_p (gsi))
521 has_valid_pred = false;
522 break;
524 cond_stmt = gsi_stmt (gsi);
525 if (gimple_code (cond_stmt) == GIMPLE_CALL
526 && EDGE_COUNT (e->src->succs) >= 2)
528 /* Ignore EH edge. Can add assertion
529 on the other edge's flag. */
530 continue;
532 /* Skip if there is essentially one successor. */
533 if (EDGE_COUNT (e->src->succs) == 2)
535 edge e1;
536 edge_iterator ei1;
537 bool skip = false;
539 FOR_EACH_EDGE (e1, ei1, e->src->succs)
541 if (EDGE_COUNT (e1->dest->succs) == 0)
543 skip = true;
544 break;
547 if (skip)
548 continue;
550 if (gimple_code (cond_stmt) != GIMPLE_COND)
552 has_valid_pred = false;
553 break;
555 one_pred = XNEW (struct use_pred_info);
556 one_pred->cond = cond_stmt;
557 one_pred->invert = !!(e->flags & EDGE_FALSE_VALUE);
558 (*preds)[i].safe_push (one_pred);
559 has_valid_pred = true;
562 if (!has_valid_pred)
563 break;
565 return has_valid_pred;
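/* Illustration only: continuing the hypothetical CFG sketched after
   compute_control_dep_chain, the chain { bb1->bb2, bb2->bb3 } is converted
   into the composite predicate

       (a > 0) .AND. (.NOT. (b != 0))

   i.e. a vector of two use_pred_info_t entries whose conds point at the
   two GIMPLE_CONDs; the second entry has invert set because the chain
   follows the false edge out of bb2.  */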
568 /* Computes all control dependence chains for USE_BB. The control
569 dependence chains are then converted to an array of composite
570 predicates pointed to by PREDS. PHI_BB is the basic block of
571 the phi whose result is used in USE_BB. */
573 static bool
574 find_predicates (vec<use_pred_info_t> **preds,
575 size_t *num_preds,
576 basic_block phi_bb,
577 basic_block use_bb)
579 size_t num_chains = 0, i;
580 vec<edge> *dep_chains = 0;
581 vec<edge> cur_chain = vNULL;
582 bool has_valid_pred = false;
583 basic_block cd_root = 0;
585 typedef vec<edge> vec_edge_heap;
586 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
588 /* First find the closest bb that is control equivalent to PHI_BB
589 that also dominates USE_BB. */
590 cd_root = phi_bb;
591 while (dominated_by_p (CDI_DOMINATORS, use_bb, cd_root))
593 basic_block ctrl_eq_bb = find_control_equiv_block (cd_root);
594 if (ctrl_eq_bb && dominated_by_p (CDI_DOMINATORS, use_bb, ctrl_eq_bb))
595 cd_root = ctrl_eq_bb;
596 else
597 break;
600 compute_control_dep_chain (cd_root, use_bb,
601 dep_chains, &num_chains,
602 &cur_chain);
604 has_valid_pred
605 = convert_control_dep_chain_into_preds (dep_chains,
606 num_chains,
607 preds,
608 num_preds);
609 /* Free individual chain */
610 cur_chain.release ();
611 for (i = 0; i < num_chains; i++)
612 dep_chains[i].release ();
613 free (dep_chains);
614 return has_valid_pred;
617 /* Computes the set of incoming edges of PHI that have non-empty
618 definitions of a phi chain. The collection will be done
619 recursively on operands that are defined by phis. CD_ROOT
620 is the control dependence root. *EDGES holds the result, and
621 VISITED_PHIS is a pointer set for detecting cycles. */
623 static void
624 collect_phi_def_edges (gimple phi, basic_block cd_root,
625 vec<edge> *edges,
626 struct pointer_set_t *visited_phis)
628 size_t i, n;
629 edge opnd_edge;
630 tree opnd;
632 if (pointer_set_insert (visited_phis, phi))
633 return;
635 n = gimple_phi_num_args (phi);
636 for (i = 0; i < n; i++)
638 opnd_edge = gimple_phi_arg_edge (phi, i);
639 opnd = gimple_phi_arg_def (phi, i);
641 if (TREE_CODE (opnd) != SSA_NAME)
643 if (dump_file && (dump_flags & TDF_DETAILS))
645 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
646 print_gimple_stmt (dump_file, phi, 0, 0);
648 edges->safe_push (opnd_edge);
650 else
652 gimple def = SSA_NAME_DEF_STMT (opnd);
654 if (gimple_code (def) == GIMPLE_PHI
655 && dominated_by_p (CDI_DOMINATORS,
656 gimple_bb (def), cd_root))
657 collect_phi_def_edges (def, cd_root, edges,
658 visited_phis);
659 else if (!uninit_undefined_value_p (opnd))
661 if (dump_file && (dump_flags & TDF_DETAILS))
663 fprintf (dump_file, "\n[CHECK] Found def edge %d in ", (int)i);
664 print_gimple_stmt (dump_file, phi, 0, 0);
666 edges->safe_push (opnd_edge);
672 /* For each use edge of PHI, computes all control dependence chains.
673 The control dependence chains are then converted to an array of
674 composite predicates pointed to by PREDS. */
676 static bool
677 find_def_preds (vec<use_pred_info_t> **preds,
678 size_t *num_preds, gimple phi)
680 size_t num_chains = 0, i, n;
681 vec<edge> *dep_chains = 0;
682 vec<edge> cur_chain = vNULL;
683 vec<edge> def_edges = vNULL;
684 bool has_valid_pred = false;
685 basic_block phi_bb, cd_root = 0;
686 struct pointer_set_t *visited_phis;
688 typedef vec<edge> vec_edge_heap;
689 dep_chains = XCNEWVEC (vec_edge_heap, MAX_NUM_CHAINS);
691 phi_bb = gimple_bb (phi);
692 /* First find the closest dominating bb to be
693 the control dependence root */
694 cd_root = find_dom (phi_bb);
695 if (!cd_root)
696 return false;
698 visited_phis = pointer_set_create ();
699 collect_phi_def_edges (phi, cd_root, &def_edges, visited_phis);
700 pointer_set_destroy (visited_phis);
702 n = def_edges.length ();
703 if (n == 0)
704 return false;
706 for (i = 0; i < n; i++)
708 size_t prev_nc, j;
709 edge opnd_edge;
711 opnd_edge = def_edges[i];
712 prev_nc = num_chains;
713 compute_control_dep_chain (cd_root, opnd_edge->src,
714 dep_chains, &num_chains,
715 &cur_chain);
716 /* Free individual chain */
717 cur_chain.release ();
719 /* Now update the newly added chains with
720 the phi operand edge: */
721 if (EDGE_COUNT (opnd_edge->src->succs) > 1)
723 if (prev_nc == num_chains
724 && num_chains < MAX_NUM_CHAINS)
725 num_chains++;
726 for (j = prev_nc; j < num_chains; j++)
728 dep_chains[j].safe_push (opnd_edge);
733 has_valid_pred
734 = convert_control_dep_chain_into_preds (dep_chains,
735 num_chains,
736 preds,
737 num_preds);
738 for (i = 0; i < num_chains; i++)
739 dep_chains[i].release ();
740 free (dep_chains);
741 return has_valid_pred;
744 /* Dumps the predicates (PREDS) for USESTMT. */
746 static void
747 dump_predicates (gimple usestmt, size_t num_preds,
748 vec<use_pred_info_t> *preds,
749 const char* msg)
751 size_t i, j;
752 vec<use_pred_info_t> one_pred_chain;
753 fprintf (dump_file, msg);
754 print_gimple_stmt (dump_file, usestmt, 0, 0);
755 fprintf (dump_file, "is guarded by :\n");
756 /* do some dumping here: */
757 for (i = 0; i < num_preds; i++)
759 size_t np;
761 one_pred_chain = preds[i];
762 np = one_pred_chain.length ();
764 for (j = 0; j < np; j++)
766 use_pred_info_t one_pred
767 = one_pred_chain[j];
768 if (one_pred->invert)
769 fprintf (dump_file, " (.NOT.) ");
770 print_gimple_stmt (dump_file, one_pred->cond, 0, 0);
771 if (j < np - 1)
772 fprintf (dump_file, "(.AND.)\n");
774 if (i < num_preds - 1)
775 fprintf (dump_file, "(.OR.)\n");
779 /* Destroys the predicate set *PREDS. */
781 static void
782 destroy_predicate_vecs (size_t n,
783 vec<use_pred_info_t> * preds)
785 size_t i, j;
786 for (i = 0; i < n; i++)
788 for (j = 0; j < preds[i].length (); j++)
789 free (preds[i][j]);
790 preds[i].release ();
792 free (preds);
796 /* Computes the 'normalized' conditional code with operand
797 swapping and condition inversion. */
799 static enum tree_code
800 get_cmp_code (enum tree_code orig_cmp_code,
801 bool swap_cond, bool invert)
803 enum tree_code tc = orig_cmp_code;
805 if (swap_cond)
806 tc = swap_tree_comparison (orig_cmp_code);
807 if (invert)
808 tc = invert_tree_comparison (tc, false);
810 switch (tc)
812 case LT_EXPR:
813 case LE_EXPR:
814 case GT_EXPR:
815 case GE_EXPR:
816 case EQ_EXPR:
817 case NE_EXPR:
818 break;
819 default:
820 return ERROR_MARK;
822 return tc;
825 /* Returns true if VAL falls in the range defined by BOUNDARY and CMPC, i.e.
826 all values in the range satisfy (x CMPC BOUNDARY) == true. */
828 static bool
829 is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
831 bool inverted = false;
832 bool is_unsigned;
833 bool result;
835 /* Only handle integer constants here. */
836 if (TREE_CODE (val) != INTEGER_CST
837 || TREE_CODE (boundary) != INTEGER_CST)
838 return true;
840 is_unsigned = TYPE_UNSIGNED (TREE_TYPE (val));
842 if (cmpc == GE_EXPR || cmpc == GT_EXPR
843 || cmpc == NE_EXPR)
845 cmpc = invert_tree_comparison (cmpc, false);
846 inverted = true;
849 if (is_unsigned)
851 if (cmpc == EQ_EXPR)
852 result = tree_int_cst_equal (val, boundary);
853 else if (cmpc == LT_EXPR)
854 result = INT_CST_LT_UNSIGNED (val, boundary);
855 else
857 gcc_assert (cmpc == LE_EXPR);
858 result = (tree_int_cst_equal (val, boundary)
859 || INT_CST_LT_UNSIGNED (val, boundary));
862 else
864 if (cmpc == EQ_EXPR)
865 result = tree_int_cst_equal (val, boundary);
866 else if (cmpc == LT_EXPR)
867 result = INT_CST_LT (val, boundary);
868 else
870 gcc_assert (cmpc == LE_EXPR);
871 result = (tree_int_cst_equal (val, boundary)
872 || INT_CST_LT (val, boundary));
876 if (inverted)
877 result ^= 1;
879 return result;
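/* Illustration only, with VAL and BOUNDARY standing for INTEGER_CST nodes:
   VAL == 3, BOUNDARY == 5, CMPC == LT_EXPR yields true since 3 < 5, and
   VAL == 7, BOUNDARY == 5, CMPC == LE_EXPR yields false.  For CMPC ==
   NE_EXPR the comparison is first inverted to EQ_EXPR and the result
   flipped, so VAL == 3, BOUNDARY == 5, CMPC == NE_EXPR also yields true.  */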
882 /* Returns true if PRED is common among all the predicate
883 chains (PREDS) (and therefore can be factored out).
884 NUM_PRED_CHAIN is the size of array PREDS. */
886 static bool
887 find_matching_predicate_in_rest_chains (use_pred_info_t pred,
888 vec<use_pred_info_t> *preds,
889 size_t num_pred_chains)
891 size_t i, j, n;
893 /* Trivial case. */
894 if (num_pred_chains == 1)
895 return true;
897 for (i = 1; i < num_pred_chains; i++)
899 bool found = false;
900 vec<use_pred_info_t> one_chain = preds[i];
901 n = one_chain.length ();
902 for (j = 0; j < n; j++)
904 use_pred_info_t pred2
905 = one_chain[j];
906 /* We can relax the condition comparison to not
907 use address comparison. However, the most common
908 case is that multiple control dependent paths share
909 a common path prefix, so address comparison should
910 be ok. */
912 if (pred2->cond == pred->cond
913 && pred2->invert == pred->invert)
915 found = true;
916 break;
919 if (!found)
920 return false;
922 return true;
925 /* Forward declaration. */
926 static bool
927 is_use_properly_guarded (gimple use_stmt,
928 basic_block use_bb,
929 gimple phi,
930 unsigned uninit_opnds,
931 struct pointer_set_t *visited_phis);
933 /* Returns true if all uninitialized opnds are pruned. Returns false
934 otherwise. PHI is the phi node with uninitialized operands,
935 UNINIT_OPNDS is the bitmap of the uninitialized operand positions,
936 FLAG_DEF is the statement defining the flag guarding the use of the
937 PHI output, BOUNDARY_CST is the const value used in the predicate
938 associated with the flag, CMP_CODE is the comparison code used in
939 the predicate, VISITED_PHIS is the pointer set of phis visited, and
940 VISITED_FLAG_PHIS is the pointer to the pointer set of flag definitions
941 that are also phis.
943 Example scenario:
945 BB1:
946 flag_1 = phi <0, 1> // (1)
947 var_1 = phi <undef, some_val>
950 BB2:
951 flag_2 = phi <0, flag_1, flag_1> // (2)
952 var_2 = phi <undef, var_1, var_1>
953 if (flag_2 == 1)
954 goto BB3;
956 BB3:
957 use of var_2 // (3)
959 Because some flag arg in (1) is not constant, if we do not look into the
960 flag phis recursively, it is conservatively treated as unknown and var_1
961 is thought to flow into the use at (3). Since var_1 is potentially uninitialized,
962 a false warning will be emitted. By checking recursively into (1), the compiler
963 can find out that only some_val (which is defined) can flow into (3), which is OK.
967 static bool
968 prune_uninit_phi_opnds_in_unrealizable_paths (
969 gimple phi, unsigned uninit_opnds,
970 gimple flag_def, tree boundary_cst,
971 enum tree_code cmp_code,
972 struct pointer_set_t *visited_phis,
973 bitmap *visited_flag_phis)
975 unsigned i;
977 for (i = 0; i < MIN (32, gimple_phi_num_args (flag_def)); i++)
979 tree flag_arg;
981 if (!MASK_TEST_BIT (uninit_opnds, i))
982 continue;
984 flag_arg = gimple_phi_arg_def (flag_def, i);
985 if (!is_gimple_constant (flag_arg))
987 gimple flag_arg_def, phi_arg_def;
988 tree phi_arg;
989 unsigned uninit_opnds_arg_phi;
991 if (TREE_CODE (flag_arg) != SSA_NAME)
992 return false;
993 flag_arg_def = SSA_NAME_DEF_STMT (flag_arg);
994 if (gimple_code (flag_arg_def) != GIMPLE_PHI)
995 return false;
997 phi_arg = gimple_phi_arg_def (phi, i);
998 if (TREE_CODE (phi_arg) != SSA_NAME)
999 return false;
1001 phi_arg_def = SSA_NAME_DEF_STMT (phi_arg);
1002 if (gimple_code (phi_arg_def) != GIMPLE_PHI)
1003 return false;
1005 if (gimple_bb (phi_arg_def) != gimple_bb (flag_arg_def))
1006 return false;
1008 if (!*visited_flag_phis)
1009 *visited_flag_phis = BITMAP_ALLOC (NULL);
1011 if (bitmap_bit_p (*visited_flag_phis,
1012 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def))))
1013 return false;
1015 bitmap_set_bit (*visited_flag_phis,
1016 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1018 /* Now recursively prune the uninitialized phi args. */
1019 uninit_opnds_arg_phi = compute_uninit_opnds_pos (phi_arg_def);
1020 if (!prune_uninit_phi_opnds_in_unrealizable_paths (
1021 phi_arg_def, uninit_opnds_arg_phi,
1022 flag_arg_def, boundary_cst, cmp_code,
1023 visited_phis, visited_flag_phis))
1024 return false;
1026 bitmap_clear_bit (*visited_flag_phis,
1027 SSA_NAME_VERSION (gimple_phi_result (flag_arg_def)));
1028 continue;
1031 /* Now check if the constant is in the guarded range. */
1032 if (is_value_included_in (flag_arg, boundary_cst, cmp_code))
1034 tree opnd;
1035 gimple opnd_def;
1037 /* Now we know that this undefined edge is not
1038 pruned. If the operand is defined by another phi,
1039 we can further prune the incoming edges of that
1040 phi by checking the predicates of this operand. */
1042 opnd = gimple_phi_arg_def (phi, i);
1043 opnd_def = SSA_NAME_DEF_STMT (opnd);
1044 if (gimple_code (opnd_def) == GIMPLE_PHI)
1046 edge opnd_edge;
1047 unsigned uninit_opnds2
1048 = compute_uninit_opnds_pos (opnd_def);
1049 gcc_assert (!MASK_EMPTY (uninit_opnds2));
1050 opnd_edge = gimple_phi_arg_edge (phi, i);
1051 if (!is_use_properly_guarded (phi,
1052 opnd_edge->src,
1053 opnd_def,
1054 uninit_opnds2,
1055 visited_phis))
1056 return false;
1058 else
1059 return false;
1063 return true;
1066 /* A helper function that determines if the predicate set
1067 of the use is not overlapping with that of the uninit paths.
1068 The most common scenario of guarded use is in Example 1:
1069 Example 1:
1070 if (some_cond)
1072 x = ...;
1073 flag = true;
1076 ... some code ...
1078 if (flag)
1079 use (x);
1081 The real world examples are usually more complicated, but similar
1082 and usually result from inlining:
1084 bool init_func (int * x)
1086 if (some_cond)
1087 return false;
1088 *x = ..
1089 return true;
1092 void foo(..)
1094 int x;
1096 if (!init_func(&x))
1097 return;
1099 .. some_code ...
1100 use (x);
1103 Another possible use scenario is in the following trivial example:
1105 Example 2:
1106 if (n > 0)
1107 x = 1;
1109 if (n > 0)
1111 if (m < 2)
1112 .. = x;
1115 Predicate analysis needs to compute the composite predicate:
1117 1) 'x' use predicate: (n > 0) .AND. (m < 2)
1118 2) 'x' default value (non-def) predicate: .NOT. (n > 0)
1119 (the predicate chain for phi operand defs can be computed
1120 starting from a bb that is control equivalent to the phi's
1121 bb and is dominating the operand def.)
1123 and check overlapping:
1124 (n > 0) .AND. (m < 2) .AND. (.NOT. (n > 0))
1125 <==> false
1127 This implementation provides a framework that can handle such
1128 scenarios. (Note that many simple cases are handled properly
1129 without the predicate analysis -- this is due to the jump threading
1130 transformation, which eliminates the merge point and thus makes
1131 path sensitive analysis unnecessary.)
1133 NUM_PREDS is the number of predicate chains, PREDS is
1134 the array of chains, PHI is the phi node whose incoming (undefined)
1135 paths need to be pruned, and UNINIT_OPNDS is the bitmap holding
1136 uninit operand positions. VISITED_PHIS is the pointer set of phi
1137 stmts being checked. */
1140 static bool
1141 use_pred_not_overlap_with_undef_path_pred (
1142 size_t num_preds,
1143 vec<use_pred_info_t> *preds,
1144 gimple phi, unsigned uninit_opnds,
1145 struct pointer_set_t *visited_phis)
1147 unsigned int i, n;
1148 gimple flag_def = 0;
1149 tree boundary_cst = 0;
1150 enum tree_code cmp_code;
1151 bool swap_cond = false;
1152 bool invert = false;
1153 vec<use_pred_info_t> the_pred_chain;
1154 bitmap visited_flag_phis = NULL;
1155 bool all_pruned = false;
1157 gcc_assert (num_preds > 0);
1158 /* Find within the common prefix of multiple predicate chains
1159 a predicate that is a comparison of a flag variable against
1160 a constant. */
1161 the_pred_chain = preds[0];
1162 n = the_pred_chain.length ();
1163 for (i = 0; i < n; i++)
1165 gimple cond;
1166 tree cond_lhs, cond_rhs, flag = 0;
1168 use_pred_info_t the_pred
1169 = the_pred_chain[i];
1171 cond = the_pred->cond;
1172 invert = the_pred->invert;
1173 cond_lhs = gimple_cond_lhs (cond);
1174 cond_rhs = gimple_cond_rhs (cond);
1175 cmp_code = gimple_cond_code (cond);
1177 if (cond_lhs != NULL_TREE && TREE_CODE (cond_lhs) == SSA_NAME
1178 && cond_rhs != NULL_TREE && is_gimple_constant (cond_rhs))
1180 boundary_cst = cond_rhs;
1181 flag = cond_lhs;
1183 else if (cond_rhs != NULL_TREE && TREE_CODE (cond_rhs) == SSA_NAME
1184 && cond_lhs != NULL_TREE && is_gimple_constant (cond_lhs))
1186 boundary_cst = cond_lhs;
1187 flag = cond_rhs;
1188 swap_cond = true;
1191 if (!flag)
1192 continue;
1194 flag_def = SSA_NAME_DEF_STMT (flag);
1196 if (!flag_def)
1197 continue;
1199 if ((gimple_code (flag_def) == GIMPLE_PHI)
1200 && (gimple_bb (flag_def) == gimple_bb (phi))
1201 && find_matching_predicate_in_rest_chains (
1202 the_pred, preds, num_preds))
1203 break;
1205 flag_def = 0;
1208 if (!flag_def)
1209 return false;
1211 /* Now check that all the uninit incoming edges have a constant flag
1212 value that is in conflict with the use guard/predicate. */
1213 cmp_code = get_cmp_code (cmp_code, swap_cond, invert);
1215 if (cmp_code == ERROR_MARK)
1216 return false;
1218 all_pruned = prune_uninit_phi_opnds_in_unrealizable_paths (phi,
1219 uninit_opnds,
1220 flag_def,
1221 boundary_cst,
1222 cmp_code,
1223 visited_phis,
1224 &visited_flag_phis);
1226 if (visited_flag_phis)
1227 BITMAP_FREE (visited_flag_phis);
1229 return all_pruned;
1232 /* Returns true if TC is AND or OR */
1234 static inline bool
1235 is_and_or_or (enum tree_code tc, tree typ)
1237 return (tc == BIT_IOR_EXPR
1238 || (tc == BIT_AND_EXPR
1239 && (typ == 0 || TREE_CODE (typ) == BOOLEAN_TYPE)));
1242 typedef struct norm_cond
1244 vec<gimple> conds;
1245 enum tree_code cond_code;
1246 bool invert;
1247 } *norm_cond_t;
1250 /* Normalizes gimple condition COND. The normalization follows
1251 UD chains to form larger condition expression trees. NORM_COND
1252 holds the normalized result. COND_CODE is the logical opcode
1253 (AND or OR) of the normalized tree. */
1255 static void
1256 normalize_cond_1 (gimple cond,
1257 norm_cond_t norm_cond,
1258 enum tree_code cond_code)
1260 enum gimple_code gc;
1261 enum tree_code cur_cond_code;
1262 tree rhs1, rhs2;
1264 gc = gimple_code (cond);
1265 if (gc != GIMPLE_ASSIGN)
1267 norm_cond->conds.safe_push (cond);
1268 return;
1271 cur_cond_code = gimple_assign_rhs_code (cond);
1272 rhs1 = gimple_assign_rhs1 (cond);
1273 rhs2 = gimple_assign_rhs2 (cond);
1274 if (cur_cond_code == NE_EXPR)
1276 if (integer_zerop (rhs2)
1277 && (TREE_CODE (rhs1) == SSA_NAME))
1278 normalize_cond_1 (
1279 SSA_NAME_DEF_STMT (rhs1),
1280 norm_cond, cond_code);
1281 else if (integer_zerop (rhs1)
1282 && (TREE_CODE (rhs2) == SSA_NAME))
1283 normalize_cond_1 (
1284 SSA_NAME_DEF_STMT (rhs2),
1285 norm_cond, cond_code);
1286 else
1287 norm_cond->conds.safe_push (cond);
1289 return;
1292 if (is_and_or_or (cur_cond_code, TREE_TYPE (rhs1))
1293 && (cond_code == cur_cond_code || cond_code == ERROR_MARK)
1294 && (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME))
1296 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs1),
1297 norm_cond, cur_cond_code);
1298 normalize_cond_1 (SSA_NAME_DEF_STMT (rhs2),
1299 norm_cond, cur_cond_code);
1300 norm_cond->cond_code = cur_cond_code;
1302 else
1303 norm_cond->conds.safe_push (cond);
1306 /* See normalize_cond_1 for details. INVERT is a flag to indicate
1307 if COND needs to be inverted or not. */
1309 static void
1310 normalize_cond (gimple cond, norm_cond_t norm_cond, bool invert)
1312 enum tree_code cond_code;
1314 norm_cond->cond_code = ERROR_MARK;
1315 norm_cond->invert = false;
1316 norm_cond->conds.create (0);
1317 gcc_assert (gimple_code (cond) == GIMPLE_COND);
1318 cond_code = gimple_cond_code (cond);
1319 if (invert)
1320 cond_code = invert_tree_comparison (cond_code, false);
1322 if (cond_code == NE_EXPR)
1324 if (integer_zerop (gimple_cond_rhs (cond))
1325 && (TREE_CODE (gimple_cond_lhs (cond)) == SSA_NAME))
1326 normalize_cond_1 (
1327 SSA_NAME_DEF_STMT (gimple_cond_lhs (cond)),
1328 norm_cond, ERROR_MARK);
1329 else if (integer_zerop (gimple_cond_lhs (cond))
1330 && (TREE_CODE (gimple_cond_rhs (cond)) == SSA_NAME))
1331 normalize_cond_1 (
1332 SSA_NAME_DEF_STMT (gimple_cond_rhs (cond)),
1333 norm_cond, ERROR_MARK);
1334 else
1336 norm_cond->conds.safe_push (cond);
1337 norm_cond->invert = invert;
1340 else
1342 norm_cond->conds.safe_push (cond);
1343 norm_cond->invert = invert;
1346 gcc_assert (norm_cond->conds.length () == 1
1347 || is_and_or_or (norm_cond->cond_code, NULL));
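/* Illustration only: for GIMPLE of the (hypothetical) form, with boolean
   temporaries,

       t1 = a > 0;
       t2 = b < 5;
       t3 = t1 & t2;
       if (t3 != 0) ...

   normalize_cond applied to the GIMPLE_COND produces conds containing the
   defining statements of t1 and t2 (i.e. a > 0 and b < 5) with cond_code
   set to BIT_AND_EXPR -- the condition is decomposed by following the UD
   chain of t3.  */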
1350 /* Returns true if the domain for condition COND1 is a subset of
1351 COND2. REVERSE is a flag. When it is true, the function checks
1352 if COND1 is a superset of COND2. INVERT1 and INVERT2 are flags
1353 to indicate if COND1 and COND2 need to be inverted or not. */
1355 static bool
1356 is_gcond_subset_of (gimple cond1, bool invert1,
1357 gimple cond2, bool invert2,
1358 bool reverse)
1360 enum gimple_code gc1, gc2;
1361 enum tree_code cond1_code, cond2_code;
1362 gimple tmp;
1363 tree cond1_lhs, cond1_rhs, cond2_lhs, cond2_rhs;
1365 /* Take the short cut. */
1366 if (cond1 == cond2)
1367 return true;
1369 if (reverse)
1371 tmp = cond1;
1372 cond1 = cond2;
1373 cond2 = tmp;
1376 gc1 = gimple_code (cond1);
1377 gc2 = gimple_code (cond2);
1379 if ((gc1 != GIMPLE_ASSIGN && gc1 != GIMPLE_COND)
1380 || (gc2 != GIMPLE_ASSIGN && gc2 != GIMPLE_COND))
1381 return cond1 == cond2;
1383 cond1_code = ((gc1 == GIMPLE_ASSIGN)
1384 ? gimple_assign_rhs_code (cond1)
1385 : gimple_cond_code (cond1));
1387 cond2_code = ((gc2 == GIMPLE_ASSIGN)
1388 ? gimple_assign_rhs_code (cond2)
1389 : gimple_cond_code (cond2));
1391 if (TREE_CODE_CLASS (cond1_code) != tcc_comparison
1392 || TREE_CODE_CLASS (cond2_code) != tcc_comparison)
1393 return false;
1395 if (invert1)
1396 cond1_code = invert_tree_comparison (cond1_code, false);
1397 if (invert2)
1398 cond2_code = invert_tree_comparison (cond2_code, false);
1400 cond1_lhs = ((gc1 == GIMPLE_ASSIGN)
1401 ? gimple_assign_rhs1 (cond1)
1402 : gimple_cond_lhs (cond1));
1403 cond1_rhs = ((gc1 == GIMPLE_ASSIGN)
1404 ? gimple_assign_rhs2 (cond1)
1405 : gimple_cond_rhs (cond1));
1406 cond2_lhs = ((gc2 == GIMPLE_ASSIGN)
1407 ? gimple_assign_rhs1 (cond2)
1408 : gimple_cond_lhs (cond2));
1409 cond2_rhs = ((gc2 == GIMPLE_ASSIGN)
1410 ? gimple_assign_rhs2 (cond2)
1411 : gimple_cond_rhs (cond2));
1413 /* Assuming const operands have been swapped to the
1414 rhs at this point of the analysis. */
1416 if (cond1_lhs != cond2_lhs)
1417 return false;
1419 if (!is_gimple_constant (cond1_rhs)
1420 || TREE_CODE (cond1_rhs) != INTEGER_CST)
1421 return (cond1_rhs == cond2_rhs);
1423 if (!is_gimple_constant (cond2_rhs)
1424 || TREE_CODE (cond2_rhs) != INTEGER_CST)
1425 return (cond1_rhs == cond2_rhs);
1427 if (cond1_code == EQ_EXPR)
1428 return is_value_included_in (cond1_rhs,
1429 cond2_rhs, cond2_code);
1430 if (cond1_code == NE_EXPR || cond2_code == EQ_EXPR)
1431 return ((cond2_code == cond1_code)
1432 && tree_int_cst_equal (cond1_rhs, cond2_rhs));
1434 if (((cond1_code == GE_EXPR || cond1_code == GT_EXPR)
1435 && (cond2_code == LE_EXPR || cond2_code == LT_EXPR))
1436 || ((cond1_code == LE_EXPR || cond1_code == LT_EXPR)
1437 && (cond2_code == GE_EXPR || cond2_code == GT_EXPR)))
1438 return false;
1440 if (cond1_code != GE_EXPR && cond1_code != GT_EXPR
1441 && cond1_code != LE_EXPR && cond1_code != LT_EXPR)
1442 return false;
1444 if (cond1_code == GT_EXPR)
1446 cond1_code = GE_EXPR;
1447 cond1_rhs = fold_binary (PLUS_EXPR, TREE_TYPE (cond1_rhs),
1448 cond1_rhs,
1449 fold_convert (TREE_TYPE (cond1_rhs),
1450 integer_one_node));
1452 else if (cond1_code == LT_EXPR)
1454 cond1_code = LE_EXPR;
1455 cond1_rhs = fold_binary (MINUS_EXPR, TREE_TYPE (cond1_rhs),
1456 cond1_rhs,
1457 fold_convert (TREE_TYPE (cond1_rhs),
1458 integer_one_node));
1461 if (!cond1_rhs)
1462 return false;
1464 gcc_assert (cond1_code == GE_EXPR || cond1_code == LE_EXPR);
1466 if (cond2_code == GE_EXPR || cond2_code == GT_EXPR ||
1467 cond2_code == LE_EXPR || cond2_code == LT_EXPR)
1468 return is_value_included_in (cond1_rhs,
1469 cond2_rhs, cond2_code);
1470 else if (cond2_code == NE_EXPR)
1471 return
1472 (is_value_included_in (cond1_rhs,
1473 cond2_rhs, cond2_code)
1474 && !is_value_included_in (cond2_rhs,
1475 cond1_rhs, cond1_code));
1476 return false;
1479 /* Returns true if the domain of the condition expression
1480 in COND is a subset of any of the sub-conditions
1481 of the normalized condition NORM_COND. INVERT is a flag
1482 to indicate if COND needs to be inverted.
1483 REVERSE is a flag. When it is true, the check is reversed --
1484 it returns true if COND is a superset of any of the subconditions
1485 of NORM_COND. */
1487 static bool
1488 is_subset_of_any (gimple cond, bool invert,
1489 norm_cond_t norm_cond, bool reverse)
1491 size_t i;
1492 size_t len = norm_cond->conds.length ();
1494 for (i = 0; i < len; i++)
1496 if (is_gcond_subset_of (cond, invert,
1497 norm_cond->conds[i],
1498 false, reverse))
1499 return true;
1501 return false;
1504 /* NORM_COND1 and NORM_COND2 are normalized logical/BIT OR
1505 expressions (formed by following UD chains, not control
1506 dependence chains). The function returns true if the domain
1507 of the OR expression NORM_COND1 is a subset of NORM_COND2's.
1508 The implementation is conservative, and it returns false if
1509 the inclusion relationship may not hold. */
1511 static bool
1512 is_or_set_subset_of (norm_cond_t norm_cond1,
1513 norm_cond_t norm_cond2)
1515 size_t i;
1516 size_t len = norm_cond1->conds.length ();
1518 for (i = 0; i < len; i++)
1520 if (!is_subset_of_any (norm_cond1->conds[i],
1521 false, norm_cond2, false))
1522 return false;
1524 return true;
1527 /* NORM_COND1 and NORM_COND2 are normalized logical AND
1528 expressions (formed by following UD chains, not control
1529 dependence chains). The function returns true if the domain
1530 of the AND expression NORM_COND1 is a subset of NORM_COND2's. */
1532 static bool
1533 is_and_set_subset_of (norm_cond_t norm_cond1,
1534 norm_cond_t norm_cond2)
1536 size_t i;
1537 size_t len = norm_cond2->conds.length ();
1539 for (i = 0; i < len; i++)
1541 if (!is_subset_of_any (norm_cond2->conds[i],
1542 false, norm_cond1, true))
1543 return false;
1545 return true;
1548 /* Returns true if the domain of NORM_COND1 is a subset
1549 of that of NORM_COND2. Returns false if it can not be
1550 proved to be so. */
1552 static bool
1553 is_norm_cond_subset_of (norm_cond_t norm_cond1,
1554 norm_cond_t norm_cond2)
1556 size_t i;
1557 enum tree_code code1, code2;
1559 code1 = norm_cond1->cond_code;
1560 code2 = norm_cond2->cond_code;
1562 if (code1 == BIT_AND_EXPR)
1564 /* Both conditions are AND expressions. */
1565 if (code2 == BIT_AND_EXPR)
1566 return is_and_set_subset_of (norm_cond1, norm_cond2);
1567 /* NORM_COND1 is an AND expression, and NORM_COND2 is an OR
1568 expression. In this case, returns true if any subexpression
1569 of NORM_COND1 is a subset of any subexpression of NORM_COND2. */
1570 else if (code2 == BIT_IOR_EXPR)
1572 size_t len1;
1573 len1 = norm_cond1->conds.length ();
1574 for (i = 0; i < len1; i++)
1576 gimple cond1 = norm_cond1->conds[i];
1577 if (is_subset_of_any (cond1, false, norm_cond2, false))
1578 return true;
1580 return false;
1582 else
1584 gcc_assert (code2 == ERROR_MARK);
1585 gcc_assert (norm_cond2->conds.length () == 1);
1586 return is_subset_of_any (norm_cond2->conds[0],
1587 norm_cond2->invert, norm_cond1, true);
1590 /* NORM_COND1 is an OR expression */
1591 else if (code1 == BIT_IOR_EXPR)
1593 if (code2 != code1)
1594 return false;
1596 return is_or_set_subset_of (norm_cond1, norm_cond2);
1598 else
1600 gcc_assert (code1 == ERROR_MARK);
1601 gcc_assert (norm_cond1->conds.length () == 1);
1602 /* Conservatively returns false if NORM_COND1 is non-decomposable
1603 and NORM_COND2 is an AND expression. */
1604 if (code2 == BIT_AND_EXPR)
1605 return false;
1607 if (code2 == BIT_IOR_EXPR)
1608 return is_subset_of_any (norm_cond1->conds[0],
1609 norm_cond1->invert, norm_cond2, false);
1611 gcc_assert (code2 == ERROR_MARK);
1612 gcc_assert (norm_cond2->conds.length () == 1);
1613 return is_gcond_subset_of (norm_cond1->conds[0],
1614 norm_cond1->invert,
1615 norm_cond2->conds[0],
1616 norm_cond2->invert, false);
1620 /* Returns true if the domain of the single predicate expression
1621 EXPR1 is a subset of that of EXPR2. Returns false if it
1622 can not be proved. */
1624 static bool
1625 is_pred_expr_subset_of (use_pred_info_t expr1,
1626 use_pred_info_t expr2)
1628 gimple cond1, cond2;
1629 enum tree_code code1, code2;
1630 struct norm_cond norm_cond1, norm_cond2;
1631 bool is_subset = false;
1633 cond1 = expr1->cond;
1634 cond2 = expr2->cond;
1635 code1 = gimple_cond_code (cond1);
1636 code2 = gimple_cond_code (cond2);
1638 if (expr1->invert)
1639 code1 = invert_tree_comparison (code1, false);
1640 if (expr2->invert)
1641 code2 = invert_tree_comparison (code2, false);
1643 /* Fast path -- match exactly */
1644 if ((gimple_cond_lhs (cond1) == gimple_cond_lhs (cond2))
1645 && (gimple_cond_rhs (cond1) == gimple_cond_rhs (cond2))
1646 && (code1 == code2))
1647 return true;
1649 /* Normalize conditions. To keep NE_EXPR, do not invert
1650 when both need inversion. */
1651 normalize_cond (cond1, &norm_cond1, (expr1->invert));
1652 normalize_cond (cond2, &norm_cond2, (expr2->invert));
1654 is_subset = is_norm_cond_subset_of (&norm_cond1, &norm_cond2);
1656 /* Free memory */
1657 norm_cond1.conds.release ();
1658 norm_cond2.conds.release ();
1659 return is_subset;
1662 /* Returns true if the domain of PRED1 is a subset
1663 of that of PRED2. Returns false if it can not be proved so. */
1665 static bool
1666 is_pred_chain_subset_of (vec<use_pred_info_t> pred1,
1667 vec<use_pred_info_t> pred2)
1669 size_t np1, np2, i1, i2;
1671 np1 = pred1.length ();
1672 np2 = pred2.length ();
1674 for (i2 = 0; i2 < np2; i2++)
1676 bool found = false;
1677 use_pred_info_t info2
1678 = pred2[i2];
1679 for (i1 = 0; i1 < np1; i1++)
1681 use_pred_info_t info1
1682 = pred1[i1];
1683 if (is_pred_expr_subset_of (info1, info2))
1685 found = true;
1686 break;
1689 if (!found)
1690 return false;
1692 return true;
1695 /* Returns true if the domain defined by
1696 one pred chain ONE_PRED is a subset of the domain
1697 of *PREDS. It returns false if ONE_PRED's domain is
1698 not a subset of any of the sub-domains of PREDS
1699 (corresponding to each individual chain in it), even
1700 though it may still be a subset of the whole domain
1701 of PREDS, which is the union (ORed) of all its subdomains.
1702 In other words, the result is conservative. */
1704 static bool
1705 is_included_in (vec<use_pred_info_t> one_pred,
1706 vec<use_pred_info_t> *preds,
1707 size_t n)
1709 size_t i;
1711 for (i = 0; i < n; i++)
1713 if (is_pred_chain_subset_of (one_pred, preds[i]))
1714 return true;
1717 return false;
1720 /* Compares two predicate sets PREDS1 and PREDS2 and returns
1721 true if the domain defined by PREDS1 is a superset
1722 of PREDS2's domain. N1 and N2 are array sizes of PREDS1 and
1723 PREDS2 respectively. The implementation chooses not to build
1724 generic trees (and relying on the folding capability of the
1725 compiler), but instead performs brute force comparison of
1726 individual predicate chains (won't be a compile time problem
1727 as the chains are pretty short). When the function returns
1728 false, it does not necessarily mean *PREDS1 is not a superset
1729 of *PREDS2, but means it may not be so, since the analysis can
1730 not prove it. In such cases, false warnings may still be
1731 emitted. */
1733 static bool
1734 is_superset_of (vec<use_pred_info_t> *preds1,
1735 size_t n1,
1736 vec<use_pred_info_t> *preds2,
1737 size_t n2)
1739 size_t i;
1740 vec<use_pred_info_t> one_pred_chain;
1742 for (i = 0; i < n2; i++)
1744 one_pred_chain = preds2[i];
1745 if (!is_included_in (one_pred_chain, preds1, n1))
1746 return false;
1749 return true;
1752 /* Comparison function used by qsort. It is used to
1753 sort predicate chains to allow predicate
1754 simplification. */
1756 static int
1757 pred_chain_length_cmp (const void *p1, const void *p2)
1759 use_pred_info_t i1, i2;
1760 vec<use_pred_info_t> const *chain1
1761 = (vec<use_pred_info_t> const *)p1;
1762 vec<use_pred_info_t> const *chain2
1763 = (vec<use_pred_info_t> const *)p2;
1765 if (chain1->length () != chain2->length ())
1766 return (chain1->length () - chain2->length ());
1768 i1 = (*chain1)[0];
1769 i2 = (*chain2)[0];
1771 /* Allow predicates with similar prefix to come together. */
1772 if (!i1->invert && i2->invert)
1773 return -1;
1774 else if (i1->invert && !i2->invert)
1775 return 1;
1777 return gimple_uid (i1->cond) - gimple_uid (i2->cond);
1780 /* x OR (!x AND y) is equivalent to x OR y.
1781 This function normalizes x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3)
1782 into x1 OR x2 OR x3. PREDS is the array of predicate chains, and N is
1783 the number of chains. Returns true if normalization happens. */
1785 static bool
1786 normalize_preds (vec<use_pred_info_t> *preds, size_t *n)
1788 size_t i, j, ll;
1789 vec<use_pred_info_t> pred_chain;
1790 vec<use_pred_info_t> x = vNULL;
1791 use_pred_info_t xj = 0, nxj = 0;
1793 if (*n < 2)
1794 return false;
1796 /* First sort the chains in ascending order of lengths. */
1797 qsort (preds, *n, sizeof (void *), pred_chain_length_cmp);
1798 pred_chain = preds[0];
1799 ll = pred_chain.length ();
1800 if (ll != 1)
1802 if (ll == 2)
1804 use_pred_info_t xx, yy, xx2, nyy;
1805 vec<use_pred_info_t> pred_chain2 = preds[1];
1806 if (pred_chain2.length () != 2)
1807 return false;
1809 /* See if the simplification (x AND y) OR (x AND !y) => x is possible. */
1810 xx = pred_chain[0];
1811 yy = pred_chain[1];
1812 xx2 = pred_chain2[0];
1813 nyy = pred_chain2[1];
1814 if (gimple_cond_lhs (xx->cond) != gimple_cond_lhs (xx2->cond)
1815 || gimple_cond_rhs (xx->cond) != gimple_cond_rhs (xx2->cond)
1816 || gimple_cond_code (xx->cond) != gimple_cond_code (xx2->cond)
1817 || (xx->invert != xx2->invert))
1818 return false;
1819 if (gimple_cond_lhs (yy->cond) != gimple_cond_lhs (nyy->cond)
1820 || gimple_cond_rhs (yy->cond) != gimple_cond_rhs (nyy->cond)
1821 || gimple_cond_code (yy->cond) != gimple_cond_code (nyy->cond)
1822 || (yy->invert == nyy->invert))
1823 return false;
1825 /* Now merge the first two chains. */
1826 free (yy);
1827 free (nyy);
1828 free (xx2);
1829 pred_chain.release ();
1830 pred_chain2.release ();
1831 pred_chain.safe_push (xx);
1832 preds[0] = pred_chain;
1833 for (i = 1; i < *n - 1; i++)
1834 preds[i] = preds[i + 1];
1836 preds[*n - 1].create (0);
1837 *n = *n - 1;
1839 else
1840 return false;
1843 x.safe_push (pred_chain[0]);
1845 /* The loop extracts x1, x2, x3, etc from chains
1846 x1 OR (!x1 AND x2) OR (!x1 AND !x2 AND x3) OR ... */
1847 for (i = 1; i < *n; i++)
1849 pred_chain = preds[i];
1850 if (pred_chain.length () != i + 1)
1851 return false;
1853 for (j = 0; j < i; j++)
1855 xj = x[j];
1856 nxj = pred_chain[j];
1858 /* Check if nxj is !xj */
1859 if (gimple_cond_lhs (xj->cond) != gimple_cond_lhs (nxj->cond)
1860 || gimple_cond_rhs (xj->cond) != gimple_cond_rhs (nxj->cond)
1861 || gimple_cond_code (xj->cond) != gimple_cond_code (nxj->cond)
1862 || (xj->invert == nxj->invert))
1863 return false;
1866 x.safe_push (pred_chain[i]);
1869 /* Now normalize the pred chains using the extracted x1, x2, x3 etc. */
1870 for (j = 0; j < *n; j++)
1872 use_pred_info_t t;
1873 xj = x[j];
1875 t = XNEW (struct use_pred_info);
1876 *t = *xj;
1878 x[j] = t;
1881 for (i = 0; i < *n; i++)
1883 pred_chain = preds[i];
1884 for (j = 0; j < pred_chain.length (); j++)
1885 free (pred_chain[j]);
1886 pred_chain.release ();
1887 /* A new chain. */
1888 pred_chain.safe_push (x[i]);
1889 preds[i] = pred_chain;
1891 return true;
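/* Illustration only: with x1 == (a > 0), x2 == (b > 0) and x3 == (c > 0),
   the chains

       (a > 0)
       .OR. (.NOT. (a > 0)) .AND. (b > 0)
       .OR. (.NOT. (a > 0)) .AND. (.NOT. (b > 0)) .AND. (c > 0)

   are rewritten by normalize_preds into the equivalent

       (a > 0) .OR. (b > 0) .OR. (c > 0)

   which makes the later superset check (is_superset_of) simpler and more
   likely to succeed.  */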
1896 /* Computes the predicates that guard the use and checks
1897 if the incoming paths that have empty (or possibly
1898 empty) definition can be pruned/filtered. The function returns
1899 true if it can be determined that the use of PHI's def in
1900 USE_STMT is guarded with a predicate set not overlapping with
1901 predicate sets of all runtime paths that do not have a definition.
1902 Returns false if it is not or it can not be determined. USE_BB is
1903 the bb of the use (for phi operand use, the bb is not the bb of
1904 the phi stmt, but the src bb of the operand edge). UNINIT_OPNDS
1905 is a bit vector. If an operand of PHI is uninitialized, the
1906 corresponding bit in the vector is 1. VISITED_PHIS is a pointer
1907 set of phis being visited. */
1909 static bool
1910 is_use_properly_guarded (gimple use_stmt,
1911 basic_block use_bb,
1912 gimple phi,
1913 unsigned uninit_opnds,
1914 struct pointer_set_t *visited_phis)
1916 basic_block phi_bb;
1917 vec<use_pred_info_t> *preds = 0;
1918 vec<use_pred_info_t> *def_preds = 0;
1919 size_t num_preds = 0, num_def_preds = 0;
1920 bool has_valid_preds = false;
1921 bool is_properly_guarded = false;
1923 if (pointer_set_insert (visited_phis, phi))
1924 return false;
1926 phi_bb = gimple_bb (phi);
1928 if (is_non_loop_exit_postdominating (use_bb, phi_bb))
1929 return false;
1931 has_valid_preds = find_predicates (&preds, &num_preds,
1932 phi_bb, use_bb);
1934 if (!has_valid_preds)
1936 destroy_predicate_vecs (num_preds, preds);
1937 return false;
1940 if (dump_file)
1941 dump_predicates (use_stmt, num_preds, preds,
1942 "\nUse in stmt ");
1944 has_valid_preds = find_def_preds (&def_preds,
1945 &num_def_preds, phi);
1947 if (has_valid_preds)
1949 bool normed;
1950 if (dump_file)
1951 dump_predicates (phi, num_def_preds, def_preds,
1952 "Operand defs of phi ");
1954 normed = normalize_preds (def_preds, &num_def_preds);
1955 if (normed && dump_file)
1957 fprintf (dump_file, "\nNormalized to\n");
1958 dump_predicates (phi, num_def_preds, def_preds,
1959 "Operand defs of phi ");
1961 is_properly_guarded =
1962 is_superset_of (def_preds, num_def_preds,
1963 preds, num_preds);
1966 /* further prune the dead incoming phi edges. */
1967 if (!is_properly_guarded)
1968 is_properly_guarded
1969 = use_pred_not_overlap_with_undef_path_pred (
1970 num_preds, preds, phi, uninit_opnds, visited_phis);
1972 destroy_predicate_vecs (num_preds, preds);
1973 destroy_predicate_vecs (num_def_preds, def_preds);
1974 return is_properly_guarded;
1977 /* Searches through all uses of a potentially
1978 uninitialized variable defined by PHI and returns a use
1979 statement if the use is not properly guarded. It returns
1980 NULL if all uses are guarded. UNINIT_OPNDS is a bitvector
1981 holding the position(s) of uninit PHI operands. WORKLIST
1982 is the vector of candidate phis that may be updated by this
1983 function. ADDED_TO_WORKLIST is the pointer set tracking
1984 if the new phi is already in the worklist. */
1986 static gimple
1987 find_uninit_use (gimple phi, unsigned uninit_opnds,
1988 vec<gimple> *worklist,
1989 struct pointer_set_t *added_to_worklist)
1991 tree phi_result;
1992 use_operand_p use_p;
1993 gimple use_stmt;
1994 imm_use_iterator iter;
1996 phi_result = gimple_phi_result (phi);
1998 FOR_EACH_IMM_USE_FAST (use_p, iter, phi_result)
2000 struct pointer_set_t *visited_phis;
2001 basic_block use_bb;
2003 use_stmt = USE_STMT (use_p);
2004 if (is_gimple_debug (use_stmt))
2005 continue;
2007 visited_phis = pointer_set_create ();
2009 if (gimple_code (use_stmt) == GIMPLE_PHI)
2010 use_bb = gimple_phi_arg_edge (use_stmt,
2011 PHI_ARG_INDEX_FROM_USE (use_p))->src;
2012 else
2013 use_bb = gimple_bb (use_stmt);
2015 if (is_use_properly_guarded (use_stmt,
2016 use_bb,
2017 phi,
2018 uninit_opnds,
2019 visited_phis))
2021 pointer_set_destroy (visited_phis);
2022 continue;
2024 pointer_set_destroy (visited_phis);
2026 if (dump_file && (dump_flags & TDF_DETAILS))
2028 fprintf (dump_file, "[CHECK]: Found unguarded use: ");
2029 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2031 /* Found one real use, return. */
2032 if (gimple_code (use_stmt) != GIMPLE_PHI)
2033 return use_stmt;
2035 /* Found a phi use that is not guarded,
2036 add the phi to the worklist. */
2037 if (!pointer_set_insert (added_to_worklist,
2038 use_stmt))
2040 if (dump_file && (dump_flags & TDF_DETAILS))
2042 fprintf (dump_file, "[WORKLIST]: Update worklist with phi: ");
2043 print_gimple_stmt (dump_file, use_stmt, 0, 0);
2046 worklist->safe_push (use_stmt);
2047 pointer_set_insert (possibly_undefined_names, phi_result);
2051 return NULL;
2054 /* Look for inputs to PHI that are SSA_NAMEs that have empty definitions
2055 and gives warning if there exists a runtime path from the entry to a
2056 use of the PHI def that does not contain a definition. In other words,
2057 the warning is on the real use. The more dead paths that can be pruned
2058 by the compiler, the fewer false positives the warning produces. WORKLIST
2059 is a vector of candidate phis to be examined. ADDED_TO_WORKLIST is
2060 a pointer set tracking if the new phi is added to the worklist or not. */
2062 static void
2063 warn_uninitialized_phi (gimple phi, vec<gimple> *worklist,
2064 struct pointer_set_t *added_to_worklist)
2066 unsigned uninit_opnds;
2067 gimple uninit_use_stmt = 0;
2068 tree uninit_op;
2070 /* Don't look at virtual operands. */
2071 if (virtual_operand_p (gimple_phi_result (phi)))
2072 return;
2074 uninit_opnds = compute_uninit_opnds_pos (phi);
2076 if (MASK_EMPTY (uninit_opnds))
2077 return;
2079 if (dump_file && (dump_flags & TDF_DETAILS))
2081 fprintf (dump_file, "[CHECK]: examining phi: ");
2082 print_gimple_stmt (dump_file, phi, 0, 0);
2085 /* Now check if we have any use of the value without proper guard. */
2086 uninit_use_stmt = find_uninit_use (phi, uninit_opnds,
2087 worklist, added_to_worklist);
2089 /* All uses are properly guarded. */
2090 if (!uninit_use_stmt)
2091 return;
2093 uninit_op = gimple_phi_arg_def (phi, MASK_FIRST_SET_BIT (uninit_opnds));
2094 if (SSA_NAME_VAR (uninit_op) == NULL_TREE)
2095 return;
2096 warn_uninit (OPT_Wmaybe_uninitialized, uninit_op, SSA_NAME_VAR (uninit_op),
2097 SSA_NAME_VAR (uninit_op),
2098 "%qD may be used uninitialized in this function",
2099 uninit_use_stmt);
2104 /* Entry point to the late uninitialized warning pass. */
2106 static unsigned int
2107 execute_late_warn_uninitialized (void)
2109 basic_block bb;
2110 gimple_stmt_iterator gsi;
2111 vec<gimple> worklist = vNULL;
2112 struct pointer_set_t *added_to_worklist;
2114 calculate_dominance_info (CDI_DOMINATORS);
2115 calculate_dominance_info (CDI_POST_DOMINATORS);
2116 /* Re-do the plain uninitialized variable check, as optimization may have
2117 straightened control flow. Do this first so that we don't accidentally
2118 get a "may be" warning when we'd have seen an "is" warning later. */
2119 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/1);
2121 timevar_push (TV_TREE_UNINIT);
2123 possibly_undefined_names = pointer_set_create ();
2124 added_to_worklist = pointer_set_create ();
2126 /* Initialize worklist */
2127 FOR_EACH_BB (bb)
2128 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2130 gimple phi = gsi_stmt (gsi);
2131 size_t n, i;
2133 n = gimple_phi_num_args (phi);
2135 /* Don't look at virtual operands. */
2136 if (virtual_operand_p (gimple_phi_result (phi)))
2137 continue;
2139 for (i = 0; i < n; ++i)
2141 tree op = gimple_phi_arg_def (phi, i);
2142 if (TREE_CODE (op) == SSA_NAME
2143 && uninit_undefined_value_p (op))
2145 worklist.safe_push (phi);
2146 pointer_set_insert (added_to_worklist, phi);
2147 if (dump_file && (dump_flags & TDF_DETAILS))
2149 fprintf (dump_file, "[WORKLIST]: add to initial list: ");
2150 print_gimple_stmt (dump_file, phi, 0, 0);
2152 break;
2157 while (worklist.length () != 0)
2159 gimple cur_phi = 0;
2160 cur_phi = worklist.pop ();
2161 warn_uninitialized_phi (cur_phi, &worklist, added_to_worklist);
2164 worklist.release ();
2165 pointer_set_destroy (added_to_worklist);
2166 pointer_set_destroy (possibly_undefined_names);
2167 possibly_undefined_names = NULL;
2168 free_dominance_info (CDI_POST_DOMINATORS);
2169 timevar_pop (TV_TREE_UNINIT);
2170 return 0;
2173 static bool
2174 gate_warn_uninitialized (void)
2176 return warn_uninitialized != 0;
2179 namespace {
2181 const pass_data pass_data_late_warn_uninitialized =
2183 GIMPLE_PASS, /* type */
2184 "uninit", /* name */
2185 OPTGROUP_NONE, /* optinfo_flags */
2186 true, /* has_gate */
2187 true, /* has_execute */
2188 TV_NONE, /* tv_id */
2189 PROP_ssa, /* properties_required */
2190 0, /* properties_provided */
2191 0, /* properties_destroyed */
2192 0, /* todo_flags_start */
2193 0, /* todo_flags_finish */
2196 class pass_late_warn_uninitialized : public gimple_opt_pass
2198 public:
2199 pass_late_warn_uninitialized (gcc::context *ctxt)
2200 : gimple_opt_pass (pass_data_late_warn_uninitialized, ctxt)
2203 /* opt_pass methods: */
2204 opt_pass * clone () { return new pass_late_warn_uninitialized (m_ctxt); }
2205 bool gate () { return gate_warn_uninitialized (); }
2206 unsigned int execute () { return execute_late_warn_uninitialized (); }
2208 }; // class pass_late_warn_uninitialized
2210 } // anon namespace
2212 gimple_opt_pass *
2213 make_pass_late_warn_uninitialized (gcc::context *ctxt)
2215 return new pass_late_warn_uninitialized (ctxt);
2219 static unsigned int
2220 execute_early_warn_uninitialized (void)
2222 /* Currently, this pass always runs, but
2223 execute_late_warn_uninitialized only runs with optimization. With
2224 optimization we want to warn about possibly uninitialized uses as late
2225 as possible, thus don't do it here. However, without
2226 optimization we need to warn here about "may be uninitialized".
2228 calculate_dominance_info (CDI_POST_DOMINATORS);
2230 warn_uninitialized_vars (/*warn_possibly_uninitialized=*/!optimize);
2232 /* Post-dominator information can not be reliably updated. Free it
2233 after the use. */
2235 free_dominance_info (CDI_POST_DOMINATORS);
2236 return 0;
2240 namespace {
2242 const pass_data pass_data_early_warn_uninitialized =
2244 GIMPLE_PASS, /* type */
2245 "*early_warn_uninitialized", /* name */
2246 OPTGROUP_NONE, /* optinfo_flags */
2247 true, /* has_gate */
2248 true, /* has_execute */
2249 TV_TREE_UNINIT, /* tv_id */
2250 PROP_ssa, /* properties_required */
2251 0, /* properties_provided */
2252 0, /* properties_destroyed */
2253 0, /* todo_flags_start */
2254 0, /* todo_flags_finish */
2257 class pass_early_warn_uninitialized : public gimple_opt_pass
2259 public:
2260 pass_early_warn_uninitialized (gcc::context *ctxt)
2261 : gimple_opt_pass (pass_data_early_warn_uninitialized, ctxt)
2264 /* opt_pass methods: */
2265 bool gate () { return gate_warn_uninitialized (); }
2266 unsigned int execute () { return execute_early_warn_uninitialized (); }
2268 }; // class pass_early_warn_uninitialized
2270 } // anon namespace
2272 gimple_opt_pass *
2273 make_pass_early_warn_uninitialized (gcc::context *ctxt)
2275 return new pass_early_warn_uninitialized (ctxt);