/* SSA Dominator optimizations for trees
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
/* This file implements optimizations on the dominator tree.  */
/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { tree fn;  bool pure;  size_t nargs;  tree *args; } call;
  } ops;
};
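
/* As an illustrative example (not from the sources): the assignment
   a_1 = b_2 + c_3 would be recorded with kind == EXPR_BINARY,
   ops.binary.op == PLUS_EXPR, ops.binary.opnd0 == b_2 and
   ops.binary.opnd1 == c_3; the LHS a_1 is stored separately in the
   enclosing hash table element.  */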
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;

DEF_VEC_O(cond_equivalence);
DEF_VEC_ALLOC_O(cond_equivalence,heap);
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  VEC(cond_equivalence, heap) *cond_equivalences;
};
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static htab_t avail_exprs;
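
/* For example (illustrative):

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   The second occurrence of b_2 + c_3 hits the table entry created by
   the first and is replaced, yielding d_4 = a_1.  */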
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;
DEF_VEC_P(expr_hash_elt_t);
DEF_VEC_ALLOC_P(expr_hash_elt_t,heap);

static VEC(expr_hash_elt_t,heap) *avail_exprs_stack;
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static VEC(tree,heap) *const_and_copies_stack;
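
/* Illustrative layout (not from the sources): recording x_1 = y_2
   while x_1 previously had value z_3 pushes z_3 and then x_1, so
   finalization pops x_1 and restores its value to z_3.  A NULL_TREE
   marker separates the pairs belonging to each block.  */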
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;
/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static hashval_t real_avail_expr_hash (const void *);
static int avail_expr_eq (const void *, const void *);
static void htab_statistics (FILE *, htab_t);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
			 struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      expr->type = NULL_TREE;

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn = gimple_call_fn (stmt);

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = (tree *) xcalloc (nargs, sizeof (tree));
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
				   tree lhs,
				   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
	  || TREE_CODE (type1) == ERROR_MARK
	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
			      expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
	return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
	   || expr0->ops.unary.op == NON_LVALUE_EXPR)
	  && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
	return false;

      return operand_equal_p (expr0->ops.unary.opnd,
			      expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
	return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
			   expr1->ops.binary.opnd0, 0)
	  && operand_equal_p (expr0->ops.binary.opnd1,
			      expr1->ops.binary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
	      && operand_equal_p (expr0->ops.binary.opnd0,
				  expr1->ops.binary.opnd1, 0)
	      && operand_equal_p (expr0->ops.binary.opnd1,
				  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
	  || !operand_equal_p (expr0->ops.ternary.opnd2,
			       expr1->ops.ternary.opnd2, 0))
	return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
			   expr1->ops.ternary.opnd0, 0)
	  && operand_equal_p (expr0->ops.ternary.opnd1,
			      expr1->ops.ternary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
	      && operand_equal_p (expr0->ops.ternary.opnd0,
				  expr1->ops.ternary.opnd1, 0)
	      && operand_equal_p (expr0->ops.ternary.opnd1,
				  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
	size_t i;

	/* If the calls are to different functions, then they
	   clearly cannot be equal.  */
	if (! operand_equal_p (expr0->ops.call.fn,
			       expr1->ops.call.fn, 0))
	  return false;

	if (! expr0->ops.call.pure)
	  return false;

	if (expr0->ops.call.nargs != expr1->ops.call.nargs)
	  return false;

	for (i = 0; i < expr0->ops.call.nargs; i++)
	  if (! operand_equal_p (expr0->ops.call.args[i],
				 expr1->ops.call.args[i], 0))
	    return false;

	return true;
      }

    default:
      gcc_unreachable ();
    }
}
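
/* For example (illustrative): b_2 + c_3 and c_3 + b_2 compare equal
   because PLUS_EXPR is commutative, while b_2 - c_3 and c_3 - b_2 do
   not.  */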
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
	 Don't hash the type, that can lead to having nodes which
	 compare equal according to operand_equal_p, but which
	 have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
	  || expr->ops.unary.op == NON_LVALUE_EXPR)
	val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
	val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
						expr->ops.binary.opnd1, val);
      else
	{
	  val = iterative_hash_expr (expr->ops.binary.opnd0, val);
	  val = iterative_hash_expr (expr->ops.binary.opnd1, val);
	}
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
	val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
						expr->ops.ternary.opnd1, val);
      else
	{
	  val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
	  val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
	}
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
	size_t i;
	enum tree_code code = CALL_EXPR;

	val = iterative_hash_object (code, val);
	val = iterative_hash_expr (expr->ops.call.fn, val);
	for (i = 0; i < expr->ops.call.nargs; i++)
	  val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
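
/* Note the pairing with the equality predicate above: because
   commutative operands are hashed with iterative_hash_exprs_commutative,
   b_2 + c_3 and c_3 + b_2 (an illustrative pair) receive the same hash
   value, matching their equality under hashable_expr_equal_p.  */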
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
	size_t i;
	size_t nargs = element->expr.ops.call.nargs;

	print_generic_expr (stream, element->expr.ops.call.fn, 0);
	fprintf (stream, " (");
	for (i = 0; i < nargs; i++)
	  {
	    print_generic_expr (stream, element->expr.ops.call.args[i], 0);
	    if (i + 1 < nargs)
	      fprintf (stream, ", ");
	  }
	fprintf (stream, ")");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);

  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
	  struct edge_info *edge_info = (struct edge_info *) e->aux;

	  if (edge_info)
	    {
	      if (edge_info->cond_equivalences)
		VEC_free (cond_equivalence, heap, edge_info->cond_equivalences);
	      free (edge_info);
	      e->aux = NULL;
	    }
        }
    }
}
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = htab_create (1024, real_avail_expr_hash, avail_expr_eq, free_expr_hash_elt);
  avail_exprs_stack = VEC_alloc (expr_hash_elt_t, heap, 20);
  const_and_copies_stack = VEC_alloc (tree, heap, 20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* Setup callbacks for the generic dominator tree walker.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = dom_opt_enter_block;
  walk_data.after_dom_children = dom_opt_leave_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK (i);
	  if (bb
	      && single_succ_p (bb)
	      && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    {
	      bitmap_clear_bit (need_eh_cleanup, i);
	      bitmap_set_bit (need_eh_cleanup, single_succ (bb)->index);
	    }
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_zero (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  htab_delete (avail_exprs);

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  VEC_free (expr_hash_elt_t, heap, avail_exprs_stack);
  VEC_free (tree, heap, const_and_copies_stack);

  /* Free the value-handle array.  */
  threadedge_finalize_values ();
  ssa_name_values = NULL;

  return 0;
}
static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}
struct gimple_opt_pass pass_dominator =
{
 {
  GIMPLE_PASS,
  "dom",				/* name */
  gate_dominator,			/* gate */
  tree_ssa_dominator_optimize,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_SSA_DOMINATOR_OPTS,		/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_update_ssa
    | TODO_verify_ssa			/* todo_flags_finish */
 }
};
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
	  code = swap_tree_comparison (code);

	  gimple_cond_set_code (condstmt, code);
	  gimple_cond_set_lhs (condstmt, op1);
	  gimple_cond_set_rhs (condstmt, op0);

	  update_stmt (condstmt);
	}
    }
}
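
/* For example (illustrative): if (5 > x_1) is rewritten as
   if (x_1 < 5), moving the constant to the RHS so that later lookups
   of the condition see a single canonical form.  */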
/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (VEC_length (expr_hash_elt_t, avail_exprs_stack) > 0)
    {
      expr_hash_elt_t victim = VEC_pop (expr_hash_elt_t, avail_exprs_stack);
      void **slot;

      if (victim == NULL)
	break;

      /* This must precede the actual removal from the hash table,
	 as ELEMENT and the table entry may share a call argument
	 vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = htab_find_slot_with_hash (avail_exprs,
				       victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == (void *) victim);
      htab_clear_slot (avail_exprs, slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (VEC_length (tree, const_and_copies_stack) > 0)
    {
      tree prev_value, dest;

      dest = VEC_pop (tree, const_and_copies_stack);

      if (dest == NULL)
	break;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< COPY ");
	  print_generic_expr (dump_file, dest, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
	  fprintf (dump_file, "\n");
	}

      prev_value = VEC_pop (tree, const_and_copies_stack);
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
				  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
    {
      gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
      walk_data->global_data = dummy_cond;
    }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
		      &const_and_copies_stack,
		      simplify_stmt_for_jump_threading);
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}
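
/* For example (illustrative):

     x_5 = PHI <y_2(3), y_2(4), y_2(5)>

   Every alternative is y_2, so x_5 is equivalent to y_2 and later uses
   of x_5 may be replaced by y_2.  */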
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;
	  cond_equivalence *eq;

	  if (lhs)
	    record_equality (lhs, rhs);

	  for (i = 0; VEC_iterate (cond_equivalence,
				   edge_info->cond_equivalences, i, eq); ++i)
	    record_cond (eq);
	}
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
	   opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, htab_t htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab_size (htab),
	   (long) htab_elements (htab),
	   htab_collisions (htab));
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  void **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = htab_find_slot_with_hash (avail_exprs, (void *)element,
				   element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = (void *) element;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "1>>> ");
	  print_expr_hash_elt (dump_file, element);
	}

      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element);
    }
  else
    free (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
			   tree op0, tree op1,
			   VEC(cond_equivalence, heap) **p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  VEC_safe_push (cond_equivalence, heap, *p, &c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  VEC_safe_push (cond_equivalence, heap, edge_info->cond_equivalences, &c);
}
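
/* For example (illustrative): for if (a_1 < b_2) on an integral type,
   the true edge records a_1 <= b_2 and a_1 != b_2 as true in addition
   to a_1 < b_2 itself, while the false edge records the inverted
   condition a_1 >= b_2 as true.  */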
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  VEC_reserve (tree, heap, const_and_copies_stack, 2);
  VEC_quick_push (tree, const_and_copies_stack, prev_x);
  VEC_quick_push (tree, const_and_copies_stack, x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return defbb->loop_depth;
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
	y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
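
/* For example (illustrative): from if (x_1 == 7) we may record x_1 = 7
   on the true edge.  But from if (x_1 == 0.0) with signed zeros
   honored we may not, since x_1 could be -0.0 and still compare equal
   to 0.0.  */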
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

static bool
simple_iv_increment_p (gimple stmt)
{
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  if (gimple_assign_rhs_code (stmt) != PLUS_EXPR
      && gimple_assign_rhs_code (stmt) != MINUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
	  gimple phi = gsi_stmt (gsi);

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && (TREE_CODE (new_val) == SSA_NAME
		  || is_gimple_min_invariant (new_val))
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}
    }
}
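
/* For example (illustrative): if x_3 is known to equal 5 at the end of
   BB, a successor's PHI argument coming in on BB's edge is rewritten,
   so y_7 = PHI <x_3(BB), ...> becomes y_7 = PHI <5(BB), ...>.  */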
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  tree index = gimple_switch_index (stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
	      int n_labels = gimple_switch_num_labels (stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block);
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = allocate_edge_info (e);
		      edge_info->lhs = index;
		      edge_info->rhs = x;
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);
	  enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

	  /* Special case comparing booleans against a constant as we
	     know the value of OP0 on both arms of the branch.  i.e., we
	     can record an equivalence for OP0 rather than COND.  */
	  if ((code == EQ_EXPR || code == NE_EXPR)
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
	      && is_gimple_min_invariant (op1))
	    {
	      if (code == EQ_EXPR)
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_false_node
				    : boolean_true_node);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_true_node
				    : boolean_false_node);
		}
	      else
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_true_node
				    : boolean_false_node);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_false_node
				    : boolean_true_node);
		}
	    }
	  else if (is_gimple_min_invariant (op0)
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (code == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}
	    }
	  else if (TREE_CODE (op0) == SSA_NAME
		   && (is_gimple_min_invariant (op1)
		       || TREE_CODE (op1) == SSA_NAME))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (code == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}
	    }
	}

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
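
/* For example (illustrative): for a boolean x_1, if (x_1 == 0) records
   x_1 = false on the true edge and x_1 = true on the false edge,
   rather than recording a value for the condition itself.  */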
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

static void
dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
		     basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

static void
dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      dom_thread_across_edge (walk_data, single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
	   && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	{
	  struct edge_info *edge_info;
	  unsigned int i;

	  /* Push a marker onto the available expression stack so that we
	     unwind any expressions related to the TRUE arm before processing
	     the false arm below.  */
	  VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, NULL);
	  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);

	  edge_info = (struct edge_info *) true_edge->aux;

	  /* If we have info associated with this edge, record it into
	     our equivalence tables.  */
	  if (edge_info)
	    {
	      cond_equivalence *eq;
	      tree lhs = edge_info->lhs;
	      tree rhs = edge_info->rhs;

	      /* If we have a simple NAME = VALUE equivalence, record it.  */
	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
		record_const_or_copy (lhs, rhs);

	      /* If we have 0 = COND or 1 = COND equivalences, record them
		 into our expression hash tables.  */
	      for (i = 0; VEC_iterate (cond_equivalence,
				       edge_info->cond_equivalences, i, eq); ++i)
		record_cond (eq);
	    }

	  dom_thread_across_edge (walk_data, true_edge);

	  /* And restore the various tables to their state before
	     we threaded this edge.  */
	  remove_local_expressions_from_table ();
	  restore_vars_to_original_value ();
	}

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	{
	  struct edge_info *edge_info;
	  unsigned int i;

	  VEC_safe_push (tree, heap, const_and_copies_stack, NULL_TREE);
	  edge_info = (struct edge_info *) false_edge->aux;

	  /* If we have info associated with this edge, record it into
	     our equivalence tables.  */
	  if (edge_info)
	    {
	      cond_equivalence *eq;
	      tree lhs = edge_info->lhs;
	      tree rhs = edge_info->rhs;

	      /* If we have a simple NAME = VALUE equivalence, record it.  */
	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
		record_const_or_copy (lhs, rhs);

	      /* If we have 0 = COND or 1 = COND equivalences, record them
		 into our expression hash tables.  */
	      for (i = 0; VEC_iterate (cond_equivalence,
				       edge_info->cond_equivalences, i, eq); ++i)
		record_cond (eq);
	    }

	  /* Now thread the edge.  */
	  dom_thread_across_edge (walk_data, false_edge);

	  /* No need to remove local expressions from our tables
	     or restore vars to their original value as that will
	     be done immediately below.  */
	}
    }

  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  tree def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
	   || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced redundant expr '");
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (dump_file, "' with '");
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      opt_stats.num_re++;

      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
	 perhaps we should move this into propagate_tree_value_into_stmt
	 itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */

static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
	 may be propagated, register it in the CONST_AND_COPIES table.  We
	 do not need to record unwind data for this, since this is a true
	 assignment and not an equivalence inferred from a comparison.  All
	 uses of this ssa name are dominated by this assignment, so unwinding
	 just costs time and space.  */
      if (may_optimize_p
	  && (TREE_CODE (rhs) == SSA_NAME
	      || is_gimple_min_invariant (rhs)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "==== ASGN ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, " = ");
	      print_generic_expr (dump_file, rhs, 0);
	      fprintf (dump_file, "\n");
	    }

	  set_ssa_name_value (lhs, rhs);
	}
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  /* NOTE tuples.  The call to gimple_build_assign below replaced
	     a call to build_gimple_modify_stmt, which did not set the
	     SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
	     may cause an SSA validation failure, as the LHS may be a
	     default-initialized name and should have no definition.  I'm
	     a bit dubious of this, as the artificial statement that we
	     generate here may in fact be ill-formed, but it is simply
	     used as an internal device in this pass, and never becomes
	     part of the CFG.  */
	  gimple defstmt = SSA_NAME_DEF_STMT (rhs);
	  new_stmt = gimple_build_assign (rhs, lhs);
	  SSA_NAME_DEF_STMT (rhs) = defstmt;
	}
      else
	new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
	 table.  */
      lookup_avail_expr (new_stmt, true);
    }
}
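
/* For example (illustrative): after the store *p_1 = x_2, the
   artificial statement x_2 = *p_1 is entered into the table, so a
   subsequent load y_3 = *p_1 can be replaced by y_3 = x_2.  */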
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not change the base variable in the virtual operand
	 tables.  That would make it impossible to reconstruct
	 the renamed virtual operand if we later modify this
	 statement.  Also only allow the new value to be an SSA_NAME
	 for propagation into virtual operands.  */
      if (!is_gimple_reg (op)
	  && (TREE_CODE (val) != SSA_NAME
	      || is_gimple_reg (val)
	      || get_virtual_var (val) != get_virtual_var (op)))
	return;

      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (op))
	return;

      /* Certain operands are not allowed to be copy propagated due
	 to their interaction with exception handling and some GCC
	 extensions.  */
      if (!may_propagate_copy (op, val))
	return;

      /* Do not propagate addresses that point to volatiles into memory
	 stmts without volatile operands.  */
      if (POINTER_TYPE_P (TREE_TYPE (val))
	  && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
	  && gimple_has_mem_ops (stmt)
	  && !gimple_has_volatile_ops (stmt))
	return;

      /* Do not propagate copies if the propagated value is at a deeper loop
	 depth than the propagatee.  Otherwise, this may move loop variant
	 variables outside of their loops and prevent coalescing
	 opportunities.  If the value was loop invariant, it will be hoisted
	 by LICM and exposed for copy propagation.  */
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
	return;

      /* Do not propagate copies into simple IV increment statements.
	 See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
	  && simple_iv_increment_p (stmt))
	return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced '");
	  print_generic_expr (dump_file, op, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, val, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      if (TREE_CODE (val) != SSA_NAME)
	opt_stats.num_const_prop++;
      else
	opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
	 safe, even if we changed virtual operands since we will
	 rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_ALL_USES)
    {
      if (TREE_CODE (USE_FROM_PTR (op_p)) == SSA_NAME)
	cprop_operand (stmt, op_p);
    }
}
/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
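
/* For instance (a hypothetical GIMPLE fragment; the SSA names are
   illustrative):

	a_1 = b_2 + c_3;
	if (a_1 > 0)
	  d_4 = b_2 + c_3;

   Case 1 replaces the dominated recomputation with "d_4 = a_1"; and if an
   earlier assignment "b_2 = 7" had been seen, case 2 would rewrite the
   uses of b_2 with the constant 7.  */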
static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
	 up to date.  */
      if (fold_stmt (&si))
	{
	  stmt = gsi_stmt (si);
	  gimple_set_modified (stmt, true);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "  Folded to: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	    }
	}

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
	rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
	rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
	/* This should never be an ADDR_EXPR.  */
	rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
	 even if fold_stmt updated the stmt already and thus cleared
	 gimple_modified_p flag on it.  */
      modified_p = true;
    }
  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_volatile_ops (stmt)
		    && ((is_gimple_assign (stmt)
			 && !gimple_rhs_has_side_effects (stmt))
			|| (is_gimple_call (stmt)
			    && gimple_call_lhs (stmt) != NULL_TREE
			    && !gimple_rhs_has_side_effects (stmt))
			|| gimple_code (stmt) == GIMPLE_COND
			|| gimple_code (stmt) == GIMPLE_SWITCH));
  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
	{
	  /* Resolve __builtin_constant_p.  If it hasn't been
	     folded to integer_one_node by now, it's fairly
	     certain that the value simply isn't constant.  */
	  tree callee = gimple_call_fndecl (stmt);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
	    {
	      propagate_tree_value_into_stmt (&si, integer_zero_node);
	      stmt = gsi_stmt (si);
	    }
	}

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);
      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree cached_lhs;
	  gimple new_stmt;
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      tree tem = SSA_NAME_VALUE (rhs);
	      if (tem)
		rhs = tem;
	    }
	  /* Build a new statement with the RHS and LHS exchanged.  */
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (rhs);
	      new_stmt = gimple_build_assign (rhs, lhs);
	      SSA_NAME_DEF_STMT (rhs) = defstmt;
	    }
	  else
	    new_stmt = gimple_build_assign (rhs, lhs);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  cached_lhs = lookup_avail_expr (new_stmt, false);
	  if (cached_lhs
	      && rhs == cached_lhs)
	    {
	      basic_block bb = gimple_bb (stmt);
	      int lp_nr = lookup_stmt_eh_lp (stmt);
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&si, true);
	      if (lp_nr != 0)
		{
		  bitmap_set_bit (need_eh_cleanup, bb->index);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
		}
	      return;
	    }
	}
    }
  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs cannot be eliminated until
     we remove unreachable blocks.

     We cannot remove unreachable blocks until after we have completed
     any queued jump threading.

     We cannot complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
	val = fold_binary_loc (gimple_location (stmt),
			       gimple_cond_code (stmt), boolean_type_node,
			       gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
	val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
	cfg_altered = true;

      /* If we simplified a statement in such a way as to be shown that it
	 cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	{
	  bitmap_set_bit (need_eh_cleanup, bb->index);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
	}
    }
}
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */
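
/* A usage sketch (both callers appear in this file):
   record_equivalences_from_stmt calls lookup_avail_expr (new_stmt, true)
   to enter an expression into the table, while the redundant store
   elimination in optimize_stmt calls lookup_avail_expr (new_stmt, false)
   to query the table without polluting it with its artificial
   statement.  */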
static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  void **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of assignment or call, else NULL_TREE.  */
  lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
	  || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = htab_find_slot_with_hash (avail_exprs, &element, element.hash,
				   (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    return NULL_TREE;

  if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = (void *) element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "2>>> ");
	  print_expr_hash_elt (dump_file, element2);
	}

      VEC_safe_push (expr_hash_elt_t, heap, avail_exprs_stack, element2);
      return NULL_TREE;
    }

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
	lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  hashval_t val = 0;

  val = iterative_hash_hashable_expr (expr, val);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return val;

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    val = iterative_hash_expr (vuse, val);

  return val;
}
static hashval_t
real_avail_expr_hash (const void *p)
{
  return ((const struct expr_hash_elt *)p)->hash;
}
static int
avail_expr_eq (const void *p1, const void *p2)
{
  gimple stmt1 = ((const struct expr_hash_elt *)p1)->stmt;
  const struct hashable_expr *expr1 = &((const struct expr_hash_elt *)p1)->expr;
  const struct expr_hash_elt *stamp1 = ((const struct expr_hash_elt *)p1)->stamp;
  gimple stmt2 = ((const struct expr_hash_elt *)p2)->stmt;
  const struct hashable_expr *expr2 = &((const struct expr_hash_elt *)p2)->expr;
  const struct expr_hash_elt *stamp2 = ((const struct expr_hash_elt *)p2)->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
	      == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */
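
/* For example, jump threading may leave behind a PHI such as the
   following (hypothetical) one:

	x_5 = PHI <y_2(3), y_2(4)>

   which is degenerate:  every argument is the same, so x_5 can simply be
   replaced by y_2 everywhere and the PHI deleted.  */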
/* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
   NULL.  */

static tree
degenerate_phi_result (gimple phi)
{
  tree lhs = gimple_phi_result (phi);
  tree val = NULL;
  size_t i;

  /* Ignoring arguments which are the same as LHS, if all the remaining
     arguments are the same, then the PHI is a degenerate and has the
     value of that common argument.  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (arg == lhs)
	continue;
      else if (!arg)
	break;
      else if (!val)
	val = arg;
      else if (arg == val)
	continue;
      /* We bring in some of operand_equal_p not only to speed things
	 up, but also to avoid crashing when dereferencing the type of
	 a released SSA name.  */
      else if (TREE_CODE (val) != TREE_CODE (arg)
	       || TREE_CODE (val) == SSA_NAME
	       || !operand_equal_p (arg, val, 0))
	break;
    }
  return (i == gimple_phi_num_args (phi) ? val : NULL);
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node; in the case of a non-degenerate
   PHI, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  gcc_unreachable ();
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  gcc_unreachable ();
}
/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */
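
/* A sketch of such a secondary opportunity (hypothetical names):
   replacing x_1 with the constant 0 in

	y_2 = PHI <x_1(5), 0(6)>

   leaves "y_2 = PHI <0(5), 0(6)>", a newly degenerate PHI, so y_2's
   version is marked in INTERESTING_NAMES for a later worklist visit.  */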
static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs,
			bitmap interesting_names)
{
  /* First verify that propagation is valid and isn't going to move a
     loop variant variable outside its loop.  */
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      && (TREE_CODE (rhs) != SSA_NAME
	  || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
      && may_propagate_copy (lhs, rhs)
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replacing '");
	  print_generic_expr (dump_file, lhs, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, rhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}
      /* Walk over every use of LHS and try to replace the use with RHS.
	 At this point the only reason why such a propagation would not
	 be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  /* Leave debug stmts alone.  If we succeed in propagating
	     all non-debug uses, we'll drop the DEF, and propagation
	     into debug stmts will occur then.  */
	  if (gimple_debug_bind_p (use_stmt))
	    continue;

	  /* It's not always safe to propagate into an ASM_EXPR.  */
	  if (gimple_code (use_stmt) == GIMPLE_ASM
	      && ! may_propagate_copy_into_asm (lhs))
	    {
	      all = false;
	      continue;
	    }

	  /* It's not ok to propagate into the definition stmt of RHS.
		<bb 9>:
		  # prephitmp.12_36 = PHI <g_67.1_6(9)>
		  g_67.1_6 = prephitmp.12_36;
	     While this is strictly all dead code we do not want to
	     deal with this here.  */
	  if (TREE_CODE (rhs) == SSA_NAME
	      && SSA_NAME_DEF_STMT (rhs) == use_stmt)
	    {
	      all = false;
	      continue;
	    }
	  /* Dump details.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "    Original statement:");
	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
	    }

	  /* Propagate the RHS into this use of the LHS.  */
	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	    propagate_value (use_p, rhs);

	  /* Special cases to avoid useless calls into the folding
	     routines, operand scanning, etc.

	     First, propagation into a PHI may cause the PHI to become
	     a degenerate, so mark the PHI as interesting.  No other
	     actions are necessary.

	     Second, if we're propagating a virtual operand and the
	     propagation does not change the underlying _DECL node for
	     the virtual operand, then no further actions are necessary.  */
	  if (gimple_code (use_stmt) == GIMPLE_PHI
	      || (! is_gimple_reg (lhs)
		  && TREE_CODE (rhs) == SSA_NAME
		  && SSA_NAME_VAR (lhs) == SSA_NAME_VAR (rhs)))
	    {
	      /* Dump details.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "    Updated statement:");
		  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
		}

	      /* Propagation into a PHI may expose new degenerate PHIs,
		 so mark the result of the PHI as interesting.  */
	      if (gimple_code (use_stmt) == GIMPLE_PHI)
		{
		  tree result = get_lhs_or_phi_result (use_stmt);
		  bitmap_set_bit (interesting_names,
				  SSA_NAME_VERSION (result));
		}

	      continue;
	    }
	  /* From this point onward we are propagating into a
	     real statement.  Folding may (or may not) be possible,
	     we may expose new operands, expose dead EH edges,
	     etc.  */
	  /* NOTE tuples.  In the tuples world, fold_stmt_inplace
	     cannot fold a call that simplifies to a constant,
	     because the GIMPLE_CALL must be replaced by a
	     GIMPLE_ASSIGN, and there is no way to effect such a
	     transformation in-place.  We might want to consider
	     using the more general fold_stmt here.  */
	  fold_stmt_inplace (use_stmt);

	  /* Sometimes propagation can expose new operands to the
	     renamer.  */
	  update_stmt (use_stmt);

	  /* Dump details.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "    Updated statement:");
	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
	    }

	  /* If we replaced a variable index with a constant, then
	     we would need to update the invariant flag for ADDR_EXPRs.  */
	  if (gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr
		(gimple_assign_rhs1 (use_stmt));

	  /* If we cleaned up EH information from the statement,
	     mark its containing block as needing EH cleanups.  */
	  if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
	    {
	      bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, "    Flagged to clear EH edges.\n");
	    }

	  /* Propagation may expose new trivial copy/constant propagation
	     opportunities.  */
	  if (gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	      && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
		  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
	    {
	      tree result = get_lhs_or_phi_result (use_stmt);
	      bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
	    }
	  /* Propagation into these nodes may make certain edges in
	     the CFG unexecutable.  We want to identify them, as PHI
	     nodes at the destination of those unexecutable edges may
	     become degenerates.  */
	  else if (gimple_code (use_stmt) == GIMPLE_COND
		   || gimple_code (use_stmt) == GIMPLE_SWITCH
		   || gimple_code (use_stmt) == GIMPLE_GOTO)
	    {
	      tree val;

	      if (gimple_code (use_stmt) == GIMPLE_COND)
		val = fold_binary_loc (gimple_location (use_stmt),
				       gimple_cond_code (use_stmt),
				       boolean_type_node,
				       gimple_cond_lhs (use_stmt),
				       gimple_cond_rhs (use_stmt));
	      else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
		val = gimple_switch_index (use_stmt);
	      else
		val = gimple_goto_dest (use_stmt);

	      if (val && is_gimple_min_invariant (val))
		{
		  basic_block bb = gimple_bb (use_stmt);
		  edge te = find_taken_edge (bb, val);
		  edge_iterator ei;
		  edge e;
		  gimple_stmt_iterator gsi, psi;

		  /* Remove all outgoing edges except TE.  */
		  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
		    {
		      if (e != te)
			{
			  /* Mark all the PHI nodes at the destination of
			     the unexecutable edge as interesting.  */
			  for (psi = gsi_start_phis (e->dest);
			       !gsi_end_p (psi);
			       gsi_next (&psi))
			    {
			      gimple phi = gsi_stmt (psi);

			      tree result = gimple_phi_result (phi);
			      int version = SSA_NAME_VERSION (result);

			      bitmap_set_bit (interesting_names, version);
			    }

			  te->probability += e->probability;

			  te->count += e->count;
			  remove_edge (e);
			  cfg_altered = true;
			}
		      else
			ei_next (&ei);
		    }

		  gsi = gsi_last_bb (gimple_bb (use_stmt));
		  gsi_remove (&gsi, true);

		  /* And fixup the flags on the single remaining edge.  */
		  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
		  te->flags &= ~EDGE_ABNORMAL;
		  te->flags |= EDGE_FALLTHRU;
		  if (te->probability > REG_BR_PROB_BASE)
		    te->probability = REG_BR_PROB_BASE;
		}
	    }
	}

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
	 we can remove STMT.  */
      if (all)
	remove_stmt_or_phi (stmt);
    }
}
/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);

  /* Note that STMT may well have been deleted by now, so do
     not access it, instead use the saved version # to clear
     STMT's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}
/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}
/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be able to be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated, any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   compile-time cost.  */
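
/* A first order/second order example (hypothetical names):  eliminating
   the degenerate PHI "a_1 = PHI <b_2(3), b_2(4)>" during the dominator
   walk may turn "c_5 = PHI <a_1(7), b_2(8)>" into a PHI whose arguments
   are all b_2; that second order degenerate is then caught and removed
   by the worklist phase.  */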
static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We cannot
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
	 changed during the loop.  Copy it to another bitmap and
	 use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
	{
	  tree name = ssa_name (i);

	  /* Ignore SSA_NAMEs that have been released because
	     their defining statement was deleted (unreachable).  */
	  if (name)
	    eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
				     interesting_names);
	}
    }

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}
struct gimple_opt_pass pass_phi_only_cprop =
{
 {
  GIMPLE_PASS,
  "phicprop",				/* name */
  gate_dominator,			/* gate */
  eliminate_degenerate_phis,		/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_TREE_PHI_CPROP,			/* tv_id */
  PROP_cfg | PROP_ssa,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_dump_func
    | TODO_ggc_collect
    | TODO_verify_ssa
    | TODO_verify_stmts
    | TODO_update_ssa			/* todo_flags_finish */
 }
};