/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "params.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */
enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt *expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
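
/* For instance (an illustrative GIMPLE fragment, not taken from any
   particular testcase):

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;   <-- the RHS is already available as a_1, so this
                            becomes d_4 = a_1;  */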
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
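
/* As an illustration of the unwind protocol (hypothetical contents):
   after recording x_1 = 5 inside a block, the stack holds
   [..., NULL_TREE, <previous value of x_1>, x_1]; popping back to the
   NULL_TREE marker restores x_1's previous value on block exit.  */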
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void dom_thread_across_edge (struct dom_walk_data *, edge);
static void dom_opt_leave_block (struct dom_walk_data *, basic_block);
static void dom_opt_enter_block (struct dom_walk_data *, basic_block);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
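
/* For example (illustrative): for the assignment a_1 = b_2 + c_3 this
   builds an EXPR_BINARY element with ops.binary.op == PLUS_EXPR,
   opnd0 == b_2, opnd1 == c_3, and LHS a_1.  */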
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   considered illegal.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
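
/* Note (illustrative): because PLUS_EXPR is commutative, b_2 + c_3 and
   c_3 + b_2 compare equal here, and iterative_hash_hashable_expr below
   hashes the operands order-insensitively so the two forms also land in
   the same hash bucket.  */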
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", tree_code_name[element->expr.ops.unary.op]);
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", tree_code_name[element->expr.ops.binary.op]);
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", tree_code_name[element->expr.ops.ternary.op]);
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  struct dom_walk_data walk_data;

  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* Setup callbacks for the generic dominator tree walker.  */
  walk_data.dom_direction = CDI_DOMINATORS;
  walk_data.initialize_block_local_data = NULL;
  walk_data.before_dom_children = dom_opt_enter_block;
  walk_data.after_dom_children = dom_opt_leave_block;
  /* Right now we only attach a dummy COND_EXPR to the global data pointer.
     When we attach more stuff we'll need to fill this out with a real
     structure.  */
  walk_data.global_data = NULL;
  walk_data.block_local_data_size = 0;

  /* Now initialize the dominator walker.  */
  init_walk_dominator_tree (&walk_data);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iteration.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR)
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* And finalize the dominator walker.  */
  fini_walk_dominator_tree (&walk_data);

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();
  ssa_name_values.release ();

  return 0;
}
static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}
struct gimple_opt_pass pass_dominator =
{
 {
  GIMPLE_PASS,
  "dom",                                /* name */
  OPTGROUP_NONE,                        /* optinfo_flags */
  gate_dominator,                       /* gate */
  tree_ssa_dominator_optimize,          /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_SSA_DOMINATOR_OPTS,           /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow                  /* todo_flags_finish */
 }
};
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
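
/* For instance (illustrative): "if (5 > x_3)" is rewritten as
   "if (x_3 < 5)" so later lookups see a single canonical form.  */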
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

static void
dom_thread_across_edge (struct dom_walk_data *walk_data, edge e)
{
  if (! walk_data->global_data)
    {
      gimple dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);
      walk_data->global_data = dummy_cond;
    }

  thread_across_edge ((gimple) walk_data->global_data, e, false,
                      &const_and_copies_stack,
                      simplify_stmt_for_jump_threading);
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
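
/* For example (illustrative): x_5 = PHI <y_2(bb3), y_2(bb4)> makes every
   use of x_5 replaceable by y_2, since all incoming alternatives agree.  */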
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
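
/* As an illustration (hypothetical types): if y_2 = (int) x_1 widens a
   char x_1, then on an edge where y_2 == 65 we may also record x_1 == 65,
   since 65 fits in the narrower type.  */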
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two records.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
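
/* For instance (illustrative): for a floating-point "a_1 < b_2" on the
   true edge we also record ORDERED (a_1, b_2), LTGT (a_1, b_2),
   a_1 <= b_2 and a_1 != b_2 as known-true conditions.  */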
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (defbb == NULL)
    return 0;

  return bb_loop_depth (defbb);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
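
/* Example (illustrative): on the true edge of "if (x_1 == 7)" this
   records x_1 = 7; the invariant operand is canonicalized to be the
   recorded value.  */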
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

static bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
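
/* For instance (illustrative): given "switch (x_1) ... case 3: goto bb5;"
   where bb5 is reached only through that label, the edge to bb5 records
   the equivalence x_1 == 3.  */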
static void
dom_opt_enter_block (struct dom_walk_data *walk_data ATTRIBUTE_UNUSED,
                     basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

static void
dom_opt_leave_block (struct dom_walk_data *walk_data, basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      /* Push a marker on the stack, which thread_across_edge expects
         and will remove.  */
      const_and_copies_stack.safe_push (NULL_TREE);
      dom_thread_across_edge (walk_data, single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          /* Push a marker onto the available expression stack so that we
             unwind any expressions related to the TRUE arm before processing
             the false arm below.  */
          avail_exprs_stack.safe_push (NULL);
          const_and_copies_stack.safe_push (NULL_TREE);

          edge_info = (struct edge_info *) true_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              cond_equivalence *eq;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
                record_cond (eq);
            }

          dom_thread_across_edge (walk_data, true_edge);

          /* And restore the various tables to their state before
             we threaded this edge.  */
          remove_local_expressions_from_table ();
          restore_vars_to_original_value ();
        }

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        {
          struct edge_info *edge_info;
          unsigned int i;

          const_and_copies_stack.safe_push (NULL_TREE);
          edge_info = (struct edge_info *) false_edge->aux;

          /* If we have info associated with this edge, record it into
             our equivalence tables.  */
          if (edge_info)
            {
              cond_equivalence *eq;
              tree lhs = edge_info->lhs;
              tree rhs = edge_info->rhs;

              /* If we have a simple NAME = VALUE equivalence, record it.  */
              if (lhs && TREE_CODE (lhs) == SSA_NAME)
                record_const_or_copy (lhs, rhs);

              /* If we have 0 = COND or 1 = COND equivalences, record them
                 into our expression hash tables.  */
              for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
                record_cond (eq);
            }

          /* Now thread the edge.  */
          dom_thread_across_edge (walk_data, false_edge);

          /* No need to remove local expressions from our tables
             or restore vars to their original value as that will
             be done immediately below.  */
        }
    }

  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
        record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (! cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}
2079 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2080 the available expressions table or the const_and_copies table.
2081 Detect and record those equivalences. */
2082 /* We handle only very simple copy equivalences here. The heavy
2083 lifing is done by eliminate_redundant_computations. */
static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      lookup_avail_expr (new_stmt, true);
    }
}
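
/* An illustrative example (not from the original sources; the SSA names
   are made up) of the store equivalence recorded above.  After

     *p_1 = x_2;

   the artificial statement "x_2 = *p_1" is entered into AVAIL_EXPRS, so
   a subsequent load

     y_3 = *p_1;

   performed under the same virtual operand is found in the table and
   replaced with the copy y_3 = x_2.  */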
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate addresses that point to volatiles into memory
         stmts without volatile operands.  */
      if (POINTER_TYPE_P (TREE_TYPE (val))
          && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
          && gimple_has_mem_ops (stmt)
          && !gimple_has_volatile_ops (stmt))
        return;

      /* Do not propagate copies if the propagated value is at a deeper loop
         depth than the propagatee.  Otherwise, this may move loop variant
         variables outside of their loops and prevent coalescing
         opportunities.  If the value was loop invariant, it will be hoisted
         by LICM and exposed for copy propagation.  */
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
        return;

      /* Do not propagate copies into simple IV increment statements.
         See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
          && simple_iv_increment_p (stmt))
        return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}
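
/* An illustrative example (not from the original sources; the SSA names
   are made up).  If CONST_AND_COPIES records the value 5 for x_1, then
   visiting

     y_2 = x_1 + 1;

   replaces the use of x_1, giving y_2 = 5 + 1, which a later fold of the
   modified statement reduces to y_2 = 6.  */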
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}
/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops, and for conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gimple new_stmt;
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              tree tem = SSA_NAME_VALUE (rhs);
              if (tem)
                rhs = tem;
            }
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     Not all references to the deleted SSA_NAMEs can be eliminated until
     we remove unreachable blocks.

     We cannot remove unreachable blocks until after we have completed
     any queued jump threading.

     We cannot complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }
    }
}
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   also added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */

static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs.find_slot_with_hash (&element, element.hash,
                                          (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      avail_exprs_stack.safe_push (element2);
      return NULL_TREE;
    }
  else
    free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
        lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  hashval_t val = 0;

  val = iterative_hash_hashable_expr (expr, val);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return val;

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    val = iterative_hash_expr (vuse, val);

  return val;
}
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */
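
/* An illustrative example (not from the original sources; the SSA names
   and block numbers are made up).  After jump threading duplicates a
   block we may be left with

     x_5 = PHI <y_2(3), y_2(4)>

   Every argument is y_2, so the PHI is degenerate and every use of x_5
   can be replaced with y_2.  */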
/* Given PHI, return its RHS if the PHI is a degenerate, otherwise return
   NULL.  */

tree
degenerate_phi_result (gimple phi)
{
  tree lhs = gimple_phi_result (phi);
  tree val = NULL;
  size_t i;

  /* Ignoring arguments which are the same as LHS, if all the remaining
     arguments are the same, then the PHI is a degenerate and has the
     value of that common argument.  */
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (arg == lhs)
        continue;
      else if (!arg)
        break;
      else if (!val)
        val = arg;
      else if (arg == val)
        continue;
      /* We bring in some of operand_equal_p not only to speed things
         up, but also to avoid crashing when dereferencing the type of
         a released SSA name.  */
      else if (TREE_CODE (val) != TREE_CODE (arg)
               || TREE_CODE (val) == SSA_NAME
               || !operand_equal_p (arg, val, 0))
        break;
    }
  return (i == gimple_phi_num_args (phi) ? val : NULL);
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node.  In the case of a non-degenerate
   phi, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}
/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */
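
/* An illustrative example (not from the original sources; the SSA names
   and block numbers are made up).  Propagating the constant 0 for x_1
   into

     if (x_1 != 0) goto <bb 4>; else goto <bb 5>;

   yields a condition which folds to a constant, so the edge to <bb 4>
   becomes unexecutable.  The code below removes such edges, and the PHI
   nodes at their destinations lose an argument and may themselves become
   degenerate.  */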
static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs,
                        bitmap interesting_names)
{
  /* First verify that propagation is valid and isn't going to move a
     loop variant variable outside its loop.  */
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      && (TREE_CODE (rhs) != SSA_NAME
          || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
      && may_propagate_copy (lhs, rhs)
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.
                <bb 9>:
                  # prephitmp.12_36 = PHI <g_67.1_6(9)>
                  g_67.1_6 = prephitmp.12_36;

             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             Propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              tree result;

              /* Dump details.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
              continue;
            }

          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
            {
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
              fold_stmt_inplace (&gsi);
            }

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
              (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }

          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify them, as PHI nodes
             at the destination of those unexecutable edges may become
             degenerates.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                       gimple_cond_code (use_stmt),
                                       boolean_type_node,
                                       gimple_cond_lhs (use_stmt),
                                       gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (use_stmt);
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi, psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gimple phi = gsi_stmt (psi);

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;

                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}
/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate T by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
         otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
          SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it, instead use the saved version # to clear
     T's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}
/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}
/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be run several times
   in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated, any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   compile-time cost.  */
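
/* An illustrative example (not from the original sources; the SSA names
   and block numbers are made up).  In

     x_5 = PHI <y_2(3), y_2(4)>
     z_6 = PHI <x_5(5), x_5(6)>

   the first PHI is a first order degenerate eliminated by the dominator
   walk.  Once its uses are replaced with y_2, the second PHI becomes the
   second order degenerate PHI <y_2(5), y_2(6)>, which the worklist phase
   picks up.  */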
static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We cannot
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.   */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG, schedule loops for fixup by cfgcleanup.  */
      if (current_loops)
        loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}
struct gimple_opt_pass pass_phi_only_cprop =
{
 {
  GIMPLE_PASS,
  "phicprop",                           /* name */
  OPTGROUP_NONE,                        /* optinfo_flags */
  gate_dominator,                       /* gate */
  eliminate_degenerate_phis,            /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TREE_PHI_CPROP,                    /* tv_id */
  PROP_cfg | PROP_ssa,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_cleanup_cfg
    | TODO_verify_ssa
    | TODO_verify_stmts
    | TODO_update_ssa                   /* todo_flags_finish */
 }
};