/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */
enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
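
/* For example, a statement x_2 = y_1 + z_3 is recorded with kind
   EXPR_BINARY, ops.binary.op == PLUS_EXPR, opnd0 == y_1 and
   opnd1 == z_3, while a call x_2 = foo (y_1) uses the call variant
   with nargs == 1 (see initialize_hash_element below).  */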
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
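
/* For instance, traversing the true edge out of a block ending in
   if (a_1 < b_2) pushes onto COND_EQUIVALENCES the facts that
   a_1 < b_2 is true, a_1 >= b_2 is false, and derived conditions
   such as a_1 <= b_2 and a_1 != b_2 being true (see
   record_conditions below).  */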
/* Stack of available expressions in AVAIL_EXPRS.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt *expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
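
/* For example, while processing a block the stack might hold
   [..., NULL, E1, E2] where E1 and E2 were entered by the current
   block; popping back to the NULL marker removes exactly E1 and E2
   from the global hash table.  */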
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
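
/* As an illustration, given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the first statement enters "b_2 + c_3" with value a_1 into this
   table, and the second statement is then rewritten into the copy
   d_4 = a_1.  */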
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
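
/* For example, recording x_1 = y_2 pushes the previous value of x_1
   (possibly NULL) followed by x_1 itself, so the pair can later be
   popped in reverse order to undo the recording (see
   record_const_or_copy_1 and restore_vars_to_original_value).  */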
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      /* Conversions must have the same signedness to compare equal.  */
      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */
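
/* For instance, hashing the operand pair (a_1, b_2) and the pair
   (b_2, a_1) yields the same value, so b_2 + a_1 can be found in the
   hash table under an entry recorded for a_1 + b_2.  */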
static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
                                  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  /* Hash the smaller value first so operand order does not matter.  */
  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
      case EXPR_SINGLE:
        print_generic_expr (stream, element->expr.ops.single.rhs, 0);
        break;

      case EXPR_UNARY:
        fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
        print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
        break;

      case EXPR_BINARY:
        print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
        fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
        print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
        break;

      case EXPR_TERNARY:
        fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
        print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
        fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
        fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
        fprintf (stream, ">");
        break;

      case EXPR_CALL:
        {
          size_t i;
          size_t nargs = element->expr.ops.call.nargs;
          gimple fn_from;

          fn_from = element->expr.ops.call.fn_from;
          if (gimple_call_internal_p (fn_from))
            fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                   stream);
          else
            print_generic_expr (stream, gimple_call_fn (fn_from), 0);
          fprintf (stream, " (");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.call.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ")");
        }
        break;

      case EXPR_PHI:
        {
          size_t i;
          size_t nargs = element->expr.ops.phi.nargs;

          fprintf (stream, "PHI <");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ">");
        }
        break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}
/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  /* Dummy condition built lazily by thread_across_edge.  */
  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (cfun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, cfun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}
namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};
class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return tree_ssa_dominator_optimize (); }

}; // class pass_dominator

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
	}
    }
}
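
/* As a small example, a condition written as if (5 > a_1) would be
   canonicalized here into if (a_1 < 5), swapping both the operands
   and the comparison code.  */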
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
	break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
	break;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< COPY ");
	  print_generic_expr (dump_file, dest, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
	  fprintf (dump_file, "\n");
	}

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
				  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */
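
/* For instance, if traversing E records the equivalence a_1 = 0 and
   E->dest ends in if (a_1 == 0), the outcome of that conditional is
   statically known and the jump can be threaded past E->dest.  */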
void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
			&const_and_copies_stack,
			simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
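
/* For example, x_3 = PHI <y_1, x_3, y_1> creates the equivalence
   x_3 = y_1 once the alternative identical to the result is
   ignored.  */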
static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */

static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;
	  cond_equivalence *eq;

	  if (lhs)
	    record_equality (lhs, rhs);

	  /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	     set via a widening type conversion, then we may be able to record
	     additional equivalences.  */
	  if (lhs
	      && TREE_CODE (lhs) == SSA_NAME
	      && is_gimple_constant (rhs)
	      && TREE_CODE (rhs) == INTEGER_CST)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (lhs);

	      if (defstmt
		  && is_gimple_assign (defstmt)
		  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
		{
		  tree old_rhs = gimple_assign_rhs1 (defstmt);

		  /* If the conversion widens the original value and
		     the constant is in the range of the type of OLD_RHS,
		     then convert the constant and record the equivalence.

		     Note that int_fits_type_p does not check the precision
		     if the upper and lower bounds are OK.  */
		  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
		      && (TYPE_PRECISION (TREE_TYPE (lhs))
			  > TYPE_PRECISION (TREE_TYPE (old_rhs)))
		      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
		    {
		      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
		      record_equality (old_rhs, newval);
		    }
		}
	    }

	  for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	    record_cond (eq);
	}
    }
}
1345 dump_dominator_optimization_stats (FILE *file
)
1347 fprintf (file
, "Total number of statements: %6ld\n\n",
1348 opt_stats
.num_stmts
);
1349 fprintf (file
, "Exprs considered for dominator optimizations: %6ld\n",
1350 opt_stats
.num_exprs_considered
);
1352 fprintf (file
, "\nHash table statistics:\n");
1354 fprintf (file
, " avail_exprs: ");
1355 htab_statistics (file
, avail_exprs
);
1359 /* Dump SSA statistics on stderr. */
1362 debug_dominator_optimization_stats (void)
1364 dump_dominator_optimization_stats (stderr
);
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "1>>> ");
	  print_expr_hash_elt (dump_file, element);
	}

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
			   tree op0, tree op1,
			   vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
	y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we can not propagate it into non-dominated blocks, we can
	 propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;

	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    record_const_or_copy (lhs, rhs);
	}

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
          gimple phi = gsi_stmt (gsi);

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && (TREE_CODE (new_val) == SSA_NAME
		  || is_gimple_min_invariant (new_val))
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      restore_vars_to_original_value ();
    }
}
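
/* For instance, if x_1 is known to be 5 when BB exits along edge E,
   a PHI node PHI <x_1 (E), ...> in E->dest has its x_1 argument
   replaced by 5, even though E->dest need not be dominated by BB.  */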
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  tree index = gimple_switch_index (stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
              int n_labels = gimple_switch_num_labels (stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = allocate_edge_info (e);
		      edge_info->lhs = index;
		      edge_info->rhs = x;
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }

          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
	   && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
	record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "  Replaced redundant expr '");
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (dump_file, "' with '");
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
	}

      opt_stats.num_re++;

      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
  }
}
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
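/* E.g. (illustrative): for the copy x_1 = y_2, we record y_2 as the
   current value of x_1 in CONST_AND_COPIES, so dominated uses of x_1
   can be rewritten to use y_2 directly.  */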
static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }
  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
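  /* An illustrative example: after the store *p_1 = x_2, the artificial
     statement x_2 = *p_1 is entered into the table, so a dominated load
     y_3 = *p_1 seeing the same virtual operand can be replaced by
     y_3 = x_2.  */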
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      lookup_avail_expr (new_stmt, true);
    }
}
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */
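/* E.g. (illustrative): if CONST_AND_COPIES records 5 as the value of x_1,
   a use of x_1 in y_2 = x_1 + 1 is rewritten to y_2 = 5 + 1, and the
   statement is marked modified so it will be folded later.  */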
static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);
  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate addresses that point to volatiles into memory
         stmts without volatile operands.  */
      if (POINTER_TYPE_P (TREE_TYPE (val))
          && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
          && gimple_has_mem_ops (stmt)
          && !gimple_has_volatile_ops (stmt))
        return;

      /* Do not propagate copies if the propagated value is at a deeper loop
         depth than the propagatee.  Otherwise, this may move loop variant
         variables outside of their loops and prevent coalescing
         opportunities.  If the value was loop invariant, it will be hoisted
         by LICM and exposed for copy propagation.  */
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
        return;
      /* Do not propagate copies into simple IV increment statements.
         See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
          && simple_iv_increment_p (stmt))
        return;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}
/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
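/* A small illustrative example combining both steps: given x_1 = 4
   followed by y_2 = x_1 + 1, step 2 records 4 as the value of x_1,
   const/copy propagation rewrites the use to y_2 = 4 + 1, and folding
   then reduces it to y_2 = 5.  */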
static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;
  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops, and for conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));
  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);
      /* Perform simple redundant store elimination.  */
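      /* E.g. (illustrative):

           x_1 = *p_2;
           ...
           *p_2 = x_1;   <-- stores back the value already in memory; if
                             no intervening store changed *p_2, the store
                             is redundant and can be removed.  */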
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gimple new_stmt;
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              tree tem = SSA_NAME_VALUE (rhs);
              if (tem)
                rhs = tem;
            }
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }
  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     The references to the deleted SSA_NAMEs cannot be eliminated until
     we remove unreachable blocks.

     We cannot remove unreachable blocks until after we have completed
     any queued jump threading.

     We cannot complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way that it can be shown
         it cannot trap, update the EH information and the CFG to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }
    }
}
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   also added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */
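/* E.g. (illustrative): looking up a_5 = b_3 + c_4 after x_2 = b_3 + c_4
   was entered returns x_2, the LHS of the earlier computation; with
   INSERT, a miss records the expression so later duplicates find it.  */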
static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;
  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs.find_slot_with_hash (&element, element.hash,
                                          (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      avail_exprs_stack.safe_push (element2);
      return NULL_TREE;
    }
  else
    free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;
  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
        lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */
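/* A sketch of the consequence for loads: two loads of *p_1 hash (and
   compare) equal only when they also see the same VUSE SSA name, i.e.
   when no store to memory intervenes between them.  */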
static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  hashval_t val = 0;

  val = iterative_hash_hashable_expr (expr, val);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return val;

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    val = iterative_hash_expr (vuse, val);

  return val;
}
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */
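/* E.g. (illustrative): x_3 = PHI <y_2 (3), y_2 (4)> is degenerate; every
   argument is y_2, so all uses of x_3 can be replaced by y_2 and the
   PHI removed.  */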
/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node; in the case of a non-degenerate
   phi, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}


/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}
/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMES so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */
static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs,
                        bitmap interesting_names)
{
  /* First verify that propagation is valid and isn't going to move a
     loop variant variable outside its loop.  */
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      && (TREE_CODE (rhs) != SSA_NAME
          || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
      && may_propagate_copy (lhs, rhs)
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }
      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.

               # prephitmp.12_36 = PHI <g_67.1_6(9)>
               g_67.1_6 = prephitmp.12_36;

             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             Propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              tree result;

              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
              continue;
            }
          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
            {
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
              fold_stmt_inplace (&gsi);
            }

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
              (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }
          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify such edges, as
             PHI nodes at the destination of those unexecutable edges
             may become degenerate.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                       gimple_cond_code (use_stmt),
                                       boolean_type_node,
                                       gimple_cond_lhs (use_stmt),
                                       gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (use_stmt);
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi, psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gimple phi = gsi_stmt (psi);

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;
                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}
/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */
static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);
  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
         otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
          SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it; instead use the saved version number to clear
     STMT's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}
/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}
/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be able to be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated, any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   compile-time impact.  */
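/* An illustrative second order case: eliminating x_1 = PHI <y_2, y_2>
   rewrites z_3 = PHI <x_1, y_2> into z_3 = PHI <y_2, y_2>, itself now
   degenerate; the worklist ensures it is revisited.  */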
static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We cannot
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun),
                               interesting_names);
  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMES is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG, schedule loops for fixup by cfgcleanup.  */
      if (current_loops)
        loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}
namespace {

const pass_data pass_data_phi_only_cprop =
{
  GIMPLE_PASS, /* type */
  "phicprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHI_CPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_verify_ssa
    | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_phi_only_cprop : public gimple_opt_pass
{
public:
  pass_phi_only_cprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return eliminate_degenerate_phis (); }

}; // class pass_phi_only_cprop

} // anon namespace

gimple_opt_pass *
make_pass_phi_only_cprop (gcc::context *ctxt)
{
  return new pass_phi_only_cprop (ctxt);
}