/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
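
/* For illustration only: the GIMPLE assignment "c_3 = a_1 + b_2" would
   be described by a hashable_expr with kind == EXPR_BINARY,
   type == TREE_TYPE (c_3), ops.binary.op == PLUS_EXPR,
   ops.binary.opnd0 == a_1 and ops.binary.opnd1 == b_2.  */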
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
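
/* For illustration only: the true edge of "if (x_3 == y_4)" can record
   the simple equivalence LHS = x_3, RHS = y_4, while the true edge of
   "if (a_1 < b_2)" records no simple equivalence but notes in
   COND_EQUIVALENCES that a_1 < b_2, a_1 <= b_2 and a_1 != b_2 are all
   true (see record_conditions below).  */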
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
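
/* For illustration only: after entering nested blocks B1 and then B2,
   the stack might hold [e1, NULL, e2, e3].  Unwinding B2 pops e3 and
   e2 out of the hash table and stops at the NULL marker, leaving B1's
   entry e1 available.  */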
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
				   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};
inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
	      == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
			     const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
			 struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  if (CONVERT_EXPR_CODE_P (subcode))
	    subcode = NOP_EXPR;

	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
        case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
        case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
				   tree lhs,
				   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
		       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
	  || TREE_CODE (type1) == ERROR_MARK
	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
			      expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
	return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
	   || expr0->ops.unary.op == NON_LVALUE_EXPR)
	  && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
	return false;

      return operand_equal_p (expr0->ops.unary.opnd,
			      expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
	return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
			   expr1->ops.binary.opnd0, 0)
	  && operand_equal_p (expr0->ops.binary.opnd1,
			      expr1->ops.binary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
	      && operand_equal_p (expr0->ops.binary.opnd0,
				  expr1->ops.binary.opnd1, 0)
	      && operand_equal_p (expr0->ops.binary.opnd1,
				  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
	  || !operand_equal_p (expr0->ops.ternary.opnd2,
			       expr1->ops.ternary.opnd2, 0))
	return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
			   expr1->ops.ternary.opnd0, 0)
	  && operand_equal_p (expr0->ops.ternary.opnd1,
			      expr1->ops.ternary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
	      && operand_equal_p (expr0->ops.ternary.opnd0,
				  expr1->ops.ternary.opnd1, 0)
	      && operand_equal_p (expr0->ops.ternary.opnd1,
				  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
	  {
	    int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
	    int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
	    if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
	      return false;
	  }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
	inchash::add_expr_commutative (expr->ops.binary.opnd0,
				       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
	inchash::add_expr_commutative (expr->ops.ternary.opnd0,
				       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
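
/* Note: the commutative handling above ensures, for example, that
   "a_1 + b_2" and "b_2 + a_1" receive the same hash value, matching
   hashable_expr_equal_p, which accepts either operand order for
   commutative codes.  */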
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
      case EXPR_SINGLE:
        print_generic_expr (stream, element->expr.ops.single.rhs, 0);
        break;

      case EXPR_UNARY:
	fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
        print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
        break;

      case EXPR_BINARY:
        print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
	fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
        print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
        break;

      case EXPR_TERNARY:
	fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
        print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
	fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
	fputs (", ", stream);
        print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
	fputs (">", stream);
        break;

      case EXPR_CALL:
        {
          size_t i;
          size_t nargs = element->expr.ops.call.nargs;
          gimple fn_from;

          fn_from = element->expr.ops.call.fn_from;
          if (gimple_call_internal_p (fn_from))
            fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                   stream);
          else
            print_generic_expr (stream, gimple_call_fn (fn_from), 0);
          fprintf (stream, " (");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.call.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ")");
        }
        break;

      case EXPR_PHI:
        {
          size_t i;
          size_t nargs = element->expr.ops.phi.nargs;

          fprintf (stream, "PHI <");
          for (i = 0; i < nargs; i++)
            {
              print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
              if (i + 1 < nargs)
                fprintf (stream, ", ");
            }
          fprintf (stream, ">");
        }
        break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
	  struct edge_info *edge_info = (struct edge_info *) e->aux;

	  if (edge_info)
	    {
	      edge_info->cond_equivalences.release ();
	      free (edge_info);
	      e->aux = NULL;
	    }
	}
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
	}
    }
}
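
/* For illustration only: "if (5 > x_2)" is canonicalized by swapping
   the operands and applying swap_tree_comparison, producing
   "if (x_2 < 5)".  */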
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions in LOCALS from TABLE, stopping when there are
   LIMIT entries left in LOCALs.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
	break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs->find_slot (victim, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs->clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
	break;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< COPY ");
	  print_generic_expr (dump_file, dest, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
	  fprintf (dump_file, "\n");
	}

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
				  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
			&const_and_copies_stack,
			simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}
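
/* For illustration only: given "x_5 = PHI <a_1, a_1, x_5>", the
   alternatives are either a_1 or the result itself, so x_5 must equal
   a_1 and that value can be recorded for x_5.  */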
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */

static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;
	  cond_equivalence *eq;

	  if (lhs)
	    record_equality (lhs, rhs);

	  /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	     set via a widening type conversion, then we may be able to record
	     additional equivalences.  */
	  if (lhs
	      && TREE_CODE (lhs) == SSA_NAME
	      && is_gimple_constant (rhs)
	      && TREE_CODE (rhs) == INTEGER_CST)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (lhs);

	      if (defstmt
		  && is_gimple_assign (defstmt)
		  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
		{
		  tree old_rhs = gimple_assign_rhs1 (defstmt);

		  /* If the conversion widens the original value and
		     the constant is in the range of the type of OLD_RHS,
		     then convert the constant and record the equivalence.

		     Note that int_fits_type_p does not check the precision
		     if the upper and lower bounds are OK.  */
		  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
		      && (TYPE_PRECISION (TREE_TYPE (lhs))
			  > TYPE_PRECISION (TREE_TYPE (old_rhs)))
		      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
		    {
		      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
		      record_equality (old_rhs, newval);
		    }
		}
	    }

	  for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	    record_cond (eq);
	}
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}
/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "1>>> ");
	  print_expr_hash_elt (dump_file, element);
	}

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
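
/* For illustration only: for an integral comparison "a_1 < b_2" the
   vector receives the dominated conditions a_1 <= b_2 and a_1 != b_2
   (both true), followed by "a_1 < b_2 is true" and the inverted
   "a_1 >= b_2 is false".  */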
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
	y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   /* ???  When threading over backedges the following is important
	      for correctness.  See PR61757.  */
	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
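
/* For illustration only: on the true edge of "if (x_1 == 5)",
   record_equality canonicalizes on the invariant operand and records
   the value 5 for x_1, so dominated uses of x_1 may be replaced by the
   constant.  */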
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we can not propagate it into non-dominated blocks, we can
	 propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;

	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    record_const_or_copy (lhs, rhs);
	}

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
          gimple phi = gsi_stmt (gsi);

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && (TREE_CODE (new_val) == SSA_NAME
		  || is_gimple_min_invariant (new_val))
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      restore_vars_to_original_value ();
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  tree index = gimple_switch_index (stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
              int n_labels = gimple_switch_num_labels (stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = allocate_edge_info (e);
		      edge_info->lhs = index;
		      edge_info->rhs = x;
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
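
/* For illustration only: for "switch (i_2)", an outgoing edge reached
   only by "case 7:" records LHS = i_2 and RHS = 7; for an integral
   "if (x_3 == y_4)", the true edge records the simple equivalence
   x_3 = y_4 plus the conditions derived by record_conditions.  */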
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
	   && gimple_code (last) == GIMPLE_COND
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
	   && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator *gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    {
      /* We can't propagate into a phi, so the logic below doesn't apply.
         Instead record an equivalence between the cached LHS and the
         PHI result of this statement, provided they are in the same block.
         This should be sufficient to kill the redundant phi.  */
      if (def && cached_lhs)
        record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}
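/* A sketch of the transformation performed above, on hypothetical
   GIMPLE:

        c_3 = a_1 + b_2;
        ...
        d_4 = a_1 + b_2;

   If the second statement is dominated by the first, the lookup of
   a_1 + b_2 returns the cached LHS c_3 and the second computation is
   rewritten to

        d_4 = c_3;

   which the const/copy propagation machinery can then simplify
   further.  */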
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.

   We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */

static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      lookup_avail_expr (new_stmt, true);
    }
}
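/* For example (hypothetical GIMPLE): after the store

        *p_1 = x_2;

   the exchanged statement x_2 = *p_1 is entered into the table, so a
   dominated load

        y_3 = *p_1;

   finds a hit and is rewritten to y_3 = x_2, turning the load into a
   copy that can be propagated away.  */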
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate copies into simple IV increment statements.
         See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
          && simple_iv_increment_p (stmt))
        return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}
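/* A small hypothetical example of the propagation done here:

        x_1 = 5;
        y_2 = x_1 + z_3;

   With 5 recorded as the value of x_1 in CONST_AND_COPIES, the use of
   x_1 in the second statement is replaced, yielding y_2 = 5 + z_3,
   and the statement is marked modified so it will be folded and
   rescanned.  */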
/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */

static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gimple new_stmt;
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              tree tem = SSA_NAME_VALUE (rhs);
              if (tem)
                rhs = tem;
            }
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs can not be eliminated until
     we remove unreachable blocks.

     We can not remove unreachable blocks until after we have completed
     any queued jump threading.

     We can not complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
        val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }
    }
}
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */

static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      avail_exprs_stack.safe_push (element2);
      return NULL_TREE;
    }
  else
    free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
        lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  inchash::hash hstate;

  inchash::add_hashable_expr (expr, hstate);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return hstate.end ();

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    inchash::add_expr (vuse, hstate);

  return hstate.end ();
}
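/* Mixing the vuse into the hash keeps loads from different memory
   states apart.  E.g. in this hypothetical sequence

        x_1 = a[i_2];   # VUSE <.MEM_5>
        a[i_2] = y_3;   # .MEM_6 = VDEF <.MEM_5>
        z_4 = a[i_2];   # VUSE <.MEM_6>

   the two loads of a[i_2] hash differently because their vuses differ,
   so the second load is not considered redundant with the first.  */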
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */

/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node.  In the case of a non-degenerate
   phi, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}
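/* For reference, a degenerate PHI is one whose arguments all have the
   same value, e.g. (hypothetical)

        x_5 = PHI <x_3(4), x_3(5)>

   for which get_rhs_or_phi_arg returns x_3 and get_lhs_or_phi_result
   returns x_5, reducing the PHI to the trivial copy x_5 = x_3.  */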
/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */

static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs,
                        bitmap interesting_names)
{
  /* First verify that propagation is valid.  */
  if (may_propagate_copy (lhs, rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.
                <bb 9>:
                # prephitmp.12_36 = PHI <g_67.1_6(9)>
                g_67.1_6 = prephitmp.12_36;
             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             Propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              tree result;

              /* Dump details.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
              continue;
            }

          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
            {
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
              fold_stmt_inplace (&gsi);
            }

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
                (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }

          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify them, as PHI nodes
             at the destination of those unexecutable edges may become
             degenerates.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                       gimple_cond_code (use_stmt),
                                       boolean_type_node,
                                       gimple_cond_lhs (use_stmt),
                                       gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (use_stmt);
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi, psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gimple phi = gsi_stmt (psi);

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;

                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}
/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate T by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
         otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
          SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it, instead use the saved version # to clear
     T's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}
/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}
/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be able to be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   compile-time cost.  */
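/* To sketch the first/second order distinction on a hypothetical
   example: eliminating the first order degenerate PHI

        x_2 = PHI <x_1(3), x_1(4)>

   replaces x_2 with x_1 everywhere, which may turn a consumer such as

        y_5 = PHI <x_2(6), x_1(7)>

   into the second order degenerate PHI y_5 = PHI <x_1(6), x_1(7)>.
   Its defining statement is queued in INTERESTING_NAMES and picked up
   by the worklist phase.  */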
namespace {

const pass_data pass_data_phi_only_cprop =
{
  GIMPLE_PASS, /* type */
  "phicprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHI_CPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_phi_only_cprop : public gimple_opt_pass
{
public:
  pass_phi_only_cprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_phi_only_cprop

unsigned int
pass_phi_only_cprop::execute (function *fun)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We can not
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
                               interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.  */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG, schedule loops for fixup by cfgcleanup.  */
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_phi_only_cprop (gcc::context *ctxt)
{
  return new pass_phi_only_cprop (ctxt);
}