/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};
struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
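/* As an example, an assignment such as a_1 = b_2 + c_3 is represented
   with kind == EXPR_BINARY, type == TREE_TYPE (a_1),
   ops.binary.op == PLUS_EXPR, and ops.binary.opnd0/opnd1 referring to
   b_2 and c_3 (see initialize_hash_element below).  */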
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}
inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
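/* For instance, after recording a_1 = b_2 + c_3 here, a later statement
   x_4 = b_2 + c_3 in a dominated block looks up the same hashable_expr
   and has its RHS replaced by a_1.  */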
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If exactly one of the types is NULL, the expressions cannot
     be equal.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
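/* Note that for commutative binary and ternary codes the operand order
   is irrelevant here: b_2 + c_3 and c_3 + b_2 compare equal, while
   b_2 - c_3 and c_3 - b_2 do not.  */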
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
                                  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}
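/* Sorting the two operand hashes before mixing them in provides the
   order-insensitivity promised above: the operands of b_2 + c_3 and
   c_3 + b_2 hash identically, matching hashable_expr_equal_p.  */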
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (cfun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}
namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return tree_ssa_dominator_optimize (); }

}; // class pass_dominator

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
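/* For example, a condition such as if (5 > x_3) is rewritten as
   if (x_3 < 5): the operands are swapped and GT_EXPR becomes LT_EXPR,
   so equivalent comparisons hash and compare consistently.  */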
/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Remove all the expressions made available in this block from the
   global hash table, popping entries off AVAIL_EXPRS_STACK until the
   NULL marker for the block is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
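/* For instance, x_3 = PHI <y_1, y_1, y_1> (or x_3 = PHI <x_3, y_1>,
   since alternatives equal to the result are ignored) records the
   equivalence x_3 = y_1.  */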
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
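/* As an example of the widening case: given x_1 = (int) c_2 with c_2 of
   type char, an edge implying x_1 == 5 also lets us record c_2 == 5,
   because 5 fits in char and the conversion only widened the value.  */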
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions themselves.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
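/* So for an integer comparison a_1 < b_2, the true edge records
   a_1 <= b_2 and a_1 != b_2 as true in addition to the condition
   itself (with its inverse recorded as false); floating-point
   comparisons additionally yield ORDERED and related facts.  */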
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
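/* For example, an edge implying x_1 == 7 records 7 as the value of x_1;
   for x_1 == y_2 we canonicalize on one of the two names, preferring an
   invariant or the name defined in the shallower loop.  */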
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
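/* For instance, if the edge from BB implies x_1 == 0 and a PHI node in
   the successor has x_1 as its argument for that edge, the argument is
   replaced with 0 even though the successor is not dominated by BB.  */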
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
1972 /* We have finished processing the dominator children of BB; perform
1973 any finalization actions in preparation for leaving this node in
1974 the dominator tree. */
1976 void
1977 dom_opt_dom_walker::after_dom_children (basic_block bb)
1979 gimple last;
1981 /* If we have an outgoing edge to a block with multiple incoming and
1982 outgoing edges, then we may be able to thread the edge, i.e., we
1983 may be able to statically determine which of the outgoing edges
1984 will be traversed when the incoming edge from BB is traversed. */
1985 if (single_succ_p (bb)
1986 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1987 && potentially_threadable_block (single_succ (bb)))
1989 thread_across_edge (single_succ_edge (bb));
1991 else if ((last = last_stmt (bb))
1992 && gimple_code (last) == GIMPLE_COND
1993 && EDGE_COUNT (bb->succs) == 2
1994 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1995 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1997 edge true_edge, false_edge;
1999 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2001 /* Only try to thread the edge if it reaches a target block with
2002 more than one predecessor and more than one successor. */
2003 if (potentially_threadable_block (true_edge->dest))
2004 thread_across_edge (true_edge);
2006 /* Similarly for the ELSE arm. */
2007 if (potentially_threadable_block (false_edge->dest))
2008 thread_across_edge (false_edge);
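/* A sketch of what threading exploits (hypothetical blocks and names):

     bb_2: if (x_1 == 0) goto bb_4; else goto bb_3;
     ...
     bb_4: if (x_1 == 0) goto bb_5; else goto bb_6;

   On the edge bb_2 -> bb_4 we know x_1 == 0, so the condition in
   bb_4 must send control to bb_5; the incoming edge can therefore be
   threaded directly to bb_5, bypassing the redundant test.  */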
2012 /* These remove expressions local to BB from the tables. */
2013 remove_local_expressions_from_table ();
2014 restore_vars_to_original_value ();
2017 /* Search for redundant computations in STMT. If any are found, then
2018 replace them with the variable holding the result of the computation.
2020 If safe, record this expression into the available expression hash
2021 table. */
2023 static void
2024 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2026 tree expr_type;
2027 tree cached_lhs;
2028 tree def;
2029 bool insert = true;
2030 bool assigns_var_p = false;
2032 gimple stmt = gsi_stmt (*gsi);
2034 if (gimple_code (stmt) == GIMPLE_PHI)
2035 def = gimple_phi_result (stmt);
2036 else
2037 def = gimple_get_lhs (stmt);
2039 /* Certain expressions on the RHS can be optimized away, but can not
2040 themselves be entered into the hash tables. */
2041 if (! def
2042 || TREE_CODE (def) != SSA_NAME
2043 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2044 || gimple_vdef (stmt)
2045 /* Do not record equivalences for increments of ivs. This would create
2046 overlapping live ranges for a very questionable gain. */
2047 || simple_iv_increment_p (stmt))
2048 insert = false;
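/* For example (hypothetical): entering i_8 = i_7 + 1 from a loop
   into the table could let a later identical increment reuse i_8,
   keeping both i_7 and i_8 live across the loop body for little or
   no gain.  */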
2050 /* Check if the expression has been computed before. */
2051 cached_lhs = lookup_avail_expr (stmt, insert);
2053 opt_stats.num_exprs_considered++;
2055 /* Get the type of the expression we are trying to optimize. */
2056 if (is_gimple_assign (stmt))
2058 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2059 assigns_var_p = true;
2061 else if (gimple_code (stmt) == GIMPLE_COND)
2062 expr_type = boolean_type_node;
2063 else if (is_gimple_call (stmt))
2065 gcc_assert (gimple_call_lhs (stmt));
2066 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2067 assigns_var_p = true;
2069 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2070 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2071 else if (gimple_code (stmt) == GIMPLE_PHI)
2072 /* We can't propagate into a phi, so the logic below doesn't apply.
2073 Instead record an equivalence between the cached LHS and the
2074 PHI result of this statement, provided they are in the same block.
2075 This should be sufficient to kill the redundant phi. */
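/* For example (hypothetical): with two PHIs in the same block,

     # x_5 = PHI <a_1(2), b_3(4)>
     # y_6 = PHI <a_1(2), b_3(4)>

   the lookup above returns x_5 as the cached LHS for y_6, and
   recording the copy y_6 = x_5 lets the second PHI be removed as
   dead once its uses are rewritten.  */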
2077 if (def && cached_lhs)
2078 record_const_or_copy (def, cached_lhs);
2079 return;
2081 else
2082 gcc_unreachable ();
2084 if (!cached_lhs)
2085 return;
2087 /* It is safe to ignore types here since we have already done
2088 type checking in the hashing and equality routines. In fact
2089 type checking here merely gets in the way of constant
2090 propagation. Also, make sure that it is safe to propagate
2091 CACHED_LHS into the expression in STMT. */
2092 if ((TREE_CODE (cached_lhs) != SSA_NAME
2093 && (assigns_var_p
2094 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2095 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2097 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2098 || is_gimple_min_invariant (cached_lhs));
2100 if (dump_file && (dump_flags & TDF_DETAILS))
2102 fprintf (dump_file, " Replaced redundant expr '");
2103 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2104 fprintf (dump_file, "' with '");
2105 print_generic_expr (dump_file, cached_lhs, dump_flags);
2106 fprintf (dump_file, "'\n");
2109 opt_stats.num_re++;
2111 if (assigns_var_p
2112 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2113 cached_lhs = fold_convert (expr_type, cached_lhs);
2115 propagate_tree_value_into_stmt (gsi, cached_lhs);
2117 /* Since it is always necessary to mark the result as modified,
2118 perhaps we should move this into propagate_tree_value_into_stmt
2119 itself. */
2120 gimple_set_modified (gsi_stmt (*gsi), true);
2124 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2125 the available expressions table or the const_and_copies table.
2126 Detect and record those equivalences. */
2127 /* We handle only very simple copy equivalences here. The heavy
2128 lifting is done by eliminate_redundant_computations. */
2130 static void
2131 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2133 tree lhs;
2134 enum tree_code lhs_code;
2136 gcc_assert (is_gimple_assign (stmt));
2138 lhs = gimple_assign_lhs (stmt);
2139 lhs_code = TREE_CODE (lhs);
2141 if (lhs_code == SSA_NAME
2142 && gimple_assign_single_p (stmt))
2144 tree rhs = gimple_assign_rhs1 (stmt);
2146 /* If the RHS of the assignment is a constant or another variable that
2147 may be propagated, register it in the CONST_AND_COPIES table. We
2148 do not need to record unwind data for this, since this is a true
2149 assignment and not an equivalence inferred from a comparison. All
2150 uses of this ssa name are dominated by this assignment, so unwinding
2151 just costs time and space. */
2152 if (may_optimize_p
2153 && (TREE_CODE (rhs) == SSA_NAME
2154 || is_gimple_min_invariant (rhs)))
2156 if (dump_file && (dump_flags & TDF_DETAILS))
2158 fprintf (dump_file, "==== ASGN ");
2159 print_generic_expr (dump_file, lhs, 0);
2160 fprintf (dump_file, " = ");
2161 print_generic_expr (dump_file, rhs, 0);
2162 fprintf (dump_file, "\n");
2165 set_ssa_name_value (lhs, rhs);
2169 /* A memory store, even an aliased store, creates a useful
2170 equivalence. By exchanging the LHS and RHS, creating suitable
2171 vops and recording the result in the available expression table,
2172 we may be able to expose more redundant loads. */
2173 if (!gimple_has_volatile_ops (stmt)
2174 && gimple_references_memory_p (stmt)
2175 && gimple_assign_single_p (stmt)
2176 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2177 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2178 && !is_gimple_reg (lhs))
2180 tree rhs = gimple_assign_rhs1 (stmt);
2181 gimple new_stmt;
2183 /* Build a new statement with the RHS and LHS exchanged. */
2184 if (TREE_CODE (rhs) == SSA_NAME)
2186 /* NOTE tuples. The call to gimple_build_assign below replaced
2187 a call to build_gimple_modify_stmt, which did not set the
2188 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2189 may cause an SSA validation failure, as the LHS may be a
2190 default-initialized name and should have no definition. I'm
2191 a bit dubious of this, as the artificial statement that we
2192 generate here may in fact be ill-formed, but it is simply
2193 used as an internal device in this pass, and never becomes
2194 part of the CFG. */
2195 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2196 new_stmt = gimple_build_assign (rhs, lhs);
2197 SSA_NAME_DEF_STMT (rhs) = defstmt;
2199 else
2200 new_stmt = gimple_build_assign (rhs, lhs);
2202 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2204 /* Finally enter the statement into the available expression
2205 table. */
2206 lookup_avail_expr (new_stmt, true);
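/* For instance (hypothetical): after the store *p_1 = x_2, the
   artificial statement x_2 = *p_1 (given the store's VDEF as its
   VUSE) is entered into the table, so a later load y_3 = *p_1 from
   the same memory state can be replaced by y_3 = x_2.  */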
2210 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2211 CONST_AND_COPIES. */
2213 static void
2214 cprop_operand (gimple stmt, use_operand_p op_p)
2216 tree val;
2217 tree op = USE_FROM_PTR (op_p);
2219 /* If the operand has a known constant value or it is known to be a
2220 copy of some other variable, use the value or copy stored in
2221 CONST_AND_COPIES. */
2222 val = SSA_NAME_VALUE (op);
2223 if (val && val != op)
2225 /* Do not replace hard register operands in asm statements. */
2226 if (gimple_code (stmt) == GIMPLE_ASM
2227 && !may_propagate_copy_into_asm (op))
2228 return;
2230 /* Certain operands are not allowed to be copy propagated due
2231 to their interaction with exception handling and some GCC
2232 extensions. */
2233 if (!may_propagate_copy (op, val))
2234 return;
2236 /* Do not propagate addresses that point to volatiles into memory
2237 stmts without volatile operands. */
2238 if (POINTER_TYPE_P (TREE_TYPE (val))
2239 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2240 && gimple_has_mem_ops (stmt)
2241 && !gimple_has_volatile_ops (stmt))
2242 return;
2244 /* Do not propagate copies if the propagated value is at a deeper loop
2245 depth than the propagatee. Otherwise, this may move loop variant
2246 variables outside of their loops and prevent coalescing
2247 opportunities. If the value was loop invariant, it will be hoisted
2248 by LICM and exposed for copy propagation. */
2249 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2250 return;
2252 /* Do not propagate copies into simple IV increment statements.
2253 See PR23821 for how this can disturb IV analysis. */
2254 if (TREE_CODE (val) != INTEGER_CST
2255 && simple_iv_increment_p (stmt))
2256 return;
2258 /* Dump details. */
2259 if (dump_file && (dump_flags & TDF_DETAILS))
2261 fprintf (dump_file, " Replaced '");
2262 print_generic_expr (dump_file, op, dump_flags);
2263 fprintf (dump_file, "' with %s '",
2264 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2265 print_generic_expr (dump_file, val, dump_flags);
2266 fprintf (dump_file, "'\n");
2269 if (TREE_CODE (val) != SSA_NAME)
2270 opt_stats.num_const_prop++;
2271 else
2272 opt_stats.num_copy_prop++;
2274 propagate_value (op_p, val);
2276 /* And note that we modified this statement. This is now
2277 safe, even if we changed virtual operands since we will
2278 rescan the statement and rewrite its operands again. */
2279 gimple_set_modified (stmt, true);
2283 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2284 known value for that SSA_NAME (or NULL if no value is known).
2286 Propagate values from CONST_AND_COPIES into the uses, vuses and
2287 vdef_ops of STMT. */
2289 static void
2290 cprop_into_stmt (gimple stmt)
2292 use_operand_p op_p;
2293 ssa_op_iter iter;
2295 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2296 cprop_operand (stmt, op_p);
2299 /* Optimize the statement pointed to by iterator SI.
2301 We try to perform some simplistic global redundancy elimination and
2302 constant propagation:
2304 1- To detect global redundancy, we keep track of expressions that have
2305 been computed in this block and its dominators. If we find that the
2306 same expression is computed more than once, we eliminate repeated
2307 computations by using the target of the first one.
2309 2- Constant values and copy assignments. This is used to do very
2310 simplistic constant and copy propagation. When a constant or copy
2311 assignment is found, we map the value on the RHS of the assignment to
2312 the variable in the LHS in the CONST_AND_COPIES table. */
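/* A sketch of both effects (hypothetical SSA names):

     a_3 = b_1 + c_2;
     x_4 = a_3;           <-- x_4 -> a_3 recorded in CONST_AND_COPIES
     d_5 = b_1 + c_2;     <-- redundant; replaced by d_5 = a_3  */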
2314 static void
2315 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2317 gimple stmt, old_stmt;
2318 bool may_optimize_p;
2319 bool modified_p = false;
2321 old_stmt = stmt = gsi_stmt (si);
2323 if (dump_file && (dump_flags & TDF_DETAILS))
2325 fprintf (dump_file, "Optimizing statement ");
2326 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2329 if (gimple_code (stmt) == GIMPLE_COND)
2330 canonicalize_comparison (stmt);
2332 update_stmt_if_modified (stmt);
2333 opt_stats.num_stmts++;
2335 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2336 cprop_into_stmt (stmt);
2338 /* If the statement has been modified with constant replacements,
2339 fold its RHS before checking for redundant computations. */
2340 if (gimple_modified_p (stmt))
2342 tree rhs = NULL;
2344 /* Try to fold the statement making sure that STMT is kept
2345 up to date. */
2346 if (fold_stmt (&si))
2348 stmt = gsi_stmt (si);
2349 gimple_set_modified (stmt, true);
2351 if (dump_file && (dump_flags & TDF_DETAILS))
2353 fprintf (dump_file, " Folded to: ");
2354 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2358 /* We only need to consider cases that can yield a gimple operand. */
2359 if (gimple_assign_single_p (stmt))
2360 rhs = gimple_assign_rhs1 (stmt);
2361 else if (gimple_code (stmt) == GIMPLE_GOTO)
2362 rhs = gimple_goto_dest (stmt);
2363 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2364 /* This should never be an ADDR_EXPR. */
2365 rhs = gimple_switch_index (stmt);
2367 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2368 recompute_tree_invariant_for_addr_expr (rhs);
2370 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2371 even if fold_stmt updated the stmt already and thus cleared
2372 gimple_modified_p flag on it. */
2373 modified_p = true;
2376 /* Check for redundant computations. Do this optimization only for
2377 side-effect-free assignments, calls with a LHS, conditionals and switches. */
2378 may_optimize_p = (!gimple_has_side_effects (stmt)
2379 && (is_gimple_assign (stmt)
2380 || (is_gimple_call (stmt)
2381 && gimple_call_lhs (stmt) != NULL_TREE)
2382 || gimple_code (stmt) == GIMPLE_COND
2383 || gimple_code (stmt) == GIMPLE_SWITCH));
2385 if (may_optimize_p)
2387 if (gimple_code (stmt) == GIMPLE_CALL)
2389 /* Resolve __builtin_constant_p. If it hasn't been
2390 folded to integer_one_node by now, it's fairly
2391 certain that the value simply isn't constant. */
2392 tree callee = gimple_call_fndecl (stmt);
2393 if (callee
2394 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2395 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2397 propagate_tree_value_into_stmt (&si, integer_zero_node);
2398 stmt = gsi_stmt (si);
2402 update_stmt_if_modified (stmt);
2403 eliminate_redundant_computations (&si);
2404 stmt = gsi_stmt (si);
2406 /* Perform simple redundant store elimination. */
2407 if (gimple_assign_single_p (stmt)
2408 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2410 tree lhs = gimple_assign_lhs (stmt);
2411 tree rhs = gimple_assign_rhs1 (stmt);
2412 tree cached_lhs;
2413 gimple new_stmt;
2414 if (TREE_CODE (rhs) == SSA_NAME)
2416 tree tem = SSA_NAME_VALUE (rhs);
2417 if (tem)
2418 rhs = tem;
2420 /* Build a new statement with the RHS and LHS exchanged. */
2421 if (TREE_CODE (rhs) == SSA_NAME)
2423 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2424 new_stmt = gimple_build_assign (rhs, lhs);
2425 SSA_NAME_DEF_STMT (rhs) = defstmt;
2427 else
2428 new_stmt = gimple_build_assign (rhs, lhs);
2429 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2430 cached_lhs = lookup_avail_expr (new_stmt, false);
2431 if (cached_lhs
2432 && rhs == cached_lhs)
2434 basic_block bb = gimple_bb (stmt);
2435 unlink_stmt_vdef (stmt);
2436 if (gsi_remove (&si, true))
2438 bitmap_set_bit (need_eh_cleanup, bb->index);
2439 if (dump_file && (dump_flags & TDF_DETAILS))
2440 fprintf (dump_file, " Flagged to clear EH edges.\n");
2442 release_defs (stmt);
2443 return;
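/* For example (hypothetical): in

     x_3 = *p_1;
     ...
     *p_1 = x_3;

   the store writes back the value just loaded, so looking up the
   swapped statement x_3 = *p_1 hits the table and the store is
   removed as redundant.  */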
2448 /* Record any additional equivalences created by this statement. */
2449 if (is_gimple_assign (stmt))
2450 record_equivalences_from_stmt (stmt, may_optimize_p);
2452 /* If STMT is a COND_EXPR and it was modified, then we may know
2453 where it goes. If that is the case, then mark the CFG as altered.
2455 This will cause us to later call remove_unreachable_blocks and
2456 cleanup_tree_cfg when it is safe to do so. It is not safe to
2457 clean things up here since removal of edges and such can trigger
2458 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2459 the manager.
2461 That's all fine and good, except that once SSA_NAMEs are released
2462 to the manager, we must not call create_ssa_name until all references
2463 to released SSA_NAMEs have been eliminated.
2465 All references to the deleted SSA_NAMEs can not be eliminated until
2466 we remove unreachable blocks.
2468 We can not remove unreachable blocks until after we have completed
2469 any queued jump threading.
2471 We can not complete any queued jump threads until we have taken
2472 appropriate variables out of SSA form. Taking variables out of
2473 SSA form can call create_ssa_name and thus we lose.
2475 Ultimately I suspect we're going to need to change the interface
2476 into the SSA_NAME manager. */
2477 if (gimple_modified_p (stmt) || modified_p)
2479 tree val = NULL;
2481 update_stmt_if_modified (stmt);
2483 if (gimple_code (stmt) == GIMPLE_COND)
2484 val = fold_binary_loc (gimple_location (stmt),
2485 gimple_cond_code (stmt), boolean_type_node,
2486 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2487 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2488 val = gimple_switch_index (stmt);
2490 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2491 cfg_altered = true;
2493 /* If we simplified a statement in such a way as to be shown that it
2494 cannot trap, update the eh information and the cfg to match. */
2495 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2497 bitmap_set_bit (need_eh_cleanup, bb->index);
2498 if (dump_file && (dump_flags & TDF_DETAILS))
2499 fprintf (dump_file, " Flagged to clear EH edges.\n");
2504 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2505 If found, return its LHS. Otherwise insert STMT in the table and
2506 return NULL_TREE.
2508 Also, when an expression is first inserted in the table, it is
2509 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2510 we finish processing this block and its children. */
2512 static tree
2513 lookup_avail_expr (gimple stmt, bool insert)
2515 expr_hash_elt **slot;
2516 tree lhs;
2517 tree temp;
2518 struct expr_hash_elt element;
2520 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2521 if (gimple_code (stmt) == GIMPLE_PHI)
2522 lhs = gimple_phi_result (stmt);
2523 else
2524 lhs = gimple_get_lhs (stmt);
2526 initialize_hash_element (stmt, lhs, &element);
2528 if (dump_file && (dump_flags & TDF_DETAILS))
2530 fprintf (dump_file, "LKUP ");
2531 print_expr_hash_elt (dump_file, &element);
2534 /* Don't bother remembering constant assignments and copy operations.
2535 Constants and copy operations are handled by the constant/copy propagator
2536 in optimize_stmt. */
2537 if (element.expr.kind == EXPR_SINGLE
2538 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2539 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2540 return NULL_TREE;
2542 /* Finally try to find the expression in the main expression hash table. */
2543 slot = avail_exprs.find_slot_with_hash (&element, element.hash,
2544 (insert ? INSERT : NO_INSERT));
2545 if (slot == NULL)
2547 free_expr_hash_elt_contents (&element);
2548 return NULL_TREE;
2550 else if (*slot == NULL)
2552 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2553 *element2 = element;
2554 element2->stamp = element2;
2555 *slot = element2;
2557 if (dump_file && (dump_flags & TDF_DETAILS))
2559 fprintf (dump_file, "2>>> ");
2560 print_expr_hash_elt (dump_file, element2);
2563 avail_exprs_stack.safe_push (element2);
2564 return NULL_TREE;
2566 else
2567 free_expr_hash_elt_contents (&element);
2569 /* Extract the LHS of the assignment so that it can be used as the current
2570 definition of another variable. */
2571 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2573 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2574 use the value from the const_and_copies table. */
2575 if (TREE_CODE (lhs) == SSA_NAME)
2577 temp = SSA_NAME_VALUE (lhs);
2578 if (temp)
2579 lhs = temp;
2582 if (dump_file && (dump_flags & TDF_DETAILS))
2584 fprintf (dump_file, "FIND: ");
2585 print_generic_expr (dump_file, lhs, 0);
2586 fprintf (dump_file, "\n");
2589 return lhs;
2592 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2593 for expressions using the code of the expression and the SSA numbers of
2594 its operands. */
2596 static hashval_t
2597 avail_expr_hash (const void *p)
2599 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2600 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2601 tree vuse;
2602 hashval_t val = 0;
2604 val = iterative_hash_hashable_expr (expr, val);
2606 /* If the hash table entry is not associated with a statement, then we
2607 can just hash the expression and not worry about virtual operands
2608 and such. */
2609 if (!stmt)
2610 return val;
2612 /* Add the SSA version number of the vuse operand. This is important
2613 because compound variables like arrays are not renamed in the
2614 operands. Rather, the rename is done on the virtual variable
2615 representing all the elements of the array. */
2616 if ((vuse = gimple_vuse (stmt)))
2617 val = iterative_hash_expr (vuse, val);
2619 return val;
2622 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2623 up degenerate PHIs created by or exposed by jump threading. */
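/* A degenerate PHI is one whose arguments all have the same value,
   e.g. (hypothetical)

     # x_2 = PHI <y_1(3), y_1(4)>

   which is equivalent to the copy x_2 = y_1.  */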
2625 /* Given a statement STMT, which is either a PHI node or an assignment,
2626 remove it from the IL. */
2628 static void
2629 remove_stmt_or_phi (gimple stmt)
2631 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2633 if (gimple_code (stmt) == GIMPLE_PHI)
2634 remove_phi_node (&gsi, true);
2635 else
2637 gsi_remove (&gsi, true);
2638 release_defs (stmt);
2642 /* Given a statement STMT, which is either a PHI node or an assignment,
2643 return the "rhs" of the node; in the case of a non-degenerate
2644 PHI, NULL is returned. */
2646 static tree
2647 get_rhs_or_phi_arg (gimple stmt)
2649 if (gimple_code (stmt) == GIMPLE_PHI)
2650 return degenerate_phi_result (stmt);
2651 else if (gimple_assign_single_p (stmt))
2652 return gimple_assign_rhs1 (stmt);
2653 else
2654 gcc_unreachable ();
2658 /* Given a statement STMT, which is either a PHI node or an assignment,
2659 return the "lhs" of the node. */
2661 static tree
2662 get_lhs_or_phi_result (gimple stmt)
2664 if (gimple_code (stmt) == GIMPLE_PHI)
2665 return gimple_phi_result (stmt);
2666 else if (is_gimple_assign (stmt))
2667 return gimple_assign_lhs (stmt);
2668 else
2669 gcc_unreachable ();
2672 /* Propagate RHS into all uses of LHS (when possible).
2674 RHS and LHS are derived from STMT, which is passed in solely so
2675 that we can remove it if propagation is successful.
2677 When propagating into a PHI node or into a statement which turns
2678 into a trivial copy or constant initialization, set the
2679 appropriate bit in INTERESTING_NAMEs so that we will visit those
2680 nodes as well in an effort to pick up secondary optimization
2681 opportunities. */
2683 static void
2684 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2686 /* First verify that propagation is valid and isn't going to move a
2687 loop variant variable outside its loop. */
2688 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2689 && (TREE_CODE (rhs) != SSA_NAME
2690 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2691 && may_propagate_copy (lhs, rhs)
2692 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2694 use_operand_p use_p;
2695 imm_use_iterator iter;
2696 gimple use_stmt;
2697 bool all = true;
2699 /* Dump details. */
2700 if (dump_file && (dump_flags & TDF_DETAILS))
2702 fprintf (dump_file, " Replacing '");
2703 print_generic_expr (dump_file, lhs, dump_flags);
2704 fprintf (dump_file, "' with %s '",
2705 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2706 print_generic_expr (dump_file, rhs, dump_flags);
2707 fprintf (dump_file, "'\n");
2710 /* Walk over every use of LHS and try to replace the use with RHS.
2711 At this point the only reason why such a propagation would not
2712 be successful would be if the use occurs in an ASM_EXPR. */
2713 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2715 /* Leave debug stmts alone. If we succeed in propagating
2716 all non-debug uses, we'll drop the DEF, and propagation
2717 into debug stmts will occur then. */
2718 if (gimple_debug_bind_p (use_stmt))
2719 continue;
2721 /* It's not always safe to propagate into an ASM_EXPR. */
2722 if (gimple_code (use_stmt) == GIMPLE_ASM
2723 && ! may_propagate_copy_into_asm (lhs))
2725 all = false;
2726 continue;
2729 /* It's not ok to propagate into the definition stmt of RHS.
2730 <bb 9>:
2731 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2732 g_67.1_6 = prephitmp.12_36;
2733 goto <bb 9>;
2734 While this is strictly all dead code, we do not want to
2735 deal with it here. */
2736 if (TREE_CODE (rhs) == SSA_NAME
2737 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2739 all = false;
2740 continue;
2743 /* Dump details. */
2744 if (dump_file && (dump_flags & TDF_DETAILS))
2746 fprintf (dump_file, " Original statement:");
2747 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2750 /* Propagate the RHS into this use of the LHS. */
2751 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2752 propagate_value (use_p, rhs);
2754 /* Special cases to avoid useless calls into the folding
2755 routines, operand scanning, etc.
2757 Propagation into a PHI may cause the PHI to become
2758 a degenerate, so mark the PHI as interesting. No other
2759 actions are necessary. */
2760 if (gimple_code (use_stmt) == GIMPLE_PHI)
2762 tree result;
2764 /* Dump details. */
2765 if (dump_file && (dump_flags & TDF_DETAILS))
2767 fprintf (dump_file, " Updated statement:");
2768 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2771 result = get_lhs_or_phi_result (use_stmt);
2772 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2773 continue;
2776 /* From this point onward we are propagating into a
2777 real statement. Folding may (or may not) be possible,
2778 we may expose new operands, expose dead EH edges,
2779 etc. */
2780 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2781 cannot fold a call that simplifies to a constant,
2782 because the GIMPLE_CALL must be replaced by a
2783 GIMPLE_ASSIGN, and there is no way to effect such a
2784 transformation in-place. We might want to consider
2785 using the more general fold_stmt here. */
2787 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2788 fold_stmt_inplace (&gsi);
2791 /* Sometimes propagation can expose new operands to the
2792 renamer. */
2793 update_stmt (use_stmt);
2795 /* Dump details. */
2796 if (dump_file && (dump_flags & TDF_DETAILS))
2798 fprintf (dump_file, " Updated statement:");
2799 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2802 /* If we replaced a variable index with a constant, then
2803 we would need to update the invariant flag for ADDR_EXPRs. */
2804 if (gimple_assign_single_p (use_stmt)
2805 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2806 recompute_tree_invariant_for_addr_expr
2807 (gimple_assign_rhs1 (use_stmt));
2809 /* If we cleaned up EH information from the statement,
2810 mark its containing block as needing EH cleanups. */
2811 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2813 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2814 if (dump_file && (dump_flags & TDF_DETAILS))
2815 fprintf (dump_file, " Flagged to clear EH edges.\n");
2818 /* Propagation may expose new trivial copy/constant propagation
2819 opportunities. */
2820 if (gimple_assign_single_p (use_stmt)
2821 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2822 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2823 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2825 tree result = get_lhs_or_phi_result (use_stmt);
2826 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2829 /* Propagation into these nodes may make certain edges in
2830 the CFG unexecutable. We want to identify them, as PHI nodes
2831 at the destination of those unexecutable edges may become
2832 degenerate. */
2833 else if (gimple_code (use_stmt) == GIMPLE_COND
2834 || gimple_code (use_stmt) == GIMPLE_SWITCH
2835 || gimple_code (use_stmt) == GIMPLE_GOTO)
2837 tree val;
2839 if (gimple_code (use_stmt) == GIMPLE_COND)
2840 val = fold_binary_loc (gimple_location (use_stmt),
2841 gimple_cond_code (use_stmt),
2842 boolean_type_node,
2843 gimple_cond_lhs (use_stmt),
2844 gimple_cond_rhs (use_stmt));
2845 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2846 val = gimple_switch_index (use_stmt);
2847 else
2848 val = gimple_goto_dest (use_stmt);
2850 if (val && is_gimple_min_invariant (val))
2852 basic_block bb = gimple_bb (use_stmt);
2853 edge te = find_taken_edge (bb, val);
2854 edge_iterator ei;
2855 edge e;
2856 gimple_stmt_iterator gsi, psi;
2858 /* Remove all outgoing edges except TE. */
2859 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2861 if (e != te)
2863 /* Mark all the PHI nodes at the destination of
2864 the unexecutable edge as interesting. */
2865 for (psi = gsi_start_phis (e->dest);
2866 !gsi_end_p (psi);
2867 gsi_next (&psi))
2869 gimple phi = gsi_stmt (psi);
2871 tree result = gimple_phi_result (phi);
2872 int version = SSA_NAME_VERSION (result);
2874 bitmap_set_bit (interesting_names, version);
2877 te->probability += e->probability;
2879 te->count += e->count;
2880 remove_edge (e);
2881 cfg_altered = true;
2883 else
2884 ei_next (&ei);
2887 gsi = gsi_last_bb (gimple_bb (use_stmt));
2888 gsi_remove (&gsi, true);
2890 /* And fixup the flags on the single remaining edge. */
2891 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2892 te->flags &= ~EDGE_ABNORMAL;
2893 te->flags |= EDGE_FALLTHRU;
2894 if (te->probability > REG_BR_PROB_BASE)
2895 te->probability = REG_BR_PROB_BASE;
2900 /* Ensure there is nothing else to do. */
2901 gcc_assert (!all || has_zero_uses (lhs));
2903 /* If we were able to propagate away all uses of LHS, then
2904 we can remove STMT. */
2905 if (all)
2906 remove_stmt_or_phi (stmt);
2910 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2911 a statement that is a trivial copy or constant initialization.
2913 Attempt to eliminate STMT by propagating its RHS into all uses of
2914 its LHS. This may in turn set new bits in INTERESTING_NAMES
2915 for nodes we want to revisit later.
2917 All exit paths should clear INTERESTING_NAMES for the result
2918 of STMT. */
2920 static void
2921 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2923 tree lhs = get_lhs_or_phi_result (stmt);
2924 tree rhs;
2925 int version = SSA_NAME_VERSION (lhs);
2927 /* If the LHS of this statement or PHI has no uses, then we can
2928 just eliminate it. This can occur if, for example, the PHI
2929 was created by block duplication due to threading and its only
2930 use was in the conditional at the end of the block which was
2931 deleted. */
2932 if (has_zero_uses (lhs))
2934 bitmap_clear_bit (interesting_names, version);
2935 remove_stmt_or_phi (stmt);
2936 return;
2939 /* Get the RHS of the assignment or PHI node if the PHI is a
2940 degenerate. */
2941 rhs = get_rhs_or_phi_arg (stmt);
2942 if (!rhs)
2944 bitmap_clear_bit (interesting_names, version);
2945 return;
2948 if (!virtual_operand_p (lhs))
2949 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2950 else
2952 gimple use_stmt;
2953 imm_use_iterator iter;
2954 use_operand_p use_p;
2955 /* For virtual operands we have to propagate into all uses, as
2956 otherwise we will create overlapping live ranges. */
2957 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2958 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2959 SET_USE (use_p, rhs);
2960 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2961 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2962 remove_stmt_or_phi (stmt);
2965 /* Note that STMT may well have been deleted by now, so do
2966 not access it; instead use the saved version number to clear
2967 its entry in the worklist. */
2968 bitmap_clear_bit (interesting_names, version);
2971 /* The first phase in degenerate PHI elimination.
2973 Eliminate the degenerate PHIs in BB, then recurse on the
2974 dominator children of BB. */
2976 static void
2977 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2979 gimple_stmt_iterator gsi;
2980 basic_block son;
2982 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2984 gimple phi = gsi_stmt (gsi);
2986 eliminate_const_or_copy (phi, interesting_names);
2989 /* Recurse into the dominator children of BB. */
2990 for (son = first_dom_son (CDI_DOMINATORS, bb);
2991 son;
2992 son = next_dom_son (CDI_DOMINATORS, son))
2993 eliminate_degenerate_phis_1 (son, interesting_names);
2997 /* A very simple pass to eliminate degenerate PHI nodes from the
2998 IL. This is meant to be fast enough to run several times in the
2999 optimization pipeline.
3001 Certain optimizations, particularly those which duplicate blocks
3002 or remove edges from the CFG can create or expose PHIs which are
3003 trivial copies or constant initializations.
3005 While we could pick up these optimizations in DOM or with the
3006 combination of copy-prop and CCP, those solutions are far too
3007 heavy-weight for our needs.
3009 This implementation has two phases so that we can efficiently
3010 eliminate the first order degenerate PHIs and second order
3011 degenerate PHIs.
3013 The first phase performs a dominator walk to identify and eliminate
3014 the vast majority of the degenerate PHIs. When a degenerate PHI
3015 is identified and eliminated any affected statements or PHIs
3016 are put on a worklist.
3018 The second phase eliminates degenerate PHIs and trivial copies
3019 or constant initializations using the worklist. This is how we
3020 pick up the secondary optimization opportunities with minimal
3021 cost. */
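/* For instance (hypothetical): eliminating

     # x_2 = PHI <y_1(3), y_1(4)>

   rewrites uses of x_2 to y_1, which may in turn leave another PHI,
   such as # z_4 = PHI <x_2(5), y_1(6)>, degenerate; the worklist
   phase picks up these second order opportunities.  */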
3023 static unsigned int
3024 eliminate_degenerate_phis (void)
3026 bitmap interesting_names;
3027 bitmap interesting_names1;
3029 /* Bitmap of blocks which need EH information updated. We can not
3030 update it on-the-fly as doing so invalidates the dominator tree. */
3031 need_eh_cleanup = BITMAP_ALLOC (NULL);
3033 /* INTERESTING_NAMES is effectively our worklist, indexed by
3034 SSA_NAME_VERSION.
3036 A set bit indicates that the statement or PHI node which
3037 defines the SSA_NAME should be (re)examined to determine if
3038 it has become a degenerate PHI or trivial const/copy propagation
3039 opportunity.
3041 Experiments have shown we generally get better compilation
3042 time behavior with bitmaps rather than sbitmaps. */
3043 interesting_names = BITMAP_ALLOC (NULL);
3044 interesting_names1 = BITMAP_ALLOC (NULL);
3046 calculate_dominance_info (CDI_DOMINATORS);
3047 cfg_altered = false;
3049 /* First phase. Eliminate degenerate PHIs via a dominator
3050 walk of the CFG.
3052 Experiments have indicated that we generally get better
3053 compile-time behavior by visiting blocks in the first
3054 phase in dominator order. Presumably this is because walking
3055 in dominator order leaves fewer PHIs for later examination
3056 by the worklist phase. */
3057 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun),
3058 interesting_names);
3060 /* Second phase. Eliminate second order degenerate PHIs as well
3061 as trivial copies or constant initializations identified by
3062 the first phase or this phase. Basically we keep iterating
3063 until our set of INTERESTING_NAMEs is empty. */
3064 while (!bitmap_empty_p (interesting_names))
3066 unsigned int i;
3067 bitmap_iterator bi;
3069 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3070 changed during the loop. Copy it to another bitmap and
3071 use that. */
3072 bitmap_copy (interesting_names1, interesting_names);
3074 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3076 tree name = ssa_name (i);
3078 /* Ignore SSA_NAMEs that have been released because
3079 their defining statement was deleted (unreachable). */
3080 if (name)
3081 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3082 interesting_names);
3086 if (cfg_altered)
3088 free_dominance_info (CDI_DOMINATORS);
3089 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3090 if (current_loops)
3091 loops_state_set (LOOPS_NEED_FIXUP);
3094 /* Propagation of constants and copies may make some EH edges dead.
3095 Purge such edges from the CFG as needed. */
3096 if (!bitmap_empty_p (need_eh_cleanup))
3098 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3099 BITMAP_FREE (need_eh_cleanup);
3102 BITMAP_FREE (interesting_names);
3103 BITMAP_FREE (interesting_names1);
3104 return 0;
3107 namespace {
3109 const pass_data pass_data_phi_only_cprop =
3111 GIMPLE_PASS, /* type */
3112 "phicprop", /* name */
3113 OPTGROUP_NONE, /* optinfo_flags */
3114 true, /* has_gate */
3115 true, /* has_execute */
3116 TV_TREE_PHI_CPROP, /* tv_id */
3117 ( PROP_cfg | PROP_ssa ), /* properties_required */
3118 0, /* properties_provided */
3119 0, /* properties_destroyed */
3120 0, /* todo_flags_start */
3121 ( TODO_cleanup_cfg | TODO_verify_ssa
3122 | TODO_verify_stmts
3123 | TODO_update_ssa ), /* todo_flags_finish */
3126 class pass_phi_only_cprop : public gimple_opt_pass
3128 public:
3129 pass_phi_only_cprop (gcc::context *ctxt)
3130 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3133 /* opt_pass methods: */
3134 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3135 bool gate () { return gate_dominator (); }
3136 unsigned int execute () { return eliminate_degenerate_phis (); }
3138 }; // class pass_phi_only_cprop
3140 } // anon namespace
3142 gimple_opt_pass *
3143 make_pass_phi_only_cprop (gcc::context *ctxt)
3145 return new pass_phi_only_cprop (ctxt);