PR middle-end/59175
[official-gcc.git] / gcc / tree-ssa-dom.c
blob0ce24df2abef6c21c0b1613b3d4a6e9a38cc6ce2
1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2013 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "hash-table.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "flags.h"
28 #include "tm_p.h"
29 #include "basic-block.h"
30 #include "cfgloop.h"
31 #include "function.h"
32 #include "gimple-pretty-print.h"
33 #include "gimple.h"
34 #include "gimple-iterator.h"
35 #include "gimple-ssa.h"
36 #include "tree-cfg.h"
37 #include "tree-phinodes.h"
38 #include "ssa-iterators.h"
39 #include "tree-ssanames.h"
40 #include "tree-into-ssa.h"
41 #include "domwalk.h"
42 #include "tree-pass.h"
43 #include "tree-ssa-propagate.h"
44 #include "tree-ssa-threadupdate.h"
45 #include "langhooks.h"
46 #include "params.h"
47 #include "tree-ssa-threadedge.h"
48 #include "tree-ssa-dom.h"
50 /* This file implements optimizations on the dominator tree. */
52 /* Representation of a "naked" right-hand-side expression, to be used
53 in recording available expressions in the expression hash table. */
55 enum expr_kind
57 EXPR_SINGLE,
58 EXPR_UNARY,
59 EXPR_BINARY,
60 EXPR_TERNARY,
61 EXPR_CALL,
62 EXPR_PHI
65 struct hashable_expr
67 tree type;
68 enum expr_kind kind;
69 union {
70 struct { tree rhs; } single;
71 struct { enum tree_code op; tree opnd; } unary;
72 struct { enum tree_code op; tree opnd0, opnd1; } binary;
73 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
74 struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
75 struct { size_t nargs; tree *args; } phi;
76 } ops;
79 /* Structure for recording known values of a conditional expression
80 at the exits from its block. */
82 typedef struct cond_equivalence_s
84 struct hashable_expr cond;
85 tree value;
86 } cond_equivalence;
89 /* Structure for recording edge equivalences as well as any pending
90 edge redirections during the dominator optimizer.
92 Computing and storing the edge equivalences instead of creating
93 them on-demand can save significant amounts of time, particularly
94 for pathological cases involving switch statements.
96 These structures live for a single iteration of the dominator
97 optimizer in the edge's AUX field. At the end of an iteration we
98 free each of these structures and update the AUX field to point
99 to any requested redirection target (the code for updating the
100 CFG and SSA graph for edge redirection expects redirection edge
101 targets to be in the AUX field for each edge. */
103 struct edge_info
105 /* If this edge creates a simple equivalence, the LHS and RHS of
106 the equivalence will be stored here. */
107 tree lhs;
108 tree rhs;
110 /* Traversing an edge may also indicate one or more particular conditions
111 are true or false. */
112 vec<cond_equivalence> cond_equivalences;
115 /* Stack of available expressions in AVAIL_EXPRs. Each block pushes any
116 expressions it enters into the hash table along with a marker entry
117 (null). When we finish processing the block, we pop off entries and
118 remove the expressions from the global hash table until we hit the
119 marker. */
120 typedef struct expr_hash_elt * expr_hash_elt_t;
122 static vec<expr_hash_elt_t> avail_exprs_stack;
124 /* Structure for entries in the expression hash table. */
126 struct expr_hash_elt
128 /* The value (lhs) of this expression. */
129 tree lhs;
131 /* The expression (rhs) we want to record. */
132 struct hashable_expr expr;
134 /* The stmt pointer if this element corresponds to a statement. */
135 gimple stmt;
137 /* The hash value for RHS. */
138 hashval_t hash;
140 /* A unique stamp, typically the address of the hash
141 element itself, used in removing entries from the table. */
142 struct expr_hash_elt *stamp;
145 /* Hashtable helpers. */
147 static bool hashable_expr_equal_p (const struct hashable_expr *,
148 const struct hashable_expr *);
149 static void free_expr_hash_elt (void *);
151 struct expr_elt_hasher
153 typedef expr_hash_elt value_type;
154 typedef expr_hash_elt compare_type;
155 static inline hashval_t hash (const value_type *);
156 static inline bool equal (const value_type *, const compare_type *);
157 static inline void remove (value_type *);
160 inline hashval_t
161 expr_elt_hasher::hash (const value_type *p)
163 return p->hash;
166 inline bool
167 expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
169 gimple stmt1 = p1->stmt;
170 const struct hashable_expr *expr1 = &p1->expr;
171 const struct expr_hash_elt *stamp1 = p1->stamp;
172 gimple stmt2 = p2->stmt;
173 const struct hashable_expr *expr2 = &p2->expr;
174 const struct expr_hash_elt *stamp2 = p2->stamp;
176 /* This case should apply only when removing entries from the table. */
177 if (stamp1 == stamp2)
178 return true;
180 /* FIXME tuples:
181 We add stmts to a hash table and them modify them. To detect the case
182 that we modify a stmt and then search for it, we assume that the hash
183 is always modified by that change.
184 We have to fully check why this doesn't happen on trunk or rewrite
185 this in a more reliable (and easier to understand) way. */
186 if (((const struct expr_hash_elt *)p1)->hash
187 != ((const struct expr_hash_elt *)p2)->hash)
188 return false;
190 /* In case of a collision, both RHS have to be identical and have the
191 same VUSE operands. */
192 if (hashable_expr_equal_p (expr1, expr2)
193 && types_compatible_p (expr1->type, expr2->type))
195 /* Note that STMT1 and/or STMT2 may be NULL. */
196 return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
197 == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
200 return false;
203 /* Delete an expr_hash_elt and reclaim its storage. */
205 inline void
206 expr_elt_hasher::remove (value_type *element)
208 free_expr_hash_elt (element);
211 /* Hash table with expressions made available during the renaming process.
212 When an assignment of the form X_i = EXPR is found, the statement is
213 stored in this table. If the same expression EXPR is later found on the
214 RHS of another statement, it is replaced with X_i (thus performing
215 global redundancy elimination). Similarly as we pass through conditionals
216 we record the conditional itself as having either a true or false value
217 in this table. */
218 static hash_table <expr_elt_hasher> avail_exprs;
220 /* Stack of dest,src pairs that need to be restored during finalization.
222 A NULL entry is used to mark the end of pairs which need to be
223 restored during finalization of this block. */
224 static vec<tree> const_and_copies_stack;
226 /* Track whether or not we have changed the control flow graph. */
227 static bool cfg_altered;
229 /* Bitmap of blocks that have had EH statements cleaned. We should
230 remove their dead edges eventually. */
231 static bitmap need_eh_cleanup;
233 /* Statistics for dominator optimizations. */
234 struct opt_stats_d
236 long num_stmts;
237 long num_exprs_considered;
238 long num_re;
239 long num_const_prop;
240 long num_copy_prop;
243 static struct opt_stats_d opt_stats;
245 /* Local functions. */
246 static void optimize_stmt (basic_block, gimple_stmt_iterator);
247 static tree lookup_avail_expr (gimple, bool);
248 static hashval_t avail_expr_hash (const void *);
249 static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
250 static void record_cond (cond_equivalence *);
251 static void record_const_or_copy (tree, tree);
252 static void record_equality (tree, tree);
253 static void record_equivalences_from_phis (basic_block);
254 static void record_equivalences_from_incoming_edge (basic_block);
255 static void eliminate_redundant_computations (gimple_stmt_iterator *);
256 static void record_equivalences_from_stmt (gimple, int);
257 static void remove_local_expressions_from_table (void);
258 static void restore_vars_to_original_value (void);
259 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
262 /* Given a statement STMT, initialize the hash table element pointed to
263 by ELEMENT. */
265 static void
266 initialize_hash_element (gimple stmt, tree lhs,
267 struct expr_hash_elt *element)
269 enum gimple_code code = gimple_code (stmt);
270 struct hashable_expr *expr = &element->expr;
272 if (code == GIMPLE_ASSIGN)
274 enum tree_code subcode = gimple_assign_rhs_code (stmt);
276 switch (get_gimple_rhs_class (subcode))
278 case GIMPLE_SINGLE_RHS:
279 expr->kind = EXPR_SINGLE;
280 expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
281 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
282 break;
283 case GIMPLE_UNARY_RHS:
284 expr->kind = EXPR_UNARY;
285 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
286 expr->ops.unary.op = subcode;
287 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
288 break;
289 case GIMPLE_BINARY_RHS:
290 expr->kind = EXPR_BINARY;
291 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
292 expr->ops.binary.op = subcode;
293 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
294 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
295 break;
296 case GIMPLE_TERNARY_RHS:
297 expr->kind = EXPR_TERNARY;
298 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
299 expr->ops.ternary.op = subcode;
300 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
301 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
302 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
303 break;
304 default:
305 gcc_unreachable ();
308 else if (code == GIMPLE_COND)
310 expr->type = boolean_type_node;
311 expr->kind = EXPR_BINARY;
312 expr->ops.binary.op = gimple_cond_code (stmt);
313 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
314 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
316 else if (code == GIMPLE_CALL)
318 size_t nargs = gimple_call_num_args (stmt);
319 size_t i;
321 gcc_assert (gimple_call_lhs (stmt));
323 expr->type = TREE_TYPE (gimple_call_lhs (stmt));
324 expr->kind = EXPR_CALL;
325 expr->ops.call.fn_from = stmt;
327 if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
328 expr->ops.call.pure = true;
329 else
330 expr->ops.call.pure = false;
332 expr->ops.call.nargs = nargs;
333 expr->ops.call.args = XCNEWVEC (tree, nargs);
334 for (i = 0; i < nargs; i++)
335 expr->ops.call.args[i] = gimple_call_arg (stmt, i);
337 else if (code == GIMPLE_SWITCH)
339 expr->type = TREE_TYPE (gimple_switch_index (stmt));
340 expr->kind = EXPR_SINGLE;
341 expr->ops.single.rhs = gimple_switch_index (stmt);
343 else if (code == GIMPLE_GOTO)
345 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
346 expr->kind = EXPR_SINGLE;
347 expr->ops.single.rhs = gimple_goto_dest (stmt);
349 else if (code == GIMPLE_PHI)
351 size_t nargs = gimple_phi_num_args (stmt);
352 size_t i;
354 expr->type = TREE_TYPE (gimple_phi_result (stmt));
355 expr->kind = EXPR_PHI;
356 expr->ops.phi.nargs = nargs;
357 expr->ops.phi.args = XCNEWVEC (tree, nargs);
359 for (i = 0; i < nargs; i++)
360 expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
362 else
363 gcc_unreachable ();
365 element->lhs = lhs;
366 element->stmt = stmt;
367 element->hash = avail_expr_hash (element);
368 element->stamp = element;
371 /* Given a conditional expression COND as a tree, initialize
372 a hashable_expr expression EXPR. The conditional must be a
373 comparison or logical negation. A constant or a variable is
374 not permitted. */
376 static void
377 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
379 expr->type = boolean_type_node;
381 if (COMPARISON_CLASS_P (cond))
383 expr->kind = EXPR_BINARY;
384 expr->ops.binary.op = TREE_CODE (cond);
385 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
386 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
388 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
390 expr->kind = EXPR_UNARY;
391 expr->ops.unary.op = TRUTH_NOT_EXPR;
392 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
394 else
395 gcc_unreachable ();
398 /* Given a hashable_expr expression EXPR and an LHS,
399 initialize the hash table element pointed to by ELEMENT. */
401 static void
402 initialize_hash_element_from_expr (struct hashable_expr *expr,
403 tree lhs,
404 struct expr_hash_elt *element)
406 element->expr = *expr;
407 element->lhs = lhs;
408 element->stmt = NULL;
409 element->hash = avail_expr_hash (element);
410 element->stamp = element;
413 /* Compare two hashable_expr structures for equivalence.
414 They are considered equivalent when the the expressions
415 they denote must necessarily be equal. The logic is intended
416 to follow that of operand_equal_p in fold-const.c */
418 static bool
419 hashable_expr_equal_p (const struct hashable_expr *expr0,
420 const struct hashable_expr *expr1)
422 tree type0 = expr0->type;
423 tree type1 = expr1->type;
425 /* If either type is NULL, there is nothing to check. */
426 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
427 return false;
429 /* If both types don't have the same signedness, precision, and mode,
430 then we can't consider them equal. */
431 if (type0 != type1
432 && (TREE_CODE (type0) == ERROR_MARK
433 || TREE_CODE (type1) == ERROR_MARK
434 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
435 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
436 || TYPE_MODE (type0) != TYPE_MODE (type1)))
437 return false;
439 if (expr0->kind != expr1->kind)
440 return false;
442 switch (expr0->kind)
444 case EXPR_SINGLE:
445 return operand_equal_p (expr0->ops.single.rhs,
446 expr1->ops.single.rhs, 0);
448 case EXPR_UNARY:
449 if (expr0->ops.unary.op != expr1->ops.unary.op)
450 return false;
452 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
453 || expr0->ops.unary.op == NON_LVALUE_EXPR)
454 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
455 return false;
457 return operand_equal_p (expr0->ops.unary.opnd,
458 expr1->ops.unary.opnd, 0);
460 case EXPR_BINARY:
461 if (expr0->ops.binary.op != expr1->ops.binary.op)
462 return false;
464 if (operand_equal_p (expr0->ops.binary.opnd0,
465 expr1->ops.binary.opnd0, 0)
466 && operand_equal_p (expr0->ops.binary.opnd1,
467 expr1->ops.binary.opnd1, 0))
468 return true;
470 /* For commutative ops, allow the other order. */
471 return (commutative_tree_code (expr0->ops.binary.op)
472 && operand_equal_p (expr0->ops.binary.opnd0,
473 expr1->ops.binary.opnd1, 0)
474 && operand_equal_p (expr0->ops.binary.opnd1,
475 expr1->ops.binary.opnd0, 0));
477 case EXPR_TERNARY:
478 if (expr0->ops.ternary.op != expr1->ops.ternary.op
479 || !operand_equal_p (expr0->ops.ternary.opnd2,
480 expr1->ops.ternary.opnd2, 0))
481 return false;
483 if (operand_equal_p (expr0->ops.ternary.opnd0,
484 expr1->ops.ternary.opnd0, 0)
485 && operand_equal_p (expr0->ops.ternary.opnd1,
486 expr1->ops.ternary.opnd1, 0))
487 return true;
489 /* For commutative ops, allow the other order. */
490 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
491 && operand_equal_p (expr0->ops.ternary.opnd0,
492 expr1->ops.ternary.opnd1, 0)
493 && operand_equal_p (expr0->ops.ternary.opnd1,
494 expr1->ops.ternary.opnd0, 0));
496 case EXPR_CALL:
498 size_t i;
500 /* If the calls are to different functions, then they
501 clearly cannot be equal. */
502 if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
503 expr1->ops.call.fn_from))
504 return false;
506 if (! expr0->ops.call.pure)
507 return false;
509 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
510 return false;
512 for (i = 0; i < expr0->ops.call.nargs; i++)
513 if (! operand_equal_p (expr0->ops.call.args[i],
514 expr1->ops.call.args[i], 0))
515 return false;
517 return true;
520 case EXPR_PHI:
522 size_t i;
524 if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
525 return false;
527 for (i = 0; i < expr0->ops.phi.nargs; i++)
528 if (! operand_equal_p (expr0->ops.phi.args[i],
529 expr1->ops.phi.args[i], 0))
530 return false;
532 return true;
535 default:
536 gcc_unreachable ();
540 /* Generate a hash value for a pair of expressions. This can be used
541 iteratively by passing a previous result as the VAL argument.
543 The same hash value is always returned for a given pair of expressions,
544 regardless of the order in which they are presented. This is useful in
545 hashing the operands of commutative functions. */
547 static hashval_t
548 iterative_hash_exprs_commutative (const_tree t1,
549 const_tree t2, hashval_t val)
551 hashval_t one = iterative_hash_expr (t1, 0);
552 hashval_t two = iterative_hash_expr (t2, 0);
553 hashval_t t;
555 if (one > two)
556 t = one, one = two, two = t;
557 val = iterative_hash_hashval_t (one, val);
558 val = iterative_hash_hashval_t (two, val);
560 return val;
563 /* Compute a hash value for a hashable_expr value EXPR and a
564 previously accumulated hash value VAL. If two hashable_expr
565 values compare equal with hashable_expr_equal_p, they must
566 hash to the same value, given an identical value of VAL.
567 The logic is intended to follow iterative_hash_expr in tree.c. */
569 static hashval_t
570 iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
572 switch (expr->kind)
574 case EXPR_SINGLE:
575 val = iterative_hash_expr (expr->ops.single.rhs, val);
576 break;
578 case EXPR_UNARY:
579 val = iterative_hash_object (expr->ops.unary.op, val);
581 /* Make sure to include signedness in the hash computation.
582 Don't hash the type, that can lead to having nodes which
583 compare equal according to operand_equal_p, but which
584 have different hash codes. */
585 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
586 || expr->ops.unary.op == NON_LVALUE_EXPR)
587 val += TYPE_UNSIGNED (expr->type);
589 val = iterative_hash_expr (expr->ops.unary.opnd, val);
590 break;
592 case EXPR_BINARY:
593 val = iterative_hash_object (expr->ops.binary.op, val);
594 if (commutative_tree_code (expr->ops.binary.op))
595 val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
596 expr->ops.binary.opnd1, val);
597 else
599 val = iterative_hash_expr (expr->ops.binary.opnd0, val);
600 val = iterative_hash_expr (expr->ops.binary.opnd1, val);
602 break;
604 case EXPR_TERNARY:
605 val = iterative_hash_object (expr->ops.ternary.op, val);
606 if (commutative_ternary_tree_code (expr->ops.ternary.op))
607 val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
608 expr->ops.ternary.opnd1, val);
609 else
611 val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
612 val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
614 val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
615 break;
617 case EXPR_CALL:
619 size_t i;
620 enum tree_code code = CALL_EXPR;
621 gimple fn_from;
623 val = iterative_hash_object (code, val);
624 fn_from = expr->ops.call.fn_from;
625 if (gimple_call_internal_p (fn_from))
626 val = iterative_hash_hashval_t
627 ((hashval_t) gimple_call_internal_fn (fn_from), val);
628 else
629 val = iterative_hash_expr (gimple_call_fn (fn_from), val);
630 for (i = 0; i < expr->ops.call.nargs; i++)
631 val = iterative_hash_expr (expr->ops.call.args[i], val);
633 break;
635 case EXPR_PHI:
637 size_t i;
639 for (i = 0; i < expr->ops.phi.nargs; i++)
640 val = iterative_hash_expr (expr->ops.phi.args[i], val);
642 break;
644 default:
645 gcc_unreachable ();
648 return val;
651 /* Print a diagnostic dump of an expression hash table entry. */
653 static void
654 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
656 if (element->stmt)
657 fprintf (stream, "STMT ");
658 else
659 fprintf (stream, "COND ");
661 if (element->lhs)
663 print_generic_expr (stream, element->lhs, 0);
664 fprintf (stream, " = ");
667 switch (element->expr.kind)
669 case EXPR_SINGLE:
670 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
671 break;
673 case EXPR_UNARY:
674 fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
675 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
676 break;
678 case EXPR_BINARY:
679 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
680 fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
681 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
682 break;
684 case EXPR_TERNARY:
685 fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
686 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
687 fputs (", ", stream);
688 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
689 fputs (", ", stream);
690 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
691 fputs (">", stream);
692 break;
694 case EXPR_CALL:
696 size_t i;
697 size_t nargs = element->expr.ops.call.nargs;
698 gimple fn_from;
700 fn_from = element->expr.ops.call.fn_from;
701 if (gimple_call_internal_p (fn_from))
702 fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
703 stream);
704 else
705 print_generic_expr (stream, gimple_call_fn (fn_from), 0);
706 fprintf (stream, " (");
707 for (i = 0; i < nargs; i++)
709 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
710 if (i + 1 < nargs)
711 fprintf (stream, ", ");
713 fprintf (stream, ")");
715 break;
717 case EXPR_PHI:
719 size_t i;
720 size_t nargs = element->expr.ops.phi.nargs;
722 fprintf (stream, "PHI <");
723 for (i = 0; i < nargs; i++)
725 print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
726 if (i + 1 < nargs)
727 fprintf (stream, ", ");
729 fprintf (stream, ">");
731 break;
733 fprintf (stream, "\n");
735 if (element->stmt)
737 fprintf (stream, " ");
738 print_gimple_stmt (stream, element->stmt, 0, 0);
742 /* Delete variable sized pieces of the expr_hash_elt ELEMENT. */
744 static void
745 free_expr_hash_elt_contents (struct expr_hash_elt *element)
747 if (element->expr.kind == EXPR_CALL)
748 free (element->expr.ops.call.args);
749 else if (element->expr.kind == EXPR_PHI)
750 free (element->expr.ops.phi.args);
753 /* Delete an expr_hash_elt and reclaim its storage. */
755 static void
756 free_expr_hash_elt (void *elt)
758 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
759 free_expr_hash_elt_contents (element);
760 free (element);
763 /* Allocate an EDGE_INFO for edge E and attach it to E.
764 Return the new EDGE_INFO structure. */
766 static struct edge_info *
767 allocate_edge_info (edge e)
769 struct edge_info *edge_info;
771 edge_info = XCNEW (struct edge_info);
773 e->aux = edge_info;
774 return edge_info;
777 /* Free all EDGE_INFO structures associated with edges in the CFG.
778 If a particular edge can be threaded, copy the redirection
779 target from the EDGE_INFO structure into the edge's AUX field
780 as required by code to update the CFG and SSA graph for
781 jump threading. */
783 static void
784 free_all_edge_infos (void)
786 basic_block bb;
787 edge_iterator ei;
788 edge e;
790 FOR_EACH_BB (bb)
792 FOR_EACH_EDGE (e, ei, bb->preds)
794 struct edge_info *edge_info = (struct edge_info *) e->aux;
796 if (edge_info)
798 edge_info->cond_equivalences.release ();
799 free (edge_info);
800 e->aux = NULL;
806 class dom_opt_dom_walker : public dom_walker
808 public:
809 dom_opt_dom_walker (cdi_direction direction)
810 : dom_walker (direction), m_dummy_cond (NULL) {}
812 virtual void before_dom_children (basic_block);
813 virtual void after_dom_children (basic_block);
815 private:
816 void thread_across_edge (edge);
818 gimple m_dummy_cond;
821 /* Jump threading, redundancy elimination and const/copy propagation.
823 This pass may expose new symbols that need to be renamed into SSA. For
824 every new symbol exposed, its corresponding bit will be set in
825 VARS_TO_RENAME. */
827 static unsigned int
828 tree_ssa_dominator_optimize (void)
830 memset (&opt_stats, 0, sizeof (opt_stats));
832 /* Create our hash tables. */
833 avail_exprs.create (1024);
834 avail_exprs_stack.create (20);
835 const_and_copies_stack.create (20);
836 need_eh_cleanup = BITMAP_ALLOC (NULL);
838 calculate_dominance_info (CDI_DOMINATORS);
839 cfg_altered = false;
841 /* We need to know loop structures in order to avoid destroying them
842 in jump threading. Note that we still can e.g. thread through loop
843 headers to an exit edge, or through loop header to the loop body, assuming
844 that we update the loop info. */
845 loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);
847 /* Initialize the value-handle array. */
848 threadedge_initialize_values ();
850 /* We need accurate information regarding back edges in the CFG
851 for jump threading; this may include back edges that are not part of
852 a single loop. */
853 mark_dfs_back_edges ();
855 /* Recursively walk the dominator tree optimizing statements. */
856 dom_opt_dom_walker (CDI_DOMINATORS).walk (cfun->cfg->x_entry_block_ptr);
859 gimple_stmt_iterator gsi;
860 basic_block bb;
861 FOR_EACH_BB (bb)
863 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
864 update_stmt_if_modified (gsi_stmt (gsi));
868 /* If we exposed any new variables, go ahead and put them into
869 SSA form now, before we handle jump threading. This simplifies
870 interactions between rewriting of _DECL nodes into SSA form
871 and rewriting SSA_NAME nodes into SSA form after block
872 duplication and CFG manipulation. */
873 update_ssa (TODO_update_ssa);
875 free_all_edge_infos ();
877 /* Thread jumps, creating duplicate blocks as needed. */
878 cfg_altered |= thread_through_all_blocks (first_pass_instance);
880 if (cfg_altered)
881 free_dominance_info (CDI_DOMINATORS);
883 /* Removal of statements may make some EH edges dead. Purge
884 such edges from the CFG as needed. */
885 if (!bitmap_empty_p (need_eh_cleanup))
887 unsigned i;
888 bitmap_iterator bi;
890 /* Jump threading may have created forwarder blocks from blocks
891 needing EH cleanup; the new successor of these blocks, which
892 has inherited from the original block, needs the cleanup.
893 Don't clear bits in the bitmap, as that can break the bitmap
894 iterator. */
895 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
897 basic_block bb = BASIC_BLOCK (i);
898 if (bb == NULL)
899 continue;
900 while (single_succ_p (bb)
901 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
902 bb = single_succ (bb);
903 if (bb == EXIT_BLOCK_PTR)
904 continue;
905 if ((unsigned) bb->index != i)
906 bitmap_set_bit (need_eh_cleanup, bb->index);
909 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
910 bitmap_clear (need_eh_cleanup);
913 statistics_counter_event (cfun, "Redundant expressions eliminated",
914 opt_stats.num_re);
915 statistics_counter_event (cfun, "Constants propagated",
916 opt_stats.num_const_prop);
917 statistics_counter_event (cfun, "Copies propagated",
918 opt_stats.num_copy_prop);
920 /* Debugging dumps. */
921 if (dump_file && (dump_flags & TDF_STATS))
922 dump_dominator_optimization_stats (dump_file);
924 loop_optimizer_finalize ();
926 /* Delete our main hashtable. */
927 avail_exprs.dispose ();
929 /* Free asserted bitmaps and stacks. */
930 BITMAP_FREE (need_eh_cleanup);
932 avail_exprs_stack.release ();
933 const_and_copies_stack.release ();
935 /* Free the value-handle array. */
936 threadedge_finalize_values ();
938 return 0;
941 static bool
942 gate_dominator (void)
944 return flag_tree_dom != 0;
947 namespace {
949 const pass_data pass_data_dominator =
951 GIMPLE_PASS, /* type */
952 "dom", /* name */
953 OPTGROUP_NONE, /* optinfo_flags */
954 true, /* has_gate */
955 true, /* has_execute */
956 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
957 ( PROP_cfg | PROP_ssa ), /* properties_required */
958 0, /* properties_provided */
959 0, /* properties_destroyed */
960 0, /* todo_flags_start */
961 ( TODO_cleanup_cfg | TODO_update_ssa
962 | TODO_verify_ssa
963 | TODO_verify_flow ), /* todo_flags_finish */
966 class pass_dominator : public gimple_opt_pass
968 public:
969 pass_dominator (gcc::context *ctxt)
970 : gimple_opt_pass (pass_data_dominator, ctxt)
973 /* opt_pass methods: */
974 opt_pass * clone () { return new pass_dominator (m_ctxt); }
975 bool gate () { return gate_dominator (); }
976 unsigned int execute () { return tree_ssa_dominator_optimize (); }
978 }; // class pass_dominator
980 } // anon namespace
982 gimple_opt_pass *
983 make_pass_dominator (gcc::context *ctxt)
985 return new pass_dominator (ctxt);
989 /* Given a conditional statement CONDSTMT, convert the
990 condition to a canonical form. */
992 static void
993 canonicalize_comparison (gimple condstmt)
995 tree op0;
996 tree op1;
997 enum tree_code code;
999 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
1001 op0 = gimple_cond_lhs (condstmt);
1002 op1 = gimple_cond_rhs (condstmt);
1004 code = gimple_cond_code (condstmt);
1006 /* If it would be profitable to swap the operands, then do so to
1007 canonicalize the statement, enabling better optimization.
1009 By placing canonicalization of such expressions here we
1010 transparently keep statements in canonical form, even
1011 when the statement is modified. */
1012 if (tree_swap_operands_p (op0, op1, false))
1014 /* For relationals we need to swap the operands
1015 and change the code. */
1016 if (code == LT_EXPR
1017 || code == GT_EXPR
1018 || code == LE_EXPR
1019 || code == GE_EXPR)
1021 code = swap_tree_comparison (code);
1023 gimple_cond_set_code (condstmt, code);
1024 gimple_cond_set_lhs (condstmt, op1);
1025 gimple_cond_set_rhs (condstmt, op0);
1027 update_stmt (condstmt);
1032 /* Initialize local stacks for this optimizer and record equivalences
1033 upon entry to BB. Equivalences can come from the edge traversed to
1034 reach BB or they may come from PHI nodes at the start of BB. */
1036 /* Remove all the expressions in LOCALS from TABLE, stopping when there are
1037 LIMIT entries left in LOCALs. */
1039 static void
1040 remove_local_expressions_from_table (void)
1042 /* Remove all the expressions made available in this block. */
1043 while (avail_exprs_stack.length () > 0)
1045 expr_hash_elt_t victim = avail_exprs_stack.pop ();
1046 expr_hash_elt **slot;
1048 if (victim == NULL)
1049 break;
1051 /* This must precede the actual removal from the hash table,
1052 as ELEMENT and the table entry may share a call argument
1053 vector which will be freed during removal. */
1054 if (dump_file && (dump_flags & TDF_DETAILS))
1056 fprintf (dump_file, "<<<< ");
1057 print_expr_hash_elt (dump_file, victim);
1060 slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
1061 gcc_assert (slot && *slot == victim);
1062 avail_exprs.clear_slot (slot);
1066 /* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
1067 CONST_AND_COPIES to its original state, stopping when we hit a
1068 NULL marker. */
1070 static void
1071 restore_vars_to_original_value (void)
1073 while (const_and_copies_stack.length () > 0)
1075 tree prev_value, dest;
1077 dest = const_and_copies_stack.pop ();
1079 if (dest == NULL)
1080 break;
1082 if (dump_file && (dump_flags & TDF_DETAILS))
1084 fprintf (dump_file, "<<<< COPY ");
1085 print_generic_expr (dump_file, dest, 0);
1086 fprintf (dump_file, " = ");
1087 print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
1088 fprintf (dump_file, "\n");
1091 prev_value = const_and_copies_stack.pop ();
1092 set_ssa_name_value (dest, prev_value);
1096 /* A trivial wrapper so that we can present the generic jump
1097 threading code with a simple API for simplifying statements. */
1098 static tree
1099 simplify_stmt_for_jump_threading (gimple stmt,
1100 gimple within_stmt ATTRIBUTE_UNUSED)
1102 return lookup_avail_expr (stmt, false);
1105 /* Record into the equivalence tables any equivalences implied by
1106 traversing edge E (which are cached in E->aux).
1108 Callers are responsible for managing the unwinding markers. */
1109 static void
1110 record_temporary_equivalences (edge e)
1112 int i;
1113 struct edge_info *edge_info = (struct edge_info *) e->aux;
1115 /* If we have info associated with this edge, record it into
1116 our equivalence tables. */
1117 if (edge_info)
1119 cond_equivalence *eq;
1120 tree lhs = edge_info->lhs;
1121 tree rhs = edge_info->rhs;
1123 /* If we have a simple NAME = VALUE equivalence, record it. */
1124 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1125 record_const_or_copy (lhs, rhs);
1127 /* If we have 0 = COND or 1 = COND equivalences, record them
1128 into our expression hash tables. */
1129 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1130 record_cond (eq);
1134 /* Wrapper for common code to attempt to thread an edge. For example,
1135 it handles lazily building the dummy condition and the bookkeeping
1136 when jump threading is successful. */
1138 void
1139 dom_opt_dom_walker::thread_across_edge (edge e)
1141 if (! m_dummy_cond)
1142 m_dummy_cond =
1143 gimple_build_cond (NE_EXPR,
1144 integer_zero_node, integer_zero_node,
1145 NULL, NULL);
1147 /* Push a marker on both stacks so we can unwind the tables back to their
1148 current state. */
1149 avail_exprs_stack.safe_push (NULL);
1150 const_and_copies_stack.safe_push (NULL_TREE);
1152 /* Traversing E may result in equivalences we can utilize. */
1153 record_temporary_equivalences (e);
1155 /* With all the edge equivalences in the tables, go ahead and attempt
1156 to thread through E->dest. */
1157 ::thread_across_edge (m_dummy_cond, e, false,
1158 &const_and_copies_stack,
1159 simplify_stmt_for_jump_threading);
1161 /* And restore the various tables to their state before
1162 we threaded this edge.
1164 XXX The code in tree-ssa-threadedge.c will restore the state of
1165 the const_and_copies table. We we just have to restore the expression
1166 table. */
1167 remove_local_expressions_from_table ();
1170 /* PHI nodes can create equivalences too.
1172 Ignoring any alternatives which are the same as the result, if
1173 all the alternatives are equal, then the PHI node creates an
1174 equivalence. */
1176 static void
1177 record_equivalences_from_phis (basic_block bb)
1179 gimple_stmt_iterator gsi;
1181 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1183 gimple phi = gsi_stmt (gsi);
1185 tree lhs = gimple_phi_result (phi);
1186 tree rhs = NULL;
1187 size_t i;
1189 for (i = 0; i < gimple_phi_num_args (phi); i++)
1191 tree t = gimple_phi_arg_def (phi, i);
1193 /* Ignore alternatives which are the same as our LHS. Since
1194 LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
1195 can simply compare pointers. */
1196 if (lhs == t)
1197 continue;
1199 /* If we have not processed an alternative yet, then set
1200 RHS to this alternative. */
1201 if (rhs == NULL)
1202 rhs = t;
1203 /* If we have processed an alternative (stored in RHS), then
1204 see if it is equal to this one. If it isn't, then stop
1205 the search. */
1206 else if (! operand_equal_for_phi_arg_p (rhs, t))
1207 break;
1210 /* If we had no interesting alternatives, then all the RHS alternatives
1211 must have been the same as LHS. */
1212 if (!rhs)
1213 rhs = lhs;
1215 /* If we managed to iterate through each PHI alternative without
1216 breaking out of the loop, then we have a PHI which may create
1217 a useful equivalence. We do not need to record unwind data for
1218 this, since this is a true assignment and not an equivalence
1219 inferred from a comparison. All uses of this ssa name are dominated
1220 by this assignment, so unwinding just costs time and space. */
1221 if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
1222 set_ssa_name_value (lhs, rhs);
1226 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1227 return that edge. Otherwise return NULL. */
1228 static edge
1229 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1231 edge retval = NULL;
1232 edge e;
1233 edge_iterator ei;
1235 FOR_EACH_EDGE (e, ei, bb->preds)
1237 /* A loop back edge can be identified by the destination of
1238 the edge dominating the source of the edge. */
1239 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1240 continue;
1242 /* If we have already seen a non-loop edge, then we must have
1243 multiple incoming non-loop edges and thus we return NULL. */
1244 if (retval)
1245 return NULL;
1247 /* This is the first non-loop incoming edge we have found. Record
1248 it. */
1249 retval = e;
1252 return retval;
1255 /* Record any equivalences created by the incoming edge to BB. If BB
1256 has more than one incoming edge, then no equivalence is created. */
1258 static void
1259 record_equivalences_from_incoming_edge (basic_block bb)
1261 edge e;
1262 basic_block parent;
1263 struct edge_info *edge_info;
1265 /* If our parent block ended with a control statement, then we may be
1266 able to record some equivalences based on which outgoing edge from
1267 the parent was followed. */
1268 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1270 e = single_incoming_edge_ignoring_loop_edges (bb);
1272 /* If we had a single incoming edge from our parent block, then enter
1273 any data associated with the edge into our tables. */
1274 if (e && e->src == parent)
1276 unsigned int i;
1278 edge_info = (struct edge_info *) e->aux;
1280 if (edge_info)
1282 tree lhs = edge_info->lhs;
1283 tree rhs = edge_info->rhs;
1284 cond_equivalence *eq;
1286 if (lhs)
1287 record_equality (lhs, rhs);
1289 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
1290 set via a widening type conversion, then we may be able to record
1291 additional equivalences. */
1292 if (lhs
1293 && TREE_CODE (lhs) == SSA_NAME
1294 && is_gimple_constant (rhs)
1295 && TREE_CODE (rhs) == INTEGER_CST)
1297 gimple defstmt = SSA_NAME_DEF_STMT (lhs);
1299 if (defstmt
1300 && is_gimple_assign (defstmt)
1301 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
1303 tree old_rhs = gimple_assign_rhs1 (defstmt);
1305 /* If the conversion widens the original value and
1306 the constant is in the range of the type of OLD_RHS,
1307 then convert the constant and record the equivalence.
1309 Note that int_fits_type_p does not check the precision
1310 if the upper and lower bounds are OK. */
1311 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
1312 && (TYPE_PRECISION (TREE_TYPE (lhs))
1313 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
1314 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
1316 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
1317 record_equality (old_rhs, newval);
1322 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1323 record_cond (eq);
1328 /* Dump SSA statistics on FILE. */
1330 void
1331 dump_dominator_optimization_stats (FILE *file)
1333 fprintf (file, "Total number of statements: %6ld\n\n",
1334 opt_stats.num_stmts);
1335 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1336 opt_stats.num_exprs_considered);
1338 fprintf (file, "\nHash table statistics:\n");
1340 fprintf (file, " avail_exprs: ");
1341 htab_statistics (file, avail_exprs);
1345 /* Dump SSA statistics on stderr. */
1347 DEBUG_FUNCTION void
1348 debug_dominator_optimization_stats (void)
1350 dump_dominator_optimization_stats (stderr);
1354 /* Dump statistics for the hash table HTAB. */
1356 static void
1357 htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
1359 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1360 (long) htab.size (),
1361 (long) htab.elements (),
1362 htab.collisions ());
1366 /* Enter condition equivalence into the expression hash table.
1367 This indicates that a conditional expression has a known
1368 boolean value. */
1370 static void
1371 record_cond (cond_equivalence *p)
1373 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1374 expr_hash_elt **slot;
1376 initialize_hash_element_from_expr (&p->cond, p->value, element);
1378 slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
1379 if (*slot == NULL)
1381 *slot = element;
1383 if (dump_file && (dump_flags & TDF_DETAILS))
1385 fprintf (dump_file, "1>>> ");
1386 print_expr_hash_elt (dump_file, element);
1389 avail_exprs_stack.safe_push (element);
1391 else
1392 free_expr_hash_elt (element);
1395 /* Build a cond_equivalence record indicating that the comparison
1396 CODE holds between operands OP0 and OP1 and push it to **P. */
1398 static void
1399 build_and_record_new_cond (enum tree_code code,
1400 tree op0, tree op1,
1401 vec<cond_equivalence> *p)
1403 cond_equivalence c;
1404 struct hashable_expr *cond = &c.cond;
1406 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1408 cond->type = boolean_type_node;
1409 cond->kind = EXPR_BINARY;
1410 cond->ops.binary.op = code;
1411 cond->ops.binary.opnd0 = op0;
1412 cond->ops.binary.opnd1 = op1;
1414 c.value = boolean_true_node;
1415 p->safe_push (c);
1418 /* Record that COND is true and INVERTED is false into the edge information
1419 structure. Also record that any conditions dominated by COND are true
1420 as well.
1422 For example, if a < b is true, then a <= b must also be true. */
1424 static void
1425 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
1427 tree op0, op1;
1428 cond_equivalence c;
1430 if (!COMPARISON_CLASS_P (cond))
1431 return;
1433 op0 = TREE_OPERAND (cond, 0);
1434 op1 = TREE_OPERAND (cond, 1);
1436 switch (TREE_CODE (cond))
1438 case LT_EXPR:
1439 case GT_EXPR:
1440 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1442 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1443 &edge_info->cond_equivalences);
1444 build_and_record_new_cond (LTGT_EXPR, op0, op1,
1445 &edge_info->cond_equivalences);
1448 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
1449 ? LE_EXPR : GE_EXPR),
1450 op0, op1, &edge_info->cond_equivalences);
1451 build_and_record_new_cond (NE_EXPR, op0, op1,
1452 &edge_info->cond_equivalences);
1453 break;
1455 case GE_EXPR:
1456 case LE_EXPR:
1457 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1459 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1460 &edge_info->cond_equivalences);
1462 break;
1464 case EQ_EXPR:
1465 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1467 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1468 &edge_info->cond_equivalences);
1470 build_and_record_new_cond (LE_EXPR, op0, op1,
1471 &edge_info->cond_equivalences);
1472 build_and_record_new_cond (GE_EXPR, op0, op1,
1473 &edge_info->cond_equivalences);
1474 break;
1476 case UNORDERED_EXPR:
1477 build_and_record_new_cond (NE_EXPR, op0, op1,
1478 &edge_info->cond_equivalences);
1479 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1480 &edge_info->cond_equivalences);
1481 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1482 &edge_info->cond_equivalences);
1483 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
1484 &edge_info->cond_equivalences);
1485 build_and_record_new_cond (UNLT_EXPR, op0, op1,
1486 &edge_info->cond_equivalences);
1487 build_and_record_new_cond (UNGT_EXPR, op0, op1,
1488 &edge_info->cond_equivalences);
1489 break;
1491 case UNLT_EXPR:
1492 case UNGT_EXPR:
1493 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
1494 ? UNLE_EXPR : UNGE_EXPR),
1495 op0, op1, &edge_info->cond_equivalences);
1496 build_and_record_new_cond (NE_EXPR, op0, op1,
1497 &edge_info->cond_equivalences);
1498 break;
1500 case UNEQ_EXPR:
1501 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1502 &edge_info->cond_equivalences);
1503 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1504 &edge_info->cond_equivalences);
1505 break;
1507 case LTGT_EXPR:
1508 build_and_record_new_cond (NE_EXPR, op0, op1,
1509 &edge_info->cond_equivalences);
1510 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1511 &edge_info->cond_equivalences);
1512 break;
1514 default:
1515 break;
1518 /* Now store the original true and false conditions into the first
1519 two slots. */
1520 initialize_expr_from_cond (cond, &c.cond);
1521 c.value = boolean_true_node;
1522 edge_info->cond_equivalences.safe_push (c);
1524 /* It is possible for INVERTED to be the negation of a comparison,
1525 and not a valid RHS or GIMPLE_COND condition. This happens because
1526 invert_truthvalue may return such an expression when asked to invert
1527 a floating-point comparison. These comparisons are not assumed to
1528 obey the trichotomy law. */
1529 initialize_expr_from_cond (inverted, &c.cond);
1530 c.value = boolean_false_node;
1531 edge_info->cond_equivalences.safe_push (c);
1534 /* A helper function for record_const_or_copy and record_equality.
1535 Do the work of recording the value and undo info. */
1537 static void
1538 record_const_or_copy_1 (tree x, tree y, tree prev_x)
1540 set_ssa_name_value (x, y);
1542 if (dump_file && (dump_flags & TDF_DETAILS))
1544 fprintf (dump_file, "0>>> COPY ");
1545 print_generic_expr (dump_file, x, 0);
1546 fprintf (dump_file, " = ");
1547 print_generic_expr (dump_file, y, 0);
1548 fprintf (dump_file, "\n");
1551 const_and_copies_stack.reserve (2);
1552 const_and_copies_stack.quick_push (prev_x);
1553 const_and_copies_stack.quick_push (x);
1556 /* Return the loop depth of the basic block of the defining statement of X.
1557 This number should not be treated as absolutely correct because the loop
1558 information may not be completely up-to-date when dom runs. However, it
1559 will be relatively correct, and as more passes are taught to keep loop info
1560 up to date, the result will become more and more accurate. */
1563 loop_depth_of_name (tree x)
1565 gimple defstmt;
1566 basic_block defbb;
1568 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1569 if (TREE_CODE (x) != SSA_NAME)
1570 return 0;
1572 /* Otherwise return the loop depth of the defining statement's bb.
1573 Note that there may not actually be a bb for this statement, if the
1574 ssa_name is live on entry. */
1575 defstmt = SSA_NAME_DEF_STMT (x);
1576 defbb = gimple_bb (defstmt);
1577 if (!defbb)
1578 return 0;
1580 return bb_loop_depth (defbb);
1583 /* Record that X is equal to Y in const_and_copies. Record undo
1584 information in the block-local vector. */
1586 static void
1587 record_const_or_copy (tree x, tree y)
1589 tree prev_x = SSA_NAME_VALUE (x);
1591 gcc_assert (TREE_CODE (x) == SSA_NAME);
1593 if (TREE_CODE (y) == SSA_NAME)
1595 tree tmp = SSA_NAME_VALUE (y);
1596 if (tmp)
1597 y = tmp;
1600 record_const_or_copy_1 (x, y, prev_x);
1603 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1604 This constrains the cases in which we may treat this as assignment. */
1606 static void
1607 record_equality (tree x, tree y)
1609 tree prev_x = NULL, prev_y = NULL;
1611 if (TREE_CODE (x) == SSA_NAME)
1612 prev_x = SSA_NAME_VALUE (x);
1613 if (TREE_CODE (y) == SSA_NAME)
1614 prev_y = SSA_NAME_VALUE (y);
1616 /* If one of the previous values is invariant, or invariant in more loops
1617 (by depth), then use that.
1618 Otherwise it doesn't matter which value we choose, just so
1619 long as we canonicalize on one value. */
1620 if (is_gimple_min_invariant (y))
1622 else if (is_gimple_min_invariant (x)
1623 || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
1624 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1625 else if (prev_x && is_gimple_min_invariant (prev_x))
1626 x = y, y = prev_x, prev_x = prev_y;
1627 else if (prev_y)
1628 y = prev_y;
1630 /* After the swapping, we must have one SSA_NAME. */
1631 if (TREE_CODE (x) != SSA_NAME)
1632 return;
1634 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1635 variable compared against zero. If we're honoring signed zeros,
1636 then we cannot record this value unless we know that the value is
1637 nonzero. */
1638 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
1639 && (TREE_CODE (y) != REAL_CST
1640 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1641 return;
1643 record_const_or_copy_1 (x, y, prev_x);
1646 /* Returns true when STMT is a simple iv increment. It detects the
1647 following situation:
1649 i_1 = phi (..., i_2)
1650 i_2 = i_1 +/- ... */
1652 bool
1653 simple_iv_increment_p (gimple stmt)
1655 enum tree_code code;
1656 tree lhs, preinc;
1657 gimple phi;
1658 size_t i;
1660 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1661 return false;
1663 lhs = gimple_assign_lhs (stmt);
1664 if (TREE_CODE (lhs) != SSA_NAME)
1665 return false;
1667 code = gimple_assign_rhs_code (stmt);
1668 if (code != PLUS_EXPR
1669 && code != MINUS_EXPR
1670 && code != POINTER_PLUS_EXPR)
1671 return false;
1673 preinc = gimple_assign_rhs1 (stmt);
1674 if (TREE_CODE (preinc) != SSA_NAME)
1675 return false;
1677 phi = SSA_NAME_DEF_STMT (preinc);
1678 if (gimple_code (phi) != GIMPLE_PHI)
1679 return false;
1681 for (i = 0; i < gimple_phi_num_args (phi); i++)
1682 if (gimple_phi_arg_def (phi, i) == lhs)
1683 return true;
1685 return false;
1688 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1689 known value for that SSA_NAME (or NULL if no value is known).
1691 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1692 successors of BB. */
1694 static void
1695 cprop_into_successor_phis (basic_block bb)
1697 edge e;
1698 edge_iterator ei;
1700 FOR_EACH_EDGE (e, ei, bb->succs)
1702 int indx;
1703 gimple_stmt_iterator gsi;
1705 /* If this is an abnormal edge, then we do not want to copy propagate
1706 into the PHI alternative associated with this edge. */
1707 if (e->flags & EDGE_ABNORMAL)
1708 continue;
1710 gsi = gsi_start_phis (e->dest);
1711 if (gsi_end_p (gsi))
1712 continue;
1714 /* We may have an equivalence associated with this edge. While
1715 we can not propagate it into non-dominated blocks, we can
1716 propagate them into PHIs in non-dominated blocks. */
1718 /* Push the unwind marker so we can reset the const and copies
1719 table back to its original state after processing this edge. */
1720 const_and_copies_stack.safe_push (NULL_TREE);
1722 /* Extract and record any simple NAME = VALUE equivalences.
1724 Don't bother with [01] = COND equivalences, they're not useful
1725 here. */
1726 struct edge_info *edge_info = (struct edge_info *) e->aux;
1727 if (edge_info)
1729 tree lhs = edge_info->lhs;
1730 tree rhs = edge_info->rhs;
1732 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1733 record_const_or_copy (lhs, rhs);
1736 indx = e->dest_idx;
1737 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1739 tree new_val;
1740 use_operand_p orig_p;
1741 tree orig_val;
1742 gimple phi = gsi_stmt (gsi);
1744 /* The alternative may be associated with a constant, so verify
1745 it is an SSA_NAME before doing anything with it. */
1746 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1747 orig_val = get_use_from_ptr (orig_p);
1748 if (TREE_CODE (orig_val) != SSA_NAME)
1749 continue;
1751 /* If we have *ORIG_P in our constant/copy table, then replace
1752 ORIG_P with its value in our constant/copy table. */
1753 new_val = SSA_NAME_VALUE (orig_val);
1754 if (new_val
1755 && new_val != orig_val
1756 && (TREE_CODE (new_val) == SSA_NAME
1757 || is_gimple_min_invariant (new_val))
1758 && may_propagate_copy (orig_val, new_val))
1759 propagate_value (orig_p, new_val);
1762 restore_vars_to_original_value ();
1766 /* We have finished optimizing BB, record any information implied by
1767 taking a specific outgoing edge from BB. */
1769 static void
1770 record_edge_info (basic_block bb)
1772 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1773 struct edge_info *edge_info;
1775 if (! gsi_end_p (gsi))
1777 gimple stmt = gsi_stmt (gsi);
1778 location_t loc = gimple_location (stmt);
1780 if (gimple_code (stmt) == GIMPLE_SWITCH)
1782 tree index = gimple_switch_index (stmt);
1784 if (TREE_CODE (index) == SSA_NAME)
1786 int i;
1787 int n_labels = gimple_switch_num_labels (stmt);
1788 tree *info = XCNEWVEC (tree, last_basic_block);
1789 edge e;
1790 edge_iterator ei;
1792 for (i = 0; i < n_labels; i++)
1794 tree label = gimple_switch_label (stmt, i);
1795 basic_block target_bb = label_to_block (CASE_LABEL (label));
1796 if (CASE_HIGH (label)
1797 || !CASE_LOW (label)
1798 || info[target_bb->index])
1799 info[target_bb->index] = error_mark_node;
1800 else
1801 info[target_bb->index] = label;
1804 FOR_EACH_EDGE (e, ei, bb->succs)
1806 basic_block target_bb = e->dest;
1807 tree label = info[target_bb->index];
1809 if (label != NULL && label != error_mark_node)
1811 tree x = fold_convert_loc (loc, TREE_TYPE (index),
1812 CASE_LOW (label));
1813 edge_info = allocate_edge_info (e);
1814 edge_info->lhs = index;
1815 edge_info->rhs = x;
1818 free (info);
1822 /* A COND_EXPR may create equivalences too. */
1823 if (gimple_code (stmt) == GIMPLE_COND)
1825 edge true_edge;
1826 edge false_edge;
1828 tree op0 = gimple_cond_lhs (stmt);
1829 tree op1 = gimple_cond_rhs (stmt);
1830 enum tree_code code = gimple_cond_code (stmt);
1832 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1834 /* Special case comparing booleans against a constant as we
1835 know the value of OP0 on both arms of the branch. i.e., we
1836 can record an equivalence for OP0 rather than COND. */
1837 if ((code == EQ_EXPR || code == NE_EXPR)
1838 && TREE_CODE (op0) == SSA_NAME
1839 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1840 && is_gimple_min_invariant (op1))
1842 if (code == EQ_EXPR)
1844 edge_info = allocate_edge_info (true_edge);
1845 edge_info->lhs = op0;
1846 edge_info->rhs = (integer_zerop (op1)
1847 ? boolean_false_node
1848 : boolean_true_node);
1850 edge_info = allocate_edge_info (false_edge);
1851 edge_info->lhs = op0;
1852 edge_info->rhs = (integer_zerop (op1)
1853 ? boolean_true_node
1854 : boolean_false_node);
1856 else
1858 edge_info = allocate_edge_info (true_edge);
1859 edge_info->lhs = op0;
1860 edge_info->rhs = (integer_zerop (op1)
1861 ? boolean_true_node
1862 : boolean_false_node);
1864 edge_info = allocate_edge_info (false_edge);
1865 edge_info->lhs = op0;
1866 edge_info->rhs = (integer_zerop (op1)
1867 ? boolean_false_node
1868 : boolean_true_node);
1871 else if (is_gimple_min_invariant (op0)
1872 && (TREE_CODE (op1) == SSA_NAME
1873 || is_gimple_min_invariant (op1)))
1875 tree cond = build2 (code, boolean_type_node, op0, op1);
1876 tree inverted = invert_truthvalue_loc (loc, cond);
1877 bool can_infer_simple_equiv
1878 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
1879 && real_zerop (op0));
1880 struct edge_info *edge_info;
1882 edge_info = allocate_edge_info (true_edge);
1883 record_conditions (edge_info, cond, inverted);
1885 if (can_infer_simple_equiv && code == EQ_EXPR)
1887 edge_info->lhs = op1;
1888 edge_info->rhs = op0;
1891 edge_info = allocate_edge_info (false_edge);
1892 record_conditions (edge_info, inverted, cond);
1894 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1896 edge_info->lhs = op1;
1897 edge_info->rhs = op0;
1901 else if (TREE_CODE (op0) == SSA_NAME
1902 && (TREE_CODE (op1) == SSA_NAME
1903 || is_gimple_min_invariant (op1)))
1905 tree cond = build2 (code, boolean_type_node, op0, op1);
1906 tree inverted = invert_truthvalue_loc (loc, cond);
1907 bool can_infer_simple_equiv
1908 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
1909 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1910 struct edge_info *edge_info;
1912 edge_info = allocate_edge_info (true_edge);
1913 record_conditions (edge_info, cond, inverted);
1915 if (can_infer_simple_equiv && code == EQ_EXPR)
1917 edge_info->lhs = op0;
1918 edge_info->rhs = op1;
1921 edge_info = allocate_edge_info (false_edge);
1922 record_conditions (edge_info, inverted, cond);
1924 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1926 edge_info->lhs = op0;
1927 edge_info->rhs = op1;
1932 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1936 void
1937 dom_opt_dom_walker::before_dom_children (basic_block bb)
1939 gimple_stmt_iterator gsi;
1941 if (dump_file && (dump_flags & TDF_DETAILS))
1942 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1944 /* Push a marker on the stacks of local information so that we know how
1945 far to unwind when we finalize this block. */
1946 avail_exprs_stack.safe_push (NULL);
1947 const_and_copies_stack.safe_push (NULL_TREE);
1949 record_equivalences_from_incoming_edge (bb);
1951 /* PHI nodes can create equivalences too. */
1952 record_equivalences_from_phis (bb);
1954 /* Create equivalences from redundant PHIs. PHIs are only truly
1955 redundant when they exist in the same block, so push another
1956 marker and unwind right afterwards. */
1957 avail_exprs_stack.safe_push (NULL);
1958 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1959 eliminate_redundant_computations (&gsi);
1960 remove_local_expressions_from_table ();
1962 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1963 optimize_stmt (bb, gsi);
1965 /* Now prepare to process dominated blocks. */
1966 record_edge_info (bb);
1967 cprop_into_successor_phis (bb);
1970 /* We have finished processing the dominator children of BB, perform
1971 any finalization actions in preparation for leaving this node in
1972 the dominator tree. */
1974 void
1975 dom_opt_dom_walker::after_dom_children (basic_block bb)
1977 gimple last;
1979 /* If we have an outgoing edge to a block with multiple incoming and
1980 outgoing edges, then we may be able to thread the edge, i.e., we
1981 may be able to statically determine which of the outgoing edges
1982 will be traversed when the incoming edge from BB is traversed. */
1983 if (single_succ_p (bb)
1984 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1985 && potentially_threadable_block (single_succ (bb)))
1987 thread_across_edge (single_succ_edge (bb));
1989 else if ((last = last_stmt (bb))
1990 && gimple_code (last) == GIMPLE_COND
1991 && EDGE_COUNT (bb->succs) == 2
1992 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1993 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1995 edge true_edge, false_edge;
1997 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1999 /* Only try to thread the edge if it reaches a target block with
2000 more than one predecessor and more than one successor. */
2001 if (potentially_threadable_block (true_edge->dest))
2002 thread_across_edge (true_edge);
2004 /* Similarly for the ELSE arm. */
2005 if (potentially_threadable_block (false_edge->dest))
2006 thread_across_edge (false_edge);
2010 /* These remove expressions local to BB from the tables. */
2011 remove_local_expressions_from_table ();
2012 restore_vars_to_original_value ();
2015 /* Search for redundant computations in STMT. If any are found, then
2016 replace them with the variable holding the result of the computation.
2018 If safe, record this expression into the available expression hash
2019 table. */
2021 static void
2022 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2024 tree expr_type;
2025 tree cached_lhs;
2026 tree def;
2027 bool insert = true;
2028 bool assigns_var_p = false;
2030 gimple stmt = gsi_stmt (*gsi);
2032 if (gimple_code (stmt) == GIMPLE_PHI)
2033 def = gimple_phi_result (stmt);
2034 else
2035 def = gimple_get_lhs (stmt);
2037 /* Certain expressions on the RHS can be optimized away, but can not
2038 themselves be entered into the hash tables. */
2039 if (! def
2040 || TREE_CODE (def) != SSA_NAME
2041 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2042 || gimple_vdef (stmt)
2043 /* Do not record equivalences for increments of ivs. This would create
2044 overlapping live ranges for a very questionable gain. */
2045 || simple_iv_increment_p (stmt))
2046 insert = false;
2048 /* Check if the expression has been computed before. */
2049 cached_lhs = lookup_avail_expr (stmt, insert);
2051 opt_stats.num_exprs_considered++;
2053 /* Get the type of the expression we are trying to optimize. */
2054 if (is_gimple_assign (stmt))
2056 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2057 assigns_var_p = true;
2059 else if (gimple_code (stmt) == GIMPLE_COND)
2060 expr_type = boolean_type_node;
2061 else if (is_gimple_call (stmt))
2063 gcc_assert (gimple_call_lhs (stmt));
2064 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2065 assigns_var_p = true;
2067 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2068 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2069 else if (gimple_code (stmt) == GIMPLE_PHI)
2070 /* We can't propagate into a phi, so the logic below doesn't apply.
2071 Instead record an equivalence between the cached LHS and the
2072 PHI result of this statement, provided they are in the same block.
2073 This should be sufficient to kill the redundant phi. */
2075 if (def && cached_lhs)
2076 record_const_or_copy (def, cached_lhs);
2077 return;
2079 else
2080 gcc_unreachable ();
2082 if (!cached_lhs)
2083 return;
2085 /* It is safe to ignore types here since we have already done
2086 type checking in the hashing and equality routines. In fact
2087 type checking here merely gets in the way of constant
2088 propagation. Also, make sure that it is safe to propagate
2089 CACHED_LHS into the expression in STMT. */
2090 if ((TREE_CODE (cached_lhs) != SSA_NAME
2091 && (assigns_var_p
2092 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2093 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2095 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2096 || is_gimple_min_invariant (cached_lhs));
2098 if (dump_file && (dump_flags & TDF_DETAILS))
2100 fprintf (dump_file, " Replaced redundant expr '");
2101 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2102 fprintf (dump_file, "' with '");
2103 print_generic_expr (dump_file, cached_lhs, dump_flags);
2104 fprintf (dump_file, "'\n");
2107 opt_stats.num_re++;
2109 if (assigns_var_p
2110 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2111 cached_lhs = fold_convert (expr_type, cached_lhs);
2113 propagate_tree_value_into_stmt (gsi, cached_lhs);
2115 /* Since it is always necessary to mark the result as modified,
2116 perhaps we should move this into propagate_tree_value_into_stmt
2117 itself. */
2118 gimple_set_modified (gsi_stmt (*gsi), true);
2122 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2123 the available expressions table or the const_and_copies table.
2124 Detect and record those equivalences. */
2125 /* We handle only very simple copy equivalences here. The heavy
2126 lifing is done by eliminate_redundant_computations. */
2128 static void
2129 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2131 tree lhs;
2132 enum tree_code lhs_code;
2134 gcc_assert (is_gimple_assign (stmt));
2136 lhs = gimple_assign_lhs (stmt);
2137 lhs_code = TREE_CODE (lhs);
2139 if (lhs_code == SSA_NAME
2140 && gimple_assign_single_p (stmt))
2142 tree rhs = gimple_assign_rhs1 (stmt);
2144 /* If the RHS of the assignment is a constant or another variable that
2145 may be propagated, register it in the CONST_AND_COPIES table. We
2146 do not need to record unwind data for this, since this is a true
2147 assignment and not an equivalence inferred from a comparison. All
2148 uses of this ssa name are dominated by this assignment, so unwinding
2149 just costs time and space. */
2150 if (may_optimize_p
2151 && (TREE_CODE (rhs) == SSA_NAME
2152 || is_gimple_min_invariant (rhs)))
2154 if (dump_file && (dump_flags & TDF_DETAILS))
2156 fprintf (dump_file, "==== ASGN ");
2157 print_generic_expr (dump_file, lhs, 0);
2158 fprintf (dump_file, " = ");
2159 print_generic_expr (dump_file, rhs, 0);
2160 fprintf (dump_file, "\n");
2163 set_ssa_name_value (lhs, rhs);
2167 /* A memory store, even an aliased store, creates a useful
2168 equivalence. By exchanging the LHS and RHS, creating suitable
2169 vops and recording the result in the available expression table,
2170 we may be able to expose more redundant loads. */
2171 if (!gimple_has_volatile_ops (stmt)
2172 && gimple_references_memory_p (stmt)
2173 && gimple_assign_single_p (stmt)
2174 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2175 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2176 && !is_gimple_reg (lhs))
2178 tree rhs = gimple_assign_rhs1 (stmt);
2179 gimple new_stmt;
2181 /* Build a new statement with the RHS and LHS exchanged. */
2182 if (TREE_CODE (rhs) == SSA_NAME)
2184 /* NOTE tuples. The call to gimple_build_assign below replaced
2185 a call to build_gimple_modify_stmt, which did not set the
2186 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2187 may cause an SSA validation failure, as the LHS may be a
2188 default-initialized name and should have no definition. I'm
2189 a bit dubious of this, as the artificial statement that we
2190 generate here may in fact be ill-formed, but it is simply
2191 used as an internal device in this pass, and never becomes
2192 part of the CFG. */
2193 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2194 new_stmt = gimple_build_assign (rhs, lhs);
2195 SSA_NAME_DEF_STMT (rhs) = defstmt;
2197 else
2198 new_stmt = gimple_build_assign (rhs, lhs);
2200 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2202 /* Finally enter the statement into the available expression
2203 table. */
2204 lookup_avail_expr (new_stmt, true);
2208 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2209 CONST_AND_COPIES. */
2211 static void
2212 cprop_operand (gimple stmt, use_operand_p op_p)
2214 tree val;
2215 tree op = USE_FROM_PTR (op_p);
2217 /* If the operand has a known constant value or it is known to be a
2218 copy of some other variable, use the value or copy stored in
2219 CONST_AND_COPIES. */
2220 val = SSA_NAME_VALUE (op);
2221 if (val && val != op)
2223 /* Do not replace hard register operands in asm statements. */
2224 if (gimple_code (stmt) == GIMPLE_ASM
2225 && !may_propagate_copy_into_asm (op))
2226 return;
2228 /* Certain operands are not allowed to be copy propagated due
2229 to their interaction with exception handling and some GCC
2230 extensions. */
2231 if (!may_propagate_copy (op, val))
2232 return;
2234 /* Do not propagate addresses that point to volatiles into memory
2235 stmts without volatile operands. */
2236 if (POINTER_TYPE_P (TREE_TYPE (val))
2237 && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
2238 && gimple_has_mem_ops (stmt)
2239 && !gimple_has_volatile_ops (stmt))
2240 return;
2242 /* Do not propagate copies if the propagated value is at a deeper loop
2243 depth than the propagatee. Otherwise, this may move loop variant
2244 variables outside of their loops and prevent coalescing
2245 opportunities. If the value was loop invariant, it will be hoisted
2246 by LICM and exposed for copy propagation. */
2247 if (loop_depth_of_name (val) > loop_depth_of_name (op))
2248 return;
2250 /* Do not propagate copies into simple IV increment statements.
2251 See PR23821 for how this can disturb IV analysis. */
2252 if (TREE_CODE (val) != INTEGER_CST
2253 && simple_iv_increment_p (stmt))
2254 return;
2256 /* Dump details. */
2257 if (dump_file && (dump_flags & TDF_DETAILS))
2259 fprintf (dump_file, " Replaced '");
2260 print_generic_expr (dump_file, op, dump_flags);
2261 fprintf (dump_file, "' with %s '",
2262 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2263 print_generic_expr (dump_file, val, dump_flags);
2264 fprintf (dump_file, "'\n");
2267 if (TREE_CODE (val) != SSA_NAME)
2268 opt_stats.num_const_prop++;
2269 else
2270 opt_stats.num_copy_prop++;
2272 propagate_value (op_p, val);
2274 /* And note that we modified this statement. This is now
2275 safe, even if we changed virtual operands since we will
2276 rescan the statement and rewrite its operands again. */
2277 gimple_set_modified (stmt, true);
2281 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2282 known value for that SSA_NAME (or NULL if no value is known).
2284 Propagate values from CONST_AND_COPIES into the uses, vuses and
2285 vdef_ops of STMT. */
2287 static void
2288 cprop_into_stmt (gimple stmt)
2290 use_operand_p op_p;
2291 ssa_op_iter iter;
2293 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2294 cprop_operand (stmt, op_p);
2297 /* Optimize the statement pointed to by iterator SI.
2299 We try to perform some simplistic global redundancy elimination and
2300 constant propagation:
2302 1- To detect global redundancy, we keep track of expressions that have
2303 been computed in this block and its dominators. If we find that the
2304 same expression is computed more than once, we eliminate repeated
2305 computations by using the target of the first one.
2307 2- Constant values and copy assignments. This is used to do very
2308 simplistic constant and copy propagation. When a constant or copy
2309 assignment is found, we map the value on the RHS of the assignment to
2310 the variable in the LHS in the CONST_AND_COPIES table. */
2312 static void
2313 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2315 gimple stmt, old_stmt;
2316 bool may_optimize_p;
2317 bool modified_p = false;
2319 old_stmt = stmt = gsi_stmt (si);
2321 if (dump_file && (dump_flags & TDF_DETAILS))
2323 fprintf (dump_file, "Optimizing statement ");
2324 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2327 if (gimple_code (stmt) == GIMPLE_COND)
2328 canonicalize_comparison (stmt);
2330 update_stmt_if_modified (stmt);
2331 opt_stats.num_stmts++;
2333 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2334 cprop_into_stmt (stmt);
2336 /* If the statement has been modified with constant replacements,
2337 fold its RHS before checking for redundant computations. */
2338 if (gimple_modified_p (stmt))
2340 tree rhs = NULL;
2342 /* Try to fold the statement making sure that STMT is kept
2343 up to date. */
2344 if (fold_stmt (&si))
2346 stmt = gsi_stmt (si);
2347 gimple_set_modified (stmt, true);
2349 if (dump_file && (dump_flags & TDF_DETAILS))
2351 fprintf (dump_file, " Folded to: ");
2352 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2356 /* We only need to consider cases that can yield a gimple operand. */
2357 if (gimple_assign_single_p (stmt))
2358 rhs = gimple_assign_rhs1 (stmt);
2359 else if (gimple_code (stmt) == GIMPLE_GOTO)
2360 rhs = gimple_goto_dest (stmt);
2361 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2362 /* This should never be an ADDR_EXPR. */
2363 rhs = gimple_switch_index (stmt);
2365 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2366 recompute_tree_invariant_for_addr_expr (rhs);
2368 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2369 even if fold_stmt updated the stmt already and thus cleared
2370 gimple_modified_p flag on it. */
2371 modified_p = true;
2374 /* Check for redundant computations. Do this optimization only
2375 for assignments that have no volatile ops and conditionals. */
2376 may_optimize_p = (!gimple_has_side_effects (stmt)
2377 && (is_gimple_assign (stmt)
2378 || (is_gimple_call (stmt)
2379 && gimple_call_lhs (stmt) != NULL_TREE)
2380 || gimple_code (stmt) == GIMPLE_COND
2381 || gimple_code (stmt) == GIMPLE_SWITCH));
2383 if (may_optimize_p)
2385 if (gimple_code (stmt) == GIMPLE_CALL)
2387 /* Resolve __builtin_constant_p. If it hasn't been
2388 folded to integer_one_node by now, it's fairly
2389 certain that the value simply isn't constant. */
2390 tree callee = gimple_call_fndecl (stmt);
2391 if (callee
2392 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2393 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2395 propagate_tree_value_into_stmt (&si, integer_zero_node);
2396 stmt = gsi_stmt (si);
2400 update_stmt_if_modified (stmt);
2401 eliminate_redundant_computations (&si);
2402 stmt = gsi_stmt (si);
2404 /* Perform simple redundant store elimination. */
2405 if (gimple_assign_single_p (stmt)
2406 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2408 tree lhs = gimple_assign_lhs (stmt);
2409 tree rhs = gimple_assign_rhs1 (stmt);
2410 tree cached_lhs;
2411 gimple new_stmt;
2412 if (TREE_CODE (rhs) == SSA_NAME)
2414 tree tem = SSA_NAME_VALUE (rhs);
2415 if (tem)
2416 rhs = tem;
2418 /* Build a new statement with the RHS and LHS exchanged. */
2419 if (TREE_CODE (rhs) == SSA_NAME)
2421 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2422 new_stmt = gimple_build_assign (rhs, lhs);
2423 SSA_NAME_DEF_STMT (rhs) = defstmt;
2425 else
2426 new_stmt = gimple_build_assign (rhs, lhs);
2427 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2428 cached_lhs = lookup_avail_expr (new_stmt, false);
2429 if (cached_lhs
2430 && rhs == cached_lhs)
2432 basic_block bb = gimple_bb (stmt);
2433 unlink_stmt_vdef (stmt);
2434 if (gsi_remove (&si, true))
2436 bitmap_set_bit (need_eh_cleanup, bb->index);
2437 if (dump_file && (dump_flags & TDF_DETAILS))
2438 fprintf (dump_file, " Flagged to clear EH edges.\n");
2440 release_defs (stmt);
2441 return;
2446 /* Record any additional equivalences created by this statement. */
2447 if (is_gimple_assign (stmt))
2448 record_equivalences_from_stmt (stmt, may_optimize_p);
2450 /* If STMT is a COND_EXPR and it was modified, then we may know
2451 where it goes. If that is the case, then mark the CFG as altered.
2453 This will cause us to later call remove_unreachable_blocks and
2454 cleanup_tree_cfg when it is safe to do so. It is not safe to
2455 clean things up here since removal of edges and such can trigger
2456 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2457 the manager.
2459 That's all fine and good, except that once SSA_NAMEs are released
2460 to the manager, we must not call create_ssa_name until all references
2461 to released SSA_NAMEs have been eliminated.
2463 All references to the deleted SSA_NAMEs can not be eliminated until
2464 we remove unreachable blocks.
2466 We can not remove unreachable blocks until after we have completed
2467 any queued jump threading.
2469 We can not complete any queued jump threads until we have taken
2470 appropriate variables out of SSA form. Taking variables out of
2471 SSA form can call create_ssa_name and thus we lose.
2473 Ultimately I suspect we're going to need to change the interface
2474 into the SSA_NAME manager. */
2475 if (gimple_modified_p (stmt) || modified_p)
2477 tree val = NULL;
2479 update_stmt_if_modified (stmt);
2481 if (gimple_code (stmt) == GIMPLE_COND)
2482 val = fold_binary_loc (gimple_location (stmt),
2483 gimple_cond_code (stmt), boolean_type_node,
2484 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2485 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2486 val = gimple_switch_index (stmt);
2488 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2489 cfg_altered = true;
2491 /* If we simplified a statement in such a way as to be shown that it
2492 cannot trap, update the eh information and the cfg to match. */
2493 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2495 bitmap_set_bit (need_eh_cleanup, bb->index);
2496 if (dump_file && (dump_flags & TDF_DETAILS))
2497 fprintf (dump_file, " Flagged to clear EH edges.\n");
2502 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2503 If found, return its LHS. Otherwise insert STMT in the table and
2504 return NULL_TREE.
2506 Also, when an expression is first inserted in the table, it is also
2507 is also added to AVAIL_EXPRS_STACK, so that it can be removed when
2508 we finish processing this block and its children. */
2510 static tree
2511 lookup_avail_expr (gimple stmt, bool insert)
2513 expr_hash_elt **slot;
2514 tree lhs;
2515 tree temp;
2516 struct expr_hash_elt element;
2518 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2519 if (gimple_code (stmt) == GIMPLE_PHI)
2520 lhs = gimple_phi_result (stmt);
2521 else
2522 lhs = gimple_get_lhs (stmt);
2524 initialize_hash_element (stmt, lhs, &element);
2526 if (dump_file && (dump_flags & TDF_DETAILS))
2528 fprintf (dump_file, "LKUP ");
2529 print_expr_hash_elt (dump_file, &element);
2532 /* Don't bother remembering constant assignments and copy operations.
2533 Constants and copy operations are handled by the constant/copy propagator
2534 in optimize_stmt. */
2535 if (element.expr.kind == EXPR_SINGLE
2536 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2537 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2538 return NULL_TREE;
2540 /* Finally try to find the expression in the main expression hash table. */
2541 slot = avail_exprs.find_slot_with_hash (&element, element.hash,
2542 (insert ? INSERT : NO_INSERT));
2543 if (slot == NULL)
2545 free_expr_hash_elt_contents (&element);
2546 return NULL_TREE;
2548 else if (*slot == NULL)
2550 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2551 *element2 = element;
2552 element2->stamp = element2;
2553 *slot = element2;
2555 if (dump_file && (dump_flags & TDF_DETAILS))
2557 fprintf (dump_file, "2>>> ");
2558 print_expr_hash_elt (dump_file, element2);
2561 avail_exprs_stack.safe_push (element2);
2562 return NULL_TREE;
2564 else
2565 free_expr_hash_elt_contents (&element);
2567 /* Extract the LHS of the assignment so that it can be used as the current
2568 definition of another variable. */
2569 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2571 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2572 use the value from the const_and_copies table. */
2573 if (TREE_CODE (lhs) == SSA_NAME)
2575 temp = SSA_NAME_VALUE (lhs);
2576 if (temp)
2577 lhs = temp;
2580 if (dump_file && (dump_flags & TDF_DETAILS))
2582 fprintf (dump_file, "FIND: ");
2583 print_generic_expr (dump_file, lhs, 0);
2584 fprintf (dump_file, "\n");
2587 return lhs;
2590 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2591 for expressions using the code of the expression and the SSA numbers of
2592 its operands. */
2594 static hashval_t
2595 avail_expr_hash (const void *p)
2597 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2598 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2599 tree vuse;
2600 hashval_t val = 0;
2602 val = iterative_hash_hashable_expr (expr, val);
2604 /* If the hash table entry is not associated with a statement, then we
2605 can just hash the expression and not worry about virtual operands
2606 and such. */
2607 if (!stmt)
2608 return val;
2610 /* Add the SSA version numbers of the vuse operand. This is important
2611 because compound variables like arrays are not renamed in the
2612 operands. Rather, the rename is done on the virtual variable
2613 representing all the elements of the array. */
2614 if ((vuse = gimple_vuse (stmt)))
2615 val = iterative_hash_expr (vuse, val);
2617 return val;
2620 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2621 up degenerate PHIs created by or exposed by jump threading. */
2623 /* Given a statement STMT, which is either a PHI node or an assignment,
2624 remove it from the IL. */
2626 static void
2627 remove_stmt_or_phi (gimple stmt)
2629 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2631 if (gimple_code (stmt) == GIMPLE_PHI)
2632 remove_phi_node (&gsi, true);
2633 else
2635 gsi_remove (&gsi, true);
2636 release_defs (stmt);
2640 /* Given a statement STMT, which is either a PHI node or an assignment,
2641 return the "rhs" of the node, in the case of a non-degenerate
2642 phi, NULL is returned. */
2644 static tree
2645 get_rhs_or_phi_arg (gimple stmt)
2647 if (gimple_code (stmt) == GIMPLE_PHI)
2648 return degenerate_phi_result (stmt);
2649 else if (gimple_assign_single_p (stmt))
2650 return gimple_assign_rhs1 (stmt);
2651 else
2652 gcc_unreachable ();
2656 /* Given a statement STMT, which is either a PHI node or an assignment,
2657 return the "lhs" of the node. */
2659 static tree
2660 get_lhs_or_phi_result (gimple stmt)
2662 if (gimple_code (stmt) == GIMPLE_PHI)
2663 return gimple_phi_result (stmt);
2664 else if (is_gimple_assign (stmt))
2665 return gimple_assign_lhs (stmt);
2666 else
2667 gcc_unreachable ();
2670 /* Propagate RHS into all uses of LHS (when possible).
2672 RHS and LHS are derived from STMT, which is passed in solely so
2673 that we can remove it if propagation is successful.
2675 When propagating into a PHI node or into a statement which turns
2676 into a trivial copy or constant initialization, set the
2677 appropriate bit in INTERESTING_NAMEs so that we will visit those
2678 nodes as well in an effort to pick up secondary optimization
2679 opportunities. */
2681 static void
2682 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2684 /* First verify that propagation is valid and isn't going to move a
2685 loop variant variable outside its loop. */
2686 if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
2687 && (TREE_CODE (rhs) != SSA_NAME
2688 || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
2689 && may_propagate_copy (lhs, rhs)
2690 && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
2692 use_operand_p use_p;
2693 imm_use_iterator iter;
2694 gimple use_stmt;
2695 bool all = true;
2697 /* Dump details. */
2698 if (dump_file && (dump_flags & TDF_DETAILS))
2700 fprintf (dump_file, " Replacing '");
2701 print_generic_expr (dump_file, lhs, dump_flags);
2702 fprintf (dump_file, "' with %s '",
2703 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2704 print_generic_expr (dump_file, rhs, dump_flags);
2705 fprintf (dump_file, "'\n");
2708 /* Walk over every use of LHS and try to replace the use with RHS.
2709 At this point the only reason why such a propagation would not
2710 be successful would be if the use occurs in an ASM_EXPR. */
2711 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2713 /* Leave debug stmts alone. If we succeed in propagating
2714 all non-debug uses, we'll drop the DEF, and propagation
2715 into debug stmts will occur then. */
2716 if (gimple_debug_bind_p (use_stmt))
2717 continue;
2719 /* It's not always safe to propagate into an ASM_EXPR. */
2720 if (gimple_code (use_stmt) == GIMPLE_ASM
2721 && ! may_propagate_copy_into_asm (lhs))
2723 all = false;
2724 continue;
2727 /* It's not ok to propagate into the definition stmt of RHS.
2728 <bb 9>:
2729 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2730 g_67.1_6 = prephitmp.12_36;
2731 goto <bb 9>;
2732 While this is strictly all dead code we do not want to
2733 deal with this here. */
2734 if (TREE_CODE (rhs) == SSA_NAME
2735 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2737 all = false;
2738 continue;
2741 /* Dump details. */
2742 if (dump_file && (dump_flags & TDF_DETAILS))
2744 fprintf (dump_file, " Original statement:");
2745 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2748 /* Propagate the RHS into this use of the LHS. */
2749 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2750 propagate_value (use_p, rhs);
2752 /* Special cases to avoid useless calls into the folding
2753 routines, operand scanning, etc.
2755 Propagation into a PHI may cause the PHI to become
2756 a degenerate, so mark the PHI as interesting. No other
2757 actions are necessary. */
2758 if (gimple_code (use_stmt) == GIMPLE_PHI)
2760 tree result;
2762 /* Dump details. */
2763 if (dump_file && (dump_flags & TDF_DETAILS))
2765 fprintf (dump_file, " Updated statement:");
2766 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2769 result = get_lhs_or_phi_result (use_stmt);
2770 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2771 continue;
2774 /* From this point onward we are propagating into a
2775 real statement. Folding may (or may not) be possible,
2776 we may expose new operands, expose dead EH edges,
2777 etc. */
2778 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2779 cannot fold a call that simplifies to a constant,
2780 because the GIMPLE_CALL must be replaced by a
2781 GIMPLE_ASSIGN, and there is no way to effect such a
2782 transformation in-place. We might want to consider
2783 using the more general fold_stmt here. */
2785 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2786 fold_stmt_inplace (&gsi);
2789 /* Sometimes propagation can expose new operands to the
2790 renamer. */
2791 update_stmt (use_stmt);
2793 /* Dump details. */
2794 if (dump_file && (dump_flags & TDF_DETAILS))
2796 fprintf (dump_file, " Updated statement:");
2797 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2800 /* If we replaced a variable index with a constant, then
2801 we would need to update the invariant flag for ADDR_EXPRs. */
2802 if (gimple_assign_single_p (use_stmt)
2803 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2804 recompute_tree_invariant_for_addr_expr
2805 (gimple_assign_rhs1 (use_stmt));
2807 /* If we cleaned up EH information from the statement,
2808 mark its containing block as needing EH cleanups. */
2809 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2811 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2812 if (dump_file && (dump_flags & TDF_DETAILS))
2813 fprintf (dump_file, " Flagged to clear EH edges.\n");
2816 /* Propagation may expose new trivial copy/constant propagation
2817 opportunities. */
2818 if (gimple_assign_single_p (use_stmt)
2819 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2820 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2821 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2823 tree result = get_lhs_or_phi_result (use_stmt);
2824 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2827 /* Propagation into these nodes may make certain edges in
2828 the CFG unexecutable. We want to identify them as PHI nodes
2829 at the destination of those unexecutable edges may become
2830 degenerates. */
2831 else if (gimple_code (use_stmt) == GIMPLE_COND
2832 || gimple_code (use_stmt) == GIMPLE_SWITCH
2833 || gimple_code (use_stmt) == GIMPLE_GOTO)
2835 tree val;
2837 if (gimple_code (use_stmt) == GIMPLE_COND)
2838 val = fold_binary_loc (gimple_location (use_stmt),
2839 gimple_cond_code (use_stmt),
2840 boolean_type_node,
2841 gimple_cond_lhs (use_stmt),
2842 gimple_cond_rhs (use_stmt));
2843 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2844 val = gimple_switch_index (use_stmt);
2845 else
2846 val = gimple_goto_dest (use_stmt);
2848 if (val && is_gimple_min_invariant (val))
2850 basic_block bb = gimple_bb (use_stmt);
2851 edge te = find_taken_edge (bb, val);
2852 edge_iterator ei;
2853 edge e;
2854 gimple_stmt_iterator gsi, psi;
2856 /* Remove all outgoing edges except TE. */
2857 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2859 if (e != te)
2861 /* Mark all the PHI nodes at the destination of
2862 the unexecutable edge as interesting. */
2863 for (psi = gsi_start_phis (e->dest);
2864 !gsi_end_p (psi);
2865 gsi_next (&psi))
2867 gimple phi = gsi_stmt (psi);
2869 tree result = gimple_phi_result (phi);
2870 int version = SSA_NAME_VERSION (result);
2872 bitmap_set_bit (interesting_names, version);
2875 te->probability += e->probability;
2877 te->count += e->count;
2878 remove_edge (e);
2879 cfg_altered = true;
2881 else
2882 ei_next (&ei);
2885 gsi = gsi_last_bb (gimple_bb (use_stmt));
2886 gsi_remove (&gsi, true);
2888 /* And fixup the flags on the single remaining edge. */
2889 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2890 te->flags &= ~EDGE_ABNORMAL;
2891 te->flags |= EDGE_FALLTHRU;
2892 if (te->probability > REG_BR_PROB_BASE)
2893 te->probability = REG_BR_PROB_BASE;
2898 /* Ensure there is nothing else to do. */
2899 gcc_assert (!all || has_zero_uses (lhs));
2901 /* If we were able to propagate away all uses of LHS, then
2902 we can remove STMT. */
2903 if (all)
2904 remove_stmt_or_phi (stmt);
2908 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2909 a statement that is a trivial copy or constant initialization.
2911 Attempt to eliminate T by propagating its RHS into all uses of
2912 its LHS. This may in turn set new bits in INTERESTING_NAMES
2913 for nodes we want to revisit later.
2915 All exit paths should clear INTERESTING_NAMES for the result
2916 of STMT. */
2918 static void
2919 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2921 tree lhs = get_lhs_or_phi_result (stmt);
2922 tree rhs;
2923 int version = SSA_NAME_VERSION (lhs);
2925 /* If the LHS of this statement or PHI has no uses, then we can
2926 just eliminate it. This can occur if, for example, the PHI
2927 was created by block duplication due to threading and its only
2928 use was in the conditional at the end of the block which was
2929 deleted. */
2930 if (has_zero_uses (lhs))
2932 bitmap_clear_bit (interesting_names, version);
2933 remove_stmt_or_phi (stmt);
2934 return;
2937 /* Get the RHS of the assignment or PHI node if the PHI is a
2938 degenerate. */
2939 rhs = get_rhs_or_phi_arg (stmt);
2940 if (!rhs)
2942 bitmap_clear_bit (interesting_names, version);
2943 return;
2946 if (!virtual_operand_p (lhs))
2947 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2948 else
2950 gimple use_stmt;
2951 imm_use_iterator iter;
2952 use_operand_p use_p;
2953 /* For virtual operands we have to propagate into all uses as
2954 otherwise we will create overlapping life-ranges. */
2955 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2956 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2957 SET_USE (use_p, rhs);
2958 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2959 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2960 remove_stmt_or_phi (stmt);
2963 /* Note that STMT may well have been deleted by now, so do
2964 not access it, instead use the saved version # to clear
2965 T's entry in the worklist. */
2966 bitmap_clear_bit (interesting_names, version);
2969 /* The first phase in degenerate PHI elimination.
2971 Eliminate the degenerate PHIs in BB, then recurse on the
2972 dominator children of BB. */
2974 static void
2975 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2977 gimple_stmt_iterator gsi;
2978 basic_block son;
2980 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2982 gimple phi = gsi_stmt (gsi);
2984 eliminate_const_or_copy (phi, interesting_names);
2987 /* Recurse into the dominator children of BB. */
2988 for (son = first_dom_son (CDI_DOMINATORS, bb);
2989 son;
2990 son = next_dom_son (CDI_DOMINATORS, son))
2991 eliminate_degenerate_phis_1 (son, interesting_names);
2995 /* A very simple pass to eliminate degenerate PHI nodes from the
2996 IL. This is meant to be fast enough to be able to be run several
2997 times in the optimization pipeline.
2999 Certain optimizations, particularly those which duplicate blocks
3000 or remove edges from the CFG can create or expose PHIs which are
3001 trivial copies or constant initializations.
3003 While we could pick up these optimizations in DOM or with the
3004 combination of copy-prop and CCP, those solutions are far too
3005 heavy-weight for our needs.
3007 This implementation has two phases so that we can efficiently
3008 eliminate the first order degenerate PHIs and second order
3009 degenerate PHIs.
3011 The first phase performs a dominator walk to identify and eliminate
3012 the vast majority of the degenerate PHIs. When a degenerate PHI
3013 is identified and eliminated any affected statements or PHIs
3014 are put on a worklist.
3016 The second phase eliminates degenerate PHIs and trivial copies
3017 or constant initializations using the worklist. This is how we
3018 pick up the secondary optimization opportunities with minimal
3019 cost. */
3021 static unsigned int
3022 eliminate_degenerate_phis (void)
3024 bitmap interesting_names;
3025 bitmap interesting_names1;
3027 /* Bitmap of blocks which need EH information updated. We can not
3028 update it on-the-fly as doing so invalidates the dominator tree. */
3029 need_eh_cleanup = BITMAP_ALLOC (NULL);
3031 /* INTERESTING_NAMES is effectively our worklist, indexed by
3032 SSA_NAME_VERSION.
3034 A set bit indicates that the statement or PHI node which
3035 defines the SSA_NAME should be (re)examined to determine if
3036 it has become a degenerate PHI or trivial const/copy propagation
3037 opportunity.
3039 Experiments have show we generally get better compilation
3040 time behavior with bitmaps rather than sbitmaps. */
3041 interesting_names = BITMAP_ALLOC (NULL);
3042 interesting_names1 = BITMAP_ALLOC (NULL);
3044 calculate_dominance_info (CDI_DOMINATORS);
3045 cfg_altered = false;
3047 /* First phase. Eliminate degenerate PHIs via a dominator
3048 walk of the CFG.
3050 Experiments have indicated that we generally get better
3051 compile-time behavior by visiting blocks in the first
3052 phase in dominator order. Presumably this is because walking
3053 in dominator order leaves fewer PHIs for later examination
3054 by the worklist phase. */
3055 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);
3057 /* Second phase. Eliminate second order degenerate PHIs as well
3058 as trivial copies or constant initializations identified by
3059 the first phase or this phase. Basically we keep iterating
3060 until our set of INTERESTING_NAMEs is empty. */
3061 while (!bitmap_empty_p (interesting_names))
3063 unsigned int i;
3064 bitmap_iterator bi;
3066 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3067 changed during the loop. Copy it to another bitmap and
3068 use that. */
3069 bitmap_copy (interesting_names1, interesting_names);
3071 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3073 tree name = ssa_name (i);
3075 /* Ignore SSA_NAMEs that have been released because
3076 their defining statement was deleted (unreachable). */
3077 if (name)
3078 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3079 interesting_names);
3083 if (cfg_altered)
3085 free_dominance_info (CDI_DOMINATORS);
3086 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3087 if (current_loops)
3088 loops_state_set (LOOPS_NEED_FIXUP);
3091 /* Propagation of const and copies may make some EH edges dead. Purge
3092 such edges from the CFG as needed. */
3093 if (!bitmap_empty_p (need_eh_cleanup))
3095 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3096 BITMAP_FREE (need_eh_cleanup);
3099 BITMAP_FREE (interesting_names);
3100 BITMAP_FREE (interesting_names1);
3101 return 0;
3104 namespace {
3106 const pass_data pass_data_phi_only_cprop =
3108 GIMPLE_PASS, /* type */
3109 "phicprop", /* name */
3110 OPTGROUP_NONE, /* optinfo_flags */
3111 true, /* has_gate */
3112 true, /* has_execute */
3113 TV_TREE_PHI_CPROP, /* tv_id */
3114 ( PROP_cfg | PROP_ssa ), /* properties_required */
3115 0, /* properties_provided */
3116 0, /* properties_destroyed */
3117 0, /* todo_flags_start */
3118 ( TODO_cleanup_cfg | TODO_verify_ssa
3119 | TODO_verify_stmts
3120 | TODO_update_ssa ), /* todo_flags_finish */
3123 class pass_phi_only_cprop : public gimple_opt_pass
3125 public:
3126 pass_phi_only_cprop (gcc::context *ctxt)
3127 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3130 /* opt_pass methods: */
3131 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3132 bool gate () { return gate_dominator (); }
3133 unsigned int execute () { return eliminate_degenerate_phis (); }
3135 }; // class pass_phi_only_cprop
3137 } // anon namespace
3139 gimple_opt_pass *
3140 make_pass_phi_only_cprop (gcc::context *ctxt)
3142 return new pass_phi_only_cprop (ctxt);