gcc/tree-ssa-dom.c
1 /* SSA Dominator optimizations for trees
2 Copyright (C) 2001-2014 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "hash-table.h"
25 #include "tm.h"
26 #include "tree.h"
27 #include "stor-layout.h"
28 #include "flags.h"
29 #include "tm_p.h"
30 #include "basic-block.h"
31 #include "cfgloop.h"
32 #include "inchash.h"
33 #include "function.h"
34 #include "gimple-pretty-print.h"
35 #include "tree-ssa-alias.h"
36 #include "internal-fn.h"
37 #include "gimple-fold.h"
38 #include "tree-eh.h"
39 #include "gimple-expr.h"
40 #include "is-a.h"
41 #include "gimple.h"
42 #include "gimple-iterator.h"
43 #include "gimple-ssa.h"
44 #include "tree-cfg.h"
45 #include "tree-phinodes.h"
46 #include "ssa-iterators.h"
47 #include "stringpool.h"
48 #include "tree-ssanames.h"
49 #include "tree-into-ssa.h"
50 #include "domwalk.h"
51 #include "tree-pass.h"
52 #include "tree-ssa-propagate.h"
53 #include "tree-ssa-threadupdate.h"
54 #include "langhooks.h"
55 #include "params.h"
56 #include "tree-ssa-threadedge.h"
57 #include "tree-ssa-dom.h"
58 #include "inchash.h"
60 /* This file implements optimizations on the dominator tree. */
62 /* Representation of a "naked" right-hand-side expression, to be used
63 in recording available expressions in the expression hash table. */
65 enum expr_kind
67 EXPR_SINGLE,
68 EXPR_UNARY,
69 EXPR_BINARY,
70 EXPR_TERNARY,
71 EXPR_CALL,
72 EXPR_PHI
75 struct hashable_expr
77 tree type;
78 enum expr_kind kind;
79 union {
80 struct { tree rhs; } single;
81 struct { enum tree_code op; tree opnd; } unary;
82 struct { enum tree_code op; tree opnd0, opnd1; } binary;
83 struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
84 struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
85 struct { size_t nargs; tree *args; } phi;
86 } ops;
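/* For illustration: an assignment such as "x_1 = a_2 + b_3" is recorded with
   kind == EXPR_BINARY, type == TREE_TYPE (x_1), ops.binary.op == PLUS_EXPR and
   ops.binary.opnd0/opnd1 == a_2/b_3; the LHS itself is stored in the
   enclosing expr_hash_elt, not here.  */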
89 /* Structure for recording known values of a conditional expression
90 at the exits from its block. */
92 typedef struct cond_equivalence_s
94 struct hashable_expr cond;
95 tree value;
96 } cond_equivalence;
99 /* Structure for recording edge equivalences as well as any pending
100 edge redirections during the dominator optimizer.
102 Computing and storing the edge equivalences instead of creating
103 them on-demand can save significant amounts of time, particularly
104 for pathological cases involving switch statements.
106 These structures live for a single iteration of the dominator
107 optimizer in the edge's AUX field. At the end of an iteration we
108 free each of these structures and update the AUX field to point
109 to any requested redirection target (the code for updating the
110 CFG and SSA graph for edge redirection expects redirection edge
111 targets to be in the AUX field for each edge). */
113 struct edge_info
115 /* If this edge creates a simple equivalence, the LHS and RHS of
116 the equivalence will be stored here. */
117 tree lhs;
118 tree rhs;
120 /* Traversing an edge may also indicate one or more particular conditions
121 are true or false. */
122 vec<cond_equivalence> cond_equivalences;
125 /* Stack of available expressions in AVAIL_EXPRs. Each block pushes any
126 expressions it enters into the hash table along with a marker entry
127 (null). When we finish processing the block, we pop off entries and
128 remove the expressions from the global hash table until we hit the
129 marker. */
130 typedef struct expr_hash_elt * expr_hash_elt_t;
132 static vec<expr_hash_elt_t> avail_exprs_stack;
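/* For example, while processing a block the stack might hold
   { ..., NULL, ELT1, ELT2 }; remove_local_expressions_from_table pops ELT2
   and ELT1 out of the hash table and stops at the NULL marker.  */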
134 /* Structure for entries in the expression hash table. */
136 struct expr_hash_elt
138 /* The value (lhs) of this expression. */
139 tree lhs;
141 /* The expression (rhs) we want to record. */
142 struct hashable_expr expr;
144 /* The stmt pointer if this element corresponds to a statement. */
145 gimple stmt;
147 /* The hash value for RHS. */
148 hashval_t hash;
150 /* A unique stamp, typically the address of the hash
151 element itself, used in removing entries from the table. */
152 struct expr_hash_elt *stamp;
155 /* Hashtable helpers. */
157 static bool hashable_expr_equal_p (const struct hashable_expr *,
158 const struct hashable_expr *);
159 static void free_expr_hash_elt (void *);
161 struct expr_elt_hasher
163 typedef expr_hash_elt *value_type;
164 typedef expr_hash_elt *compare_type;
165 typedef int store_values_directly;
166 static inline hashval_t hash (const value_type &);
167 static inline bool equal (const value_type &, const compare_type &);
168 static inline void remove (value_type &);
171 inline hashval_t
172 expr_elt_hasher::hash (const value_type &p)
174 return p->hash;
177 inline bool
178 expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
180 gimple stmt1 = p1->stmt;
181 const struct hashable_expr *expr1 = &p1->expr;
182 const struct expr_hash_elt *stamp1 = p1->stamp;
183 gimple stmt2 = p2->stmt;
184 const struct hashable_expr *expr2 = &p2->expr;
185 const struct expr_hash_elt *stamp2 = p2->stamp;
187 /* This case should apply only when removing entries from the table. */
188 if (stamp1 == stamp2)
189 return true;
191 /* FIXME tuples:
192 We add stmts to a hash table and then modify them. To detect the case
193 that we modify a stmt and then search for it, we assume that the hash
194 is always modified by that change.
195 We have to fully check why this doesn't happen on trunk or rewrite
196 this in a more reliable (and easier to understand) way. */
197 if (((const struct expr_hash_elt *)p1)->hash
198 != ((const struct expr_hash_elt *)p2)->hash)
199 return false;
201 /* In case of a collision, both RHS have to be identical and have the
202 same VUSE operands. */
203 if (hashable_expr_equal_p (expr1, expr2)
204 && types_compatible_p (expr1->type, expr2->type))
206 /* Note that STMT1 and/or STMT2 may be NULL. */
207 return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
208 == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
211 return false;
214 /* Delete an expr_hash_elt and reclaim its storage. */
216 inline void
217 expr_elt_hasher::remove (value_type &element)
219 free_expr_hash_elt (element);
222 /* Hash table with expressions made available during the renaming process.
223 When an assignment of the form X_i = EXPR is found, the statement is
224 stored in this table. If the same expression EXPR is later found on the
225 RHS of another statement, it is replaced with X_i (thus performing
226 global redundancy elimination). Similarly as we pass through conditionals
227 we record the conditional itself as having either a true or false value
228 in this table. */
229 static hash_table<expr_elt_hasher> *avail_exprs;
231 /* Stack of dest,src pairs that need to be restored during finalization.
233 A NULL entry is used to mark the end of pairs which need to be
234 restored during finalization of this block. */
235 static vec<tree> const_and_copies_stack;
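/* Entries are pushed in (previous value, destination name) order by
   record_const_or_copy_1, so restore_vars_to_original_value pops the
   destination name first and its previous value second.  */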
237 /* Track whether or not we have changed the control flow graph. */
238 static bool cfg_altered;
240 /* Bitmap of blocks that have had EH statements cleaned. We should
241 remove their dead edges eventually. */
242 static bitmap need_eh_cleanup;
244 /* Statistics for dominator optimizations. */
245 struct opt_stats_d
247 long num_stmts;
248 long num_exprs_considered;
249 long num_re;
250 long num_const_prop;
251 long num_copy_prop;
254 static struct opt_stats_d opt_stats;
256 /* Local functions. */
257 static void optimize_stmt (basic_block, gimple_stmt_iterator);
258 static tree lookup_avail_expr (gimple, bool);
259 static hashval_t avail_expr_hash (const void *);
260 static void htab_statistics (FILE *,
261 const hash_table<expr_elt_hasher> &);
262 static void record_cond (cond_equivalence *);
263 static void record_const_or_copy (tree, tree);
264 static void record_equality (tree, tree);
265 static void record_equivalences_from_phis (basic_block);
266 static void record_equivalences_from_incoming_edge (basic_block);
267 static void eliminate_redundant_computations (gimple_stmt_iterator *);
268 static void record_equivalences_from_stmt (gimple, int);
269 static void remove_local_expressions_from_table (void);
270 static void restore_vars_to_original_value (void);
271 static edge single_incoming_edge_ignoring_loop_edges (basic_block);
274 /* Given a statement STMT, initialize the hash table element pointed to
275 by ELEMENT. */
277 static void
278 initialize_hash_element (gimple stmt, tree lhs,
279 struct expr_hash_elt *element)
281 enum gimple_code code = gimple_code (stmt);
282 struct hashable_expr *expr = &element->expr;
284 if (code == GIMPLE_ASSIGN)
286 enum tree_code subcode = gimple_assign_rhs_code (stmt);
288 switch (get_gimple_rhs_class (subcode))
290 case GIMPLE_SINGLE_RHS:
291 expr->kind = EXPR_SINGLE;
292 expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
293 expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
294 break;
295 case GIMPLE_UNARY_RHS:
296 expr->kind = EXPR_UNARY;
297 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
298 expr->ops.unary.op = subcode;
299 expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
300 break;
301 case GIMPLE_BINARY_RHS:
302 expr->kind = EXPR_BINARY;
303 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
304 expr->ops.binary.op = subcode;
305 expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
306 expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
307 break;
308 case GIMPLE_TERNARY_RHS:
309 expr->kind = EXPR_TERNARY;
310 expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
311 expr->ops.ternary.op = subcode;
312 expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
313 expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
314 expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
315 break;
316 default:
317 gcc_unreachable ();
320 else if (code == GIMPLE_COND)
322 expr->type = boolean_type_node;
323 expr->kind = EXPR_BINARY;
324 expr->ops.binary.op = gimple_cond_code (stmt);
325 expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
326 expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
328 else if (code == GIMPLE_CALL)
330 size_t nargs = gimple_call_num_args (stmt);
331 size_t i;
333 gcc_assert (gimple_call_lhs (stmt));
335 expr->type = TREE_TYPE (gimple_call_lhs (stmt));
336 expr->kind = EXPR_CALL;
337 expr->ops.call.fn_from = stmt;
339 if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
340 expr->ops.call.pure = true;
341 else
342 expr->ops.call.pure = false;
344 expr->ops.call.nargs = nargs;
345 expr->ops.call.args = XCNEWVEC (tree, nargs);
346 for (i = 0; i < nargs; i++)
347 expr->ops.call.args[i] = gimple_call_arg (stmt, i);
349 else if (code == GIMPLE_SWITCH)
351 expr->type = TREE_TYPE (gimple_switch_index (stmt));
352 expr->kind = EXPR_SINGLE;
353 expr->ops.single.rhs = gimple_switch_index (stmt);
355 else if (code == GIMPLE_GOTO)
357 expr->type = TREE_TYPE (gimple_goto_dest (stmt));
358 expr->kind = EXPR_SINGLE;
359 expr->ops.single.rhs = gimple_goto_dest (stmt);
361 else if (code == GIMPLE_PHI)
363 size_t nargs = gimple_phi_num_args (stmt);
364 size_t i;
366 expr->type = TREE_TYPE (gimple_phi_result (stmt));
367 expr->kind = EXPR_PHI;
368 expr->ops.phi.nargs = nargs;
369 expr->ops.phi.args = XCNEWVEC (tree, nargs);
371 for (i = 0; i < nargs; i++)
372 expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
374 else
375 gcc_unreachable ();
377 element->lhs = lhs;
378 element->stmt = stmt;
379 element->hash = avail_expr_hash (element);
380 element->stamp = element;
383 /* Given a conditional expression COND as a tree, initialize
384 a hashable_expr expression EXPR. The conditional must be a
385 comparison or logical negation. A constant or a variable is
386 not permitted. */
388 static void
389 initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
391 expr->type = boolean_type_node;
393 if (COMPARISON_CLASS_P (cond))
395 expr->kind = EXPR_BINARY;
396 expr->ops.binary.op = TREE_CODE (cond);
397 expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
398 expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
400 else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
402 expr->kind = EXPR_UNARY;
403 expr->ops.unary.op = TRUTH_NOT_EXPR;
404 expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
406 else
407 gcc_unreachable ();
410 /* Given a hashable_expr expression EXPR and an LHS,
411 initialize the hash table element pointed to by ELEMENT. */
413 static void
414 initialize_hash_element_from_expr (struct hashable_expr *expr,
415 tree lhs,
416 struct expr_hash_elt *element)
418 element->expr = *expr;
419 element->lhs = lhs;
420 element->stmt = NULL;
421 element->hash = avail_expr_hash (element);
422 element->stamp = element;
425 /* Compare two hashable_expr structures for equivalence.
426 They are considered equivalent when the expressions
427 they denote must necessarily be equal. The logic is intended
428 to follow that of operand_equal_p in fold-const.c */
430 static bool
431 hashable_expr_equal_p (const struct hashable_expr *expr0,
432 const struct hashable_expr *expr1)
434 tree type0 = expr0->type;
435 tree type1 = expr1->type;
437 /* If either type is NULL, there is nothing to check. */
438 if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
439 return false;
441 /* If the two types don't have the same signedness, precision, and mode,
442 then we can't consider them equal. */
443 if (type0 != type1
444 && (TREE_CODE (type0) == ERROR_MARK
445 || TREE_CODE (type1) == ERROR_MARK
446 || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
447 || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
448 || TYPE_MODE (type0) != TYPE_MODE (type1)))
449 return false;
451 if (expr0->kind != expr1->kind)
452 return false;
454 switch (expr0->kind)
456 case EXPR_SINGLE:
457 return operand_equal_p (expr0->ops.single.rhs,
458 expr1->ops.single.rhs, 0);
460 case EXPR_UNARY:
461 if (expr0->ops.unary.op != expr1->ops.unary.op)
462 return false;
464 if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
465 || expr0->ops.unary.op == NON_LVALUE_EXPR)
466 && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
467 return false;
469 return operand_equal_p (expr0->ops.unary.opnd,
470 expr1->ops.unary.opnd, 0);
472 case EXPR_BINARY:
473 if (expr0->ops.binary.op != expr1->ops.binary.op)
474 return false;
476 if (operand_equal_p (expr0->ops.binary.opnd0,
477 expr1->ops.binary.opnd0, 0)
478 && operand_equal_p (expr0->ops.binary.opnd1,
479 expr1->ops.binary.opnd1, 0))
480 return true;
482 /* For commutative ops, allow the other order. */
483 return (commutative_tree_code (expr0->ops.binary.op)
484 && operand_equal_p (expr0->ops.binary.opnd0,
485 expr1->ops.binary.opnd1, 0)
486 && operand_equal_p (expr0->ops.binary.opnd1,
487 expr1->ops.binary.opnd0, 0));
489 case EXPR_TERNARY:
490 if (expr0->ops.ternary.op != expr1->ops.ternary.op
491 || !operand_equal_p (expr0->ops.ternary.opnd2,
492 expr1->ops.ternary.opnd2, 0))
493 return false;
495 if (operand_equal_p (expr0->ops.ternary.opnd0,
496 expr1->ops.ternary.opnd0, 0)
497 && operand_equal_p (expr0->ops.ternary.opnd1,
498 expr1->ops.ternary.opnd1, 0))
499 return true;
501 /* For commutative ops, allow the other order. */
502 return (commutative_ternary_tree_code (expr0->ops.ternary.op)
503 && operand_equal_p (expr0->ops.ternary.opnd0,
504 expr1->ops.ternary.opnd1, 0)
505 && operand_equal_p (expr0->ops.ternary.opnd1,
506 expr1->ops.ternary.opnd0, 0));
508 case EXPR_CALL:
510 size_t i;
512 /* If the calls are to different functions, then they
513 clearly cannot be equal. */
514 if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
515 expr1->ops.call.fn_from))
516 return false;
518 if (! expr0->ops.call.pure)
519 return false;
521 if (expr0->ops.call.nargs != expr1->ops.call.nargs)
522 return false;
524 for (i = 0; i < expr0->ops.call.nargs; i++)
525 if (! operand_equal_p (expr0->ops.call.args[i],
526 expr1->ops.call.args[i], 0))
527 return false;
529 if (stmt_could_throw_p (expr0->ops.call.fn_from))
531 int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
532 int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
533 if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
534 return false;
537 return true;
540 case EXPR_PHI:
542 size_t i;
544 if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
545 return false;
547 for (i = 0; i < expr0->ops.phi.nargs; i++)
548 if (! operand_equal_p (expr0->ops.phi.args[i],
549 expr1->ops.phi.args[i], 0))
550 return false;
552 return true;
555 default:
556 gcc_unreachable ();
560 /* Generate a hash value for a pair of expressions. This can be used
561 iteratively by passing a previous result in HSTATE.
563 The same hash value is always returned for a given pair of expressions,
564 regardless of the order in which they are presented. This is useful in
565 hashing the operands of commutative functions. */
567 namespace inchash
570 static void
571 add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
573 hash one, two;
575 inchash::add_expr (t1, one);
576 inchash::add_expr (t2, two);
577 hstate.add_commutative (one, two);
580 /* Compute a hash value for a hashable_expr value EXPR and a
581 previously accumulated hash value VAL. If two hashable_expr
582 values compare equal with hashable_expr_equal_p, they must
583 hash to the same value, given an identical value of VAL.
584 The logic is intended to follow inchash::add_expr in tree.c. */
586 static void
587 add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
589 switch (expr->kind)
591 case EXPR_SINGLE:
592 inchash::add_expr (expr->ops.single.rhs, hstate);
593 break;
595 case EXPR_UNARY:
596 hstate.add_object (expr->ops.unary.op);
598 /* Make sure to include signedness in the hash computation.
599 Don't hash the type, that can lead to having nodes which
600 compare equal according to operand_equal_p, but which
601 have different hash codes. */
602 if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
603 || expr->ops.unary.op == NON_LVALUE_EXPR)
604 hstate.add_int (TYPE_UNSIGNED (expr->type));
606 inchash::add_expr (expr->ops.unary.opnd, hstate);
607 break;
609 case EXPR_BINARY:
610 hstate.add_object (expr->ops.binary.op);
611 if (commutative_tree_code (expr->ops.binary.op))
612 inchash::add_expr_commutative (expr->ops.binary.opnd0,
613 expr->ops.binary.opnd1, hstate);
614 else
616 inchash::add_expr (expr->ops.binary.opnd0, hstate);
617 inchash::add_expr (expr->ops.binary.opnd1, hstate);
619 break;
621 case EXPR_TERNARY:
622 hstate.add_object (expr->ops.ternary.op);
623 if (commutative_ternary_tree_code (expr->ops.ternary.op))
624 inchash::add_expr_commutative (expr->ops.ternary.opnd0,
625 expr->ops.ternary.opnd1, hstate);
626 else
628 inchash::add_expr (expr->ops.ternary.opnd0, hstate);
629 inchash::add_expr (expr->ops.ternary.opnd1, hstate);
631 inchash::add_expr (expr->ops.ternary.opnd2, hstate);
632 break;
634 case EXPR_CALL:
636 size_t i;
637 enum tree_code code = CALL_EXPR;
638 gimple fn_from;
640 hstate.add_object (code);
641 fn_from = expr->ops.call.fn_from;
642 if (gimple_call_internal_p (fn_from))
643 hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
644 else
645 inchash::add_expr (gimple_call_fn (fn_from), hstate);
646 for (i = 0; i < expr->ops.call.nargs; i++)
647 inchash::add_expr (expr->ops.call.args[i], hstate);
649 break;
651 case EXPR_PHI:
653 size_t i;
655 for (i = 0; i < expr->ops.phi.nargs; i++)
656 inchash::add_expr (expr->ops.phi.args[i], hstate);
658 break;
660 default:
661 gcc_unreachable ();
667 /* Print a diagnostic dump of an expression hash table entry. */
669 static void
670 print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
672 if (element->stmt)
673 fprintf (stream, "STMT ");
674 else
675 fprintf (stream, "COND ");
677 if (element->lhs)
679 print_generic_expr (stream, element->lhs, 0);
680 fprintf (stream, " = ");
683 switch (element->expr.kind)
685 case EXPR_SINGLE:
686 print_generic_expr (stream, element->expr.ops.single.rhs, 0);
687 break;
689 case EXPR_UNARY:
690 fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
691 print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
692 break;
694 case EXPR_BINARY:
695 print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
696 fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
697 print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
698 break;
700 case EXPR_TERNARY:
701 fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
702 print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
703 fputs (", ", stream);
704 print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
705 fputs (", ", stream);
706 print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
707 fputs (">", stream);
708 break;
710 case EXPR_CALL:
712 size_t i;
713 size_t nargs = element->expr.ops.call.nargs;
714 gimple fn_from;
716 fn_from = element->expr.ops.call.fn_from;
717 if (gimple_call_internal_p (fn_from))
718 fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
719 stream);
720 else
721 print_generic_expr (stream, gimple_call_fn (fn_from), 0);
722 fprintf (stream, " (");
723 for (i = 0; i < nargs; i++)
725 print_generic_expr (stream, element->expr.ops.call.args[i], 0);
726 if (i + 1 < nargs)
727 fprintf (stream, ", ");
729 fprintf (stream, ")");
731 break;
733 case EXPR_PHI:
735 size_t i;
736 size_t nargs = element->expr.ops.phi.nargs;
738 fprintf (stream, "PHI <");
739 for (i = 0; i < nargs; i++)
741 print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
742 if (i + 1 < nargs)
743 fprintf (stream, ", ");
745 fprintf (stream, ">");
747 break;
749 fprintf (stream, "\n");
751 if (element->stmt)
753 fprintf (stream, " ");
754 print_gimple_stmt (stream, element->stmt, 0, 0);
758 /* Delete variable sized pieces of the expr_hash_elt ELEMENT. */
760 static void
761 free_expr_hash_elt_contents (struct expr_hash_elt *element)
763 if (element->expr.kind == EXPR_CALL)
764 free (element->expr.ops.call.args);
765 else if (element->expr.kind == EXPR_PHI)
766 free (element->expr.ops.phi.args);
769 /* Delete an expr_hash_elt and reclaim its storage. */
771 static void
772 free_expr_hash_elt (void *elt)
774 struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
775 free_expr_hash_elt_contents (element);
776 free (element);
779 /* Allocate an EDGE_INFO for edge E and attach it to E.
780 Return the new EDGE_INFO structure. */
782 static struct edge_info *
783 allocate_edge_info (edge e)
785 struct edge_info *edge_info;
787 edge_info = XCNEW (struct edge_info);
789 e->aux = edge_info;
790 return edge_info;
793 /* Free all EDGE_INFO structures associated with edges in the CFG.
794 If a particular edge can be threaded, copy the redirection
795 target from the EDGE_INFO structure into the edge's AUX field
796 as required by code to update the CFG and SSA graph for
797 jump threading. */
799 static void
800 free_all_edge_infos (void)
802 basic_block bb;
803 edge_iterator ei;
804 edge e;
806 FOR_EACH_BB_FN (bb, cfun)
808 FOR_EACH_EDGE (e, ei, bb->preds)
810 struct edge_info *edge_info = (struct edge_info *) e->aux;
812 if (edge_info)
814 edge_info->cond_equivalences.release ();
815 free (edge_info);
816 e->aux = NULL;
822 class dom_opt_dom_walker : public dom_walker
824 public:
825 dom_opt_dom_walker (cdi_direction direction)
826 : dom_walker (direction), m_dummy_cond (NULL) {}
828 virtual void before_dom_children (basic_block);
829 virtual void after_dom_children (basic_block);
831 private:
832 void thread_across_edge (edge);
834 gimple m_dummy_cond;
837 /* Jump threading, redundancy elimination and const/copy propagation.
839 This pass may expose new symbols that need to be renamed into SSA. For
840 every new symbol exposed, its corresponding bit will be set in
841 VARS_TO_RENAME. */
843 namespace {
845 const pass_data pass_data_dominator =
847 GIMPLE_PASS, /* type */
848 "dom", /* name */
849 OPTGROUP_NONE, /* optinfo_flags */
850 TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
851 ( PROP_cfg | PROP_ssa ), /* properties_required */
852 0, /* properties_provided */
853 0, /* properties_destroyed */
854 0, /* todo_flags_start */
855 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
858 class pass_dominator : public gimple_opt_pass
860 public:
861 pass_dominator (gcc::context *ctxt)
862 : gimple_opt_pass (pass_data_dominator, ctxt)
865 /* opt_pass methods: */
866 opt_pass * clone () { return new pass_dominator (m_ctxt); }
867 virtual bool gate (function *) { return flag_tree_dom != 0; }
868 virtual unsigned int execute (function *);
870 }; // class pass_dominator
872 unsigned int
873 pass_dominator::execute (function *fun)
875 memset (&opt_stats, 0, sizeof (opt_stats));
877 /* Create our hash tables. */
878 avail_exprs = new hash_table<expr_elt_hasher> (1024);
879 avail_exprs_stack.create (20);
880 const_and_copies_stack.create (20);
881 need_eh_cleanup = BITMAP_ALLOC (NULL);
883 calculate_dominance_info (CDI_DOMINATORS);
884 cfg_altered = false;
886 /* We need to know loop structures in order to avoid destroying them
887 in jump threading. Note that we still can e.g. thread through loop
888 headers to an exit edge, or through loop header to the loop body, assuming
889 that we update the loop info.
891 TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
892 to several overly conservative bail-outs in jump threading, case
893 gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
894 missing. We should improve jump threading in the future so that
895 LOOPS_HAVE_PREHEADERS won't be needed here. */
896 loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);
898 /* Initialize the value-handle array. */
899 threadedge_initialize_values ();
901 /* We need accurate information regarding back edges in the CFG
902 for jump threading; this may include back edges that are not part of
903 a single loop. */
904 mark_dfs_back_edges ();
906 /* Recursively walk the dominator tree optimizing statements. */
907 dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);
910 gimple_stmt_iterator gsi;
911 basic_block bb;
912 FOR_EACH_BB_FN (bb, fun)
914 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
915 update_stmt_if_modified (gsi_stmt (gsi));
919 /* If we exposed any new variables, go ahead and put them into
920 SSA form now, before we handle jump threading. This simplifies
921 interactions between rewriting of _DECL nodes into SSA form
922 and rewriting SSA_NAME nodes into SSA form after block
923 duplication and CFG manipulation. */
924 update_ssa (TODO_update_ssa);
926 free_all_edge_infos ();
928 /* Thread jumps, creating duplicate blocks as needed. */
929 cfg_altered |= thread_through_all_blocks (first_pass_instance);
931 if (cfg_altered)
932 free_dominance_info (CDI_DOMINATORS);
934 /* Removal of statements may make some EH edges dead. Purge
935 such edges from the CFG as needed. */
936 if (!bitmap_empty_p (need_eh_cleanup))
938 unsigned i;
939 bitmap_iterator bi;
941 /* Jump threading may have created forwarder blocks from blocks
942 needing EH cleanup; the new successor of these blocks, which
943 has inherited from the original block, needs the cleanup.
944 Don't clear bits in the bitmap, as that can break the bitmap
945 iterator. */
946 EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
948 basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
949 if (bb == NULL)
950 continue;
951 while (single_succ_p (bb)
952 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
953 bb = single_succ (bb);
954 if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
955 continue;
956 if ((unsigned) bb->index != i)
957 bitmap_set_bit (need_eh_cleanup, bb->index);
960 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
961 bitmap_clear (need_eh_cleanup);
964 statistics_counter_event (fun, "Redundant expressions eliminated",
965 opt_stats.num_re);
966 statistics_counter_event (fun, "Constants propagated",
967 opt_stats.num_const_prop);
968 statistics_counter_event (fun, "Copies propagated",
969 opt_stats.num_copy_prop);
971 /* Debugging dumps. */
972 if (dump_file && (dump_flags & TDF_STATS))
973 dump_dominator_optimization_stats (dump_file);
975 loop_optimizer_finalize ();
977 /* Delete our main hashtable. */
978 delete avail_exprs;
979 avail_exprs = NULL;
981 /* Free asserted bitmaps and stacks. */
982 BITMAP_FREE (need_eh_cleanup);
984 avail_exprs_stack.release ();
985 const_and_copies_stack.release ();
987 /* Free the value-handle array. */
988 threadedge_finalize_values ();
990 return 0;
993 } // anon namespace
995 gimple_opt_pass *
996 make_pass_dominator (gcc::context *ctxt)
998 return new pass_dominator (ctxt);
1002 /* Given a conditional statement CONDSTMT, convert the
1003 condition to a canonical form. */
1005 static void
1006 canonicalize_comparison (gimple condstmt)
1008 tree op0;
1009 tree op1;
1010 enum tree_code code;
1012 gcc_assert (gimple_code (condstmt) == GIMPLE_COND);
1014 op0 = gimple_cond_lhs (condstmt);
1015 op1 = gimple_cond_rhs (condstmt);
1017 code = gimple_cond_code (condstmt);
1019 /* If it would be profitable to swap the operands, then do so to
1020 canonicalize the statement, enabling better optimization.
1022 By placing canonicalization of such expressions here we
1023 transparently keep statements in canonical form, even
1024 when the statement is modified. */
1025 if (tree_swap_operands_p (op0, op1, false))
1027 /* For relationals we need to swap the operands
1028 and change the code. */
1029 if (code == LT_EXPR
1030 || code == GT_EXPR
1031 || code == LE_EXPR
1032 || code == GE_EXPR)
1034 code = swap_tree_comparison (code);
1036 gimple_cond_set_code (condstmt, code);
1037 gimple_cond_set_lhs (condstmt, op1);
1038 gimple_cond_set_rhs (condstmt, op0);
1040 update_stmt (condstmt);
1045 /* Initialize local stacks for this optimizer and record equivalences
1046 upon entry to BB. Equivalences can come from the edge traversed to
1047 reach BB or they may come from PHI nodes at the start of BB. */
1049 /* Pop entries off AVAIL_EXPRS_STACK and remove them from the AVAIL_EXPRS
1050 hash table until we hit the NULL marker for the current block. */
1052 static void
1053 remove_local_expressions_from_table (void)
1055 /* Remove all the expressions made available in this block. */
1056 while (avail_exprs_stack.length () > 0)
1058 expr_hash_elt_t victim = avail_exprs_stack.pop ();
1059 expr_hash_elt **slot;
1061 if (victim == NULL)
1062 break;
1064 /* This must precede the actual removal from the hash table,
1065 as ELEMENT and the table entry may share a call argument
1066 vector which will be freed during removal. */
1067 if (dump_file && (dump_flags & TDF_DETAILS))
1069 fprintf (dump_file, "<<<< ");
1070 print_expr_hash_elt (dump_file, victim);
1073 slot = avail_exprs->find_slot (victim, NO_INSERT);
1074 gcc_assert (slot && *slot == victim);
1075 avail_exprs->clear_slot (slot);
1079 /* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
1080 CONST_AND_COPIES to its original state, stopping when we hit a
1081 NULL marker. */
1083 static void
1084 restore_vars_to_original_value (void)
1086 while (const_and_copies_stack.length () > 0)
1088 tree prev_value, dest;
1090 dest = const_and_copies_stack.pop ();
1092 if (dest == NULL)
1093 break;
1095 if (dump_file && (dump_flags & TDF_DETAILS))
1097 fprintf (dump_file, "<<<< COPY ");
1098 print_generic_expr (dump_file, dest, 0);
1099 fprintf (dump_file, " = ");
1100 print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
1101 fprintf (dump_file, "\n");
1104 prev_value = const_and_copies_stack.pop ();
1105 set_ssa_name_value (dest, prev_value);
1109 /* A trivial wrapper so that we can present the generic jump
1110 threading code with a simple API for simplifying statements. */
1111 static tree
1112 simplify_stmt_for_jump_threading (gimple stmt,
1113 gimple within_stmt ATTRIBUTE_UNUSED)
1115 return lookup_avail_expr (stmt, false);
1118 /* Record into the equivalence tables any equivalences implied by
1119 traversing edge E (which are cached in E->aux).
1121 Callers are responsible for managing the unwinding markers. */
1122 static void
1123 record_temporary_equivalences (edge e)
1125 int i;
1126 struct edge_info *edge_info = (struct edge_info *) e->aux;
1128 /* If we have info associated with this edge, record it into
1129 our equivalence tables. */
1130 if (edge_info)
1132 cond_equivalence *eq;
1133 tree lhs = edge_info->lhs;
1134 tree rhs = edge_info->rhs;
1136 /* If we have a simple NAME = VALUE equivalence, record it. */
1137 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1138 record_const_or_copy (lhs, rhs);
1140 /* If we have 0 = COND or 1 = COND equivalences, record them
1141 into our expression hash tables. */
1142 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1143 record_cond (eq);
1147 /* Wrapper for common code to attempt to thread an edge. For example,
1148 it handles lazily building the dummy condition and the bookkeeping
1149 when jump threading is successful. */
1151 void
1152 dom_opt_dom_walker::thread_across_edge (edge e)
1154 if (! m_dummy_cond)
1155 m_dummy_cond =
1156 gimple_build_cond (NE_EXPR,
1157 integer_zero_node, integer_zero_node,
1158 NULL, NULL);
1160 /* Push a marker on both stacks so we can unwind the tables back to their
1161 current state. */
1162 avail_exprs_stack.safe_push (NULL);
1163 const_and_copies_stack.safe_push (NULL_TREE);
1165 /* Traversing E may result in equivalences we can utilize. */
1166 record_temporary_equivalences (e);
1168 /* With all the edge equivalences in the tables, go ahead and attempt
1169 to thread through E->dest. */
1170 ::thread_across_edge (m_dummy_cond, e, false,
1171 &const_and_copies_stack,
1172 simplify_stmt_for_jump_threading);
1174 /* And restore the various tables to their state before
1175 we threaded this edge.
1177 XXX The code in tree-ssa-threadedge.c will restore the state of
1178 the const_and_copies table. We just have to restore the expression
1179 table. */
1180 remove_local_expressions_from_table ();
1183 /* PHI nodes can create equivalences too.
1185 Ignoring any alternatives which are the same as the result, if
1186 all the alternatives are equal, then the PHI node creates an
1187 equivalence. */
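/* For example, "x_3 = PHI <y_5(2), y_5(3), x_3(4)>" allows us to record
   x_3 = y_5, since the self-referencing alternative is ignored.  */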
1189 static void
1190 record_equivalences_from_phis (basic_block bb)
1192 gimple_stmt_iterator gsi;
1194 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1196 gimple phi = gsi_stmt (gsi);
1198 tree lhs = gimple_phi_result (phi);
1199 tree rhs = NULL;
1200 size_t i;
1202 for (i = 0; i < gimple_phi_num_args (phi); i++)
1204 tree t = gimple_phi_arg_def (phi, i);
1206 /* Ignore alternatives which are the same as our LHS. Since
1207 LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
1208 can simply compare pointers. */
1209 if (lhs == t)
1210 continue;
1212 /* If we have not processed an alternative yet, then set
1213 RHS to this alternative. */
1214 if (rhs == NULL)
1215 rhs = t;
1216 /* If we have processed an alternative (stored in RHS), then
1217 see if it is equal to this one. If it isn't, then stop
1218 the search. */
1219 else if (! operand_equal_for_phi_arg_p (rhs, t))
1220 break;
1223 /* If we had no interesting alternatives, then all the RHS alternatives
1224 must have been the same as LHS. */
1225 if (!rhs)
1226 rhs = lhs;
1228 /* If we managed to iterate through each PHI alternative without
1229 breaking out of the loop, then we have a PHI which may create
1230 a useful equivalence. We do not need to record unwind data for
1231 this, since this is a true assignment and not an equivalence
1232 inferred from a comparison. All uses of this ssa name are dominated
1233 by this assignment, so unwinding just costs time and space. */
1234 if (i == gimple_phi_num_args (phi)
1235 && may_propagate_copy (lhs, rhs))
1236 set_ssa_name_value (lhs, rhs);
1240 /* Ignoring loop backedges, if BB has precisely one incoming edge then
1241 return that edge. Otherwise return NULL. */
1242 static edge
1243 single_incoming_edge_ignoring_loop_edges (basic_block bb)
1245 edge retval = NULL;
1246 edge e;
1247 edge_iterator ei;
1249 FOR_EACH_EDGE (e, ei, bb->preds)
1251 /* A loop back edge can be identified by the destination of
1252 the edge dominating the source of the edge. */
1253 if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
1254 continue;
1256 /* If we have already seen a non-loop edge, then we must have
1257 multiple incoming non-loop edges and thus we return NULL. */
1258 if (retval)
1259 return NULL;
1261 /* This is the first non-loop incoming edge we have found. Record
1262 it. */
1263 retval = e;
1266 return retval;
1269 /* Record any equivalences created by the incoming edge to BB. If BB
1270 has more than one incoming edge, then no equivalence is created. */
1272 static void
1273 record_equivalences_from_incoming_edge (basic_block bb)
1275 edge e;
1276 basic_block parent;
1277 struct edge_info *edge_info;
1279 /* If our parent block ended with a control statement, then we may be
1280 able to record some equivalences based on which outgoing edge from
1281 the parent was followed. */
1282 parent = get_immediate_dominator (CDI_DOMINATORS, bb);
1284 e = single_incoming_edge_ignoring_loop_edges (bb);
1286 /* If we had a single incoming edge from our parent block, then enter
1287 any data associated with the edge into our tables. */
1288 if (e && e->src == parent)
1290 unsigned int i;
1292 edge_info = (struct edge_info *) e->aux;
1294 if (edge_info)
1296 tree lhs = edge_info->lhs;
1297 tree rhs = edge_info->rhs;
1298 cond_equivalence *eq;
1300 if (lhs)
1301 record_equality (lhs, rhs);
1303 /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
1304 set via a widening type conversion, then we may be able to record
1305 additional equivalences. */
1306 if (lhs
1307 && TREE_CODE (lhs) == SSA_NAME
1308 && is_gimple_constant (rhs)
1309 && TREE_CODE (rhs) == INTEGER_CST)
1311 gimple defstmt = SSA_NAME_DEF_STMT (lhs);
1313 if (defstmt
1314 && is_gimple_assign (defstmt)
1315 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
1317 tree old_rhs = gimple_assign_rhs1 (defstmt);
1319 /* If the conversion widens the original value and
1320 the constant is in the range of the type of OLD_RHS,
1321 then convert the constant and record the equivalence.
1323 Note that int_fits_type_p does not check the precision
1324 if the upper and lower bounds are OK. */
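/* For instance, if "i_2 = (int) c_1" with c_1 of type char and this edge
   tells us i_2 == 97, then 97 fits in char and we additionally record
   c_1 == 97 converted to char.  */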
1325 if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
1326 && (TYPE_PRECISION (TREE_TYPE (lhs))
1327 > TYPE_PRECISION (TREE_TYPE (old_rhs)))
1328 && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
1330 tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
1331 record_equality (old_rhs, newval);
1336 for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
1337 record_cond (eq);
1342 /* Dump SSA statistics on FILE. */
1344 void
1345 dump_dominator_optimization_stats (FILE *file)
1347 fprintf (file, "Total number of statements: %6ld\n\n",
1348 opt_stats.num_stmts);
1349 fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
1350 opt_stats.num_exprs_considered);
1352 fprintf (file, "\nHash table statistics:\n");
1354 fprintf (file, " avail_exprs: ");
1355 htab_statistics (file, *avail_exprs);
1359 /* Dump SSA statistics on stderr. */
1361 DEBUG_FUNCTION void
1362 debug_dominator_optimization_stats (void)
1364 dump_dominator_optimization_stats (stderr);
1368 /* Dump statistics for the hash table HTAB. */
1370 static void
1371 htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
1373 fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
1374 (long) htab.size (),
1375 (long) htab.elements (),
1376 htab.collisions ());
1380 /* Enter condition equivalence into the expression hash table.
1381 This indicates that a conditional expression has a known
1382 boolean value. */
1384 static void
1385 record_cond (cond_equivalence *p)
1387 struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
1388 expr_hash_elt **slot;
1390 initialize_hash_element_from_expr (&p->cond, p->value, element);
1392 slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
1393 if (*slot == NULL)
1395 *slot = element;
1397 if (dump_file && (dump_flags & TDF_DETAILS))
1399 fprintf (dump_file, "1>>> ");
1400 print_expr_hash_elt (dump_file, element);
1403 avail_exprs_stack.safe_push (element);
1405 else
1406 free_expr_hash_elt (element);
1409 /* Build a cond_equivalence record indicating that the comparison
1410 CODE holds between operands OP0 and OP1 and push it to **P. */
1412 static void
1413 build_and_record_new_cond (enum tree_code code,
1414 tree op0, tree op1,
1415 vec<cond_equivalence> *p)
1417 cond_equivalence c;
1418 struct hashable_expr *cond = &c.cond;
1420 gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
1422 cond->type = boolean_type_node;
1423 cond->kind = EXPR_BINARY;
1424 cond->ops.binary.op = code;
1425 cond->ops.binary.opnd0 = op0;
1426 cond->ops.binary.opnd1 = op1;
1428 c.value = boolean_true_node;
1429 p->safe_push (c);
1432 /* Record that COND is true and INVERTED is false into the edge information
1433 structure. Also record that any conditions dominated by COND are true
1434 as well.
1436 For example, if a < b is true, then a <= b must also be true. */
1438 static void
1439 record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
1441 tree op0, op1;
1442 cond_equivalence c;
1444 if (!COMPARISON_CLASS_P (cond))
1445 return;
1447 op0 = TREE_OPERAND (cond, 0);
1448 op1 = TREE_OPERAND (cond, 1);
1450 switch (TREE_CODE (cond))
1452 case LT_EXPR:
1453 case GT_EXPR:
1454 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1456 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1457 &edge_info->cond_equivalences);
1458 build_and_record_new_cond (LTGT_EXPR, op0, op1,
1459 &edge_info->cond_equivalences);
1462 build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
1463 ? LE_EXPR : GE_EXPR),
1464 op0, op1, &edge_info->cond_equivalences);
1465 build_and_record_new_cond (NE_EXPR, op0, op1,
1466 &edge_info->cond_equivalences);
1467 break;
1469 case GE_EXPR:
1470 case LE_EXPR:
1471 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1473 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1474 &edge_info->cond_equivalences);
1476 break;
1478 case EQ_EXPR:
1479 if (FLOAT_TYPE_P (TREE_TYPE (op0)))
1481 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1482 &edge_info->cond_equivalences);
1484 build_and_record_new_cond (LE_EXPR, op0, op1,
1485 &edge_info->cond_equivalences);
1486 build_and_record_new_cond (GE_EXPR, op0, op1,
1487 &edge_info->cond_equivalences);
1488 break;
1490 case UNORDERED_EXPR:
1491 build_and_record_new_cond (NE_EXPR, op0, op1,
1492 &edge_info->cond_equivalences);
1493 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1494 &edge_info->cond_equivalences);
1495 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1496 &edge_info->cond_equivalences);
1497 build_and_record_new_cond (UNEQ_EXPR, op0, op1,
1498 &edge_info->cond_equivalences);
1499 build_and_record_new_cond (UNLT_EXPR, op0, op1,
1500 &edge_info->cond_equivalences);
1501 build_and_record_new_cond (UNGT_EXPR, op0, op1,
1502 &edge_info->cond_equivalences);
1503 break;
1505 case UNLT_EXPR:
1506 case UNGT_EXPR:
1507 build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
1508 ? UNLE_EXPR : UNGE_EXPR),
1509 op0, op1, &edge_info->cond_equivalences);
1510 build_and_record_new_cond (NE_EXPR, op0, op1,
1511 &edge_info->cond_equivalences);
1512 break;
1514 case UNEQ_EXPR:
1515 build_and_record_new_cond (UNLE_EXPR, op0, op1,
1516 &edge_info->cond_equivalences);
1517 build_and_record_new_cond (UNGE_EXPR, op0, op1,
1518 &edge_info->cond_equivalences);
1519 break;
1521 case LTGT_EXPR:
1522 build_and_record_new_cond (NE_EXPR, op0, op1,
1523 &edge_info->cond_equivalences);
1524 build_and_record_new_cond (ORDERED_EXPR, op0, op1,
1525 &edge_info->cond_equivalences);
1526 break;
1528 default:
1529 break;
1532 /* Now push the original true and false conditions themselves onto
1533 the vector as well. */
1534 initialize_expr_from_cond (cond, &c.cond);
1535 c.value = boolean_true_node;
1536 edge_info->cond_equivalences.safe_push (c);
1538 /* It is possible for INVERTED to be the negation of a comparison,
1539 and not a valid RHS or GIMPLE_COND condition. This happens because
1540 invert_truthvalue may return such an expression when asked to invert
1541 a floating-point comparison. These comparisons are not assumed to
1542 obey the trichotomy law. */
1543 initialize_expr_from_cond (inverted, &c.cond);
1544 c.value = boolean_false_node;
1545 edge_info->cond_equivalences.safe_push (c);
1548 /* A helper function for record_const_or_copy and record_equality.
1549 Do the work of recording the value and undo info. */
1551 static void
1552 record_const_or_copy_1 (tree x, tree y, tree prev_x)
1554 set_ssa_name_value (x, y);
1556 if (dump_file && (dump_flags & TDF_DETAILS))
1558 fprintf (dump_file, "0>>> COPY ");
1559 print_generic_expr (dump_file, x, 0);
1560 fprintf (dump_file, " = ");
1561 print_generic_expr (dump_file, y, 0);
1562 fprintf (dump_file, "\n");
1565 const_and_copies_stack.reserve (2);
1566 const_and_copies_stack.quick_push (prev_x);
1567 const_and_copies_stack.quick_push (x);
1570 /* Record that X is equal to Y in const_and_copies. Record undo
1571 information in the block-local vector. */
1573 static void
1574 record_const_or_copy (tree x, tree y)
1576 tree prev_x = SSA_NAME_VALUE (x);
1578 gcc_assert (TREE_CODE (x) == SSA_NAME);
1580 if (TREE_CODE (y) == SSA_NAME)
1582 tree tmp = SSA_NAME_VALUE (y);
1583 if (tmp)
1584 y = tmp;
1587 record_const_or_copy_1 (x, y, prev_x);
1590 /* Return the loop depth of the basic block of the defining statement of X.
1591 This number should not be treated as absolutely correct because the loop
1592 information may not be completely up-to-date when dom runs. However, it
1593 will be relatively correct, and as more passes are taught to keep loop info
1594 up to date, the result will become more and more accurate. */
1596 static int
1597 loop_depth_of_name (tree x)
1599 gimple defstmt;
1600 basic_block defbb;
1602 /* If it's not an SSA_NAME, we have no clue where the definition is. */
1603 if (TREE_CODE (x) != SSA_NAME)
1604 return 0;
1606 /* Otherwise return the loop depth of the defining statement's bb.
1607 Note that there may not actually be a bb for this statement, if the
1608 ssa_name is live on entry. */
1609 defstmt = SSA_NAME_DEF_STMT (x);
1610 defbb = gimple_bb (defstmt);
1611 if (!defbb)
1612 return 0;
1614 return bb_loop_depth (defbb);
1617 /* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
1618 This constrains the cases in which we may treat this as assignment. */
1620 static void
1621 record_equality (tree x, tree y)
1623 tree prev_x = NULL, prev_y = NULL;
1625 if (TREE_CODE (x) == SSA_NAME)
1626 prev_x = SSA_NAME_VALUE (x);
1627 if (TREE_CODE (y) == SSA_NAME)
1628 prev_y = SSA_NAME_VALUE (y);
1630 /* If one of the previous values is invariant, or invariant in more loops
1631 (by depth), then use that.
1632 Otherwise it doesn't matter which value we choose, just so
1633 long as we canonicalize on one value. */
1634 if (is_gimple_min_invariant (y))
1636 else if (is_gimple_min_invariant (x)
1637 /* ??? When threading over backedges the following is important
1638 for correctness. See PR61757. */
1639 || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
1640 prev_x = x, x = y, y = prev_x, prev_x = prev_y;
1641 else if (prev_x && is_gimple_min_invariant (prev_x))
1642 x = y, y = prev_x, prev_x = prev_y;
1643 else if (prev_y)
1644 y = prev_y;
1646 /* After the swapping, we must have one SSA_NAME. */
1647 if (TREE_CODE (x) != SSA_NAME)
1648 return;
1650 /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
1651 variable compared against zero. If we're honoring signed zeros,
1652 then we cannot record this value unless we know that the value is
1653 nonzero. */
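/* E.g. "if (x_1 == 0.0)" must not let us replace x_1 with 0.0 when signed
   zeros are honored, since x_1 may really be -0.0.  */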
1654 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
1655 && (TREE_CODE (y) != REAL_CST
1656 || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
1657 return;
1659 record_const_or_copy_1 (x, y, prev_x);
1662 /* Returns true when STMT is a simple iv increment. It detects the
1663 following situation:
1665 i_1 = phi (..., i_2)
1666 i_2 = i_1 +/- ... */
1668 bool
1669 simple_iv_increment_p (gimple stmt)
1671 enum tree_code code;
1672 tree lhs, preinc;
1673 gimple phi;
1674 size_t i;
1676 if (gimple_code (stmt) != GIMPLE_ASSIGN)
1677 return false;
1679 lhs = gimple_assign_lhs (stmt);
1680 if (TREE_CODE (lhs) != SSA_NAME)
1681 return false;
1683 code = gimple_assign_rhs_code (stmt);
1684 if (code != PLUS_EXPR
1685 && code != MINUS_EXPR
1686 && code != POINTER_PLUS_EXPR)
1687 return false;
1689 preinc = gimple_assign_rhs1 (stmt);
1690 if (TREE_CODE (preinc) != SSA_NAME)
1691 return false;
1693 phi = SSA_NAME_DEF_STMT (preinc);
1694 if (gimple_code (phi) != GIMPLE_PHI)
1695 return false;
1697 for (i = 0; i < gimple_phi_num_args (phi); i++)
1698 if (gimple_phi_arg_def (phi, i) == lhs)
1699 return true;
1701 return false;
1704 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
1705 known value for that SSA_NAME (or NULL if no value is known).
1707 Propagate values from CONST_AND_COPIES into the PHI nodes of the
1708 successors of BB. */
1710 static void
1711 cprop_into_successor_phis (basic_block bb)
1713 edge e;
1714 edge_iterator ei;
1716 FOR_EACH_EDGE (e, ei, bb->succs)
1718 int indx;
1719 gimple_stmt_iterator gsi;
1721 /* If this is an abnormal edge, then we do not want to copy propagate
1722 into the PHI alternative associated with this edge. */
1723 if (e->flags & EDGE_ABNORMAL)
1724 continue;
1726 gsi = gsi_start_phis (e->dest);
1727 if (gsi_end_p (gsi))
1728 continue;
1730 /* We may have an equivalence associated with this edge. While
1731 we can not propagate it into non-dominated blocks, we can
1732 propagate it into PHIs in non-dominated blocks. */
1734 /* Push the unwind marker so we can reset the const and copies
1735 table back to its original state after processing this edge. */
1736 const_and_copies_stack.safe_push (NULL_TREE);
1738 /* Extract and record any simple NAME = VALUE equivalences.
1740 Don't bother with [01] = COND equivalences, they're not useful
1741 here. */
1742 struct edge_info *edge_info = (struct edge_info *) e->aux;
1743 if (edge_info)
1745 tree lhs = edge_info->lhs;
1746 tree rhs = edge_info->rhs;
1748 if (lhs && TREE_CODE (lhs) == SSA_NAME)
1749 record_const_or_copy (lhs, rhs);
1752 indx = e->dest_idx;
1753 for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
1755 tree new_val;
1756 use_operand_p orig_p;
1757 tree orig_val;
1758 gimple phi = gsi_stmt (gsi);
1760 /* The alternative may be associated with a constant, so verify
1761 it is an SSA_NAME before doing anything with it. */
1762 orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
1763 orig_val = get_use_from_ptr (orig_p);
1764 if (TREE_CODE (orig_val) != SSA_NAME)
1765 continue;
1767 /* If we have *ORIG_P in our constant/copy table, then replace
1768 ORIG_P with its value in our constant/copy table. */
1769 new_val = SSA_NAME_VALUE (orig_val);
1770 if (new_val
1771 && new_val != orig_val
1772 && (TREE_CODE (new_val) == SSA_NAME
1773 || is_gimple_min_invariant (new_val))
1774 && may_propagate_copy (orig_val, new_val))
1775 propagate_value (orig_p, new_val);
1778 restore_vars_to_original_value ();
1782 /* We have finished optimizing BB, record any information implied by
1783 taking a specific outgoing edge from BB. */
1785 static void
1786 record_edge_info (basic_block bb)
1788 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1789 struct edge_info *edge_info;
1791 if (! gsi_end_p (gsi))
1793 gimple stmt = gsi_stmt (gsi);
1794 location_t loc = gimple_location (stmt);
1796 if (gimple_code (stmt) == GIMPLE_SWITCH)
1798 tree index = gimple_switch_index (stmt);
1800 if (TREE_CODE (index) == SSA_NAME)
1802 int i;
1803 int n_labels = gimple_switch_num_labels (stmt);
1804 tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
1805 edge e;
1806 edge_iterator ei;
1808 for (i = 0; i < n_labels; i++)
1810 tree label = gimple_switch_label (stmt, i);
1811 basic_block target_bb = label_to_block (CASE_LABEL (label));
1812 if (CASE_HIGH (label)
1813 || !CASE_LOW (label)
1814 || info[target_bb->index])
1815 info[target_bb->index] = error_mark_node;
1816 else
1817 info[target_bb->index] = label;
1820 FOR_EACH_EDGE (e, ei, bb->succs)
1822 basic_block target_bb = e->dest;
1823 tree label = info[target_bb->index];
1825 if (label != NULL && label != error_mark_node)
1827 tree x = fold_convert_loc (loc, TREE_TYPE (index),
1828 CASE_LOW (label));
1829 edge_info = allocate_edge_info (e);
1830 edge_info->lhs = index;
1831 edge_info->rhs = x;
1834 free (info);
1838 /* A COND_EXPR may create equivalences too. */
1839 if (gimple_code (stmt) == GIMPLE_COND)
1841 edge true_edge;
1842 edge false_edge;
1844 tree op0 = gimple_cond_lhs (stmt);
1845 tree op1 = gimple_cond_rhs (stmt);
1846 enum tree_code code = gimple_cond_code (stmt);
1848 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1850 /* Special case comparing booleans against a constant as we
1851 know the value of OP0 on both arms of the branch. i.e., we
1852 can record an equivalence for OP0 rather than COND. */
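/* For example, for "if (flag_4 == 0)" we record flag_4 = false on the
   true edge and flag_4 = true on the false edge.  */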
1853 if ((code == EQ_EXPR || code == NE_EXPR)
1854 && TREE_CODE (op0) == SSA_NAME
1855 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
1856 && is_gimple_min_invariant (op1))
1858 if (code == EQ_EXPR)
1860 edge_info = allocate_edge_info (true_edge);
1861 edge_info->lhs = op0;
1862 edge_info->rhs = (integer_zerop (op1)
1863 ? boolean_false_node
1864 : boolean_true_node);
1866 edge_info = allocate_edge_info (false_edge);
1867 edge_info->lhs = op0;
1868 edge_info->rhs = (integer_zerop (op1)
1869 ? boolean_true_node
1870 : boolean_false_node);
1872 else
1874 edge_info = allocate_edge_info (true_edge);
1875 edge_info->lhs = op0;
1876 edge_info->rhs = (integer_zerop (op1)
1877 ? boolean_true_node
1878 : boolean_false_node);
1880 edge_info = allocate_edge_info (false_edge);
1881 edge_info->lhs = op0;
1882 edge_info->rhs = (integer_zerop (op1)
1883 ? boolean_false_node
1884 : boolean_true_node);
1887 else if (is_gimple_min_invariant (op0)
1888 && (TREE_CODE (op1) == SSA_NAME
1889 || is_gimple_min_invariant (op1)))
1891 tree cond = build2 (code, boolean_type_node, op0, op1);
1892 tree inverted = invert_truthvalue_loc (loc, cond);
1893 bool can_infer_simple_equiv
1894 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
1895 && real_zerop (op0));
1896 struct edge_info *edge_info;
1898 edge_info = allocate_edge_info (true_edge);
1899 record_conditions (edge_info, cond, inverted);
1901 if (can_infer_simple_equiv && code == EQ_EXPR)
1903 edge_info->lhs = op1;
1904 edge_info->rhs = op0;
1907 edge_info = allocate_edge_info (false_edge);
1908 record_conditions (edge_info, inverted, cond);
1910 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1912 edge_info->lhs = op1;
1913 edge_info->rhs = op0;
1917 else if (TREE_CODE (op0) == SSA_NAME
1918 && (TREE_CODE (op1) == SSA_NAME
1919 || is_gimple_min_invariant (op1)))
1921 tree cond = build2 (code, boolean_type_node, op0, op1);
1922 tree inverted = invert_truthvalue_loc (loc, cond);
1923 bool can_infer_simple_equiv
1924 = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
1925 && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
1926 struct edge_info *edge_info;
1928 edge_info = allocate_edge_info (true_edge);
1929 record_conditions (edge_info, cond, inverted);
1931 if (can_infer_simple_equiv && code == EQ_EXPR)
1933 edge_info->lhs = op0;
1934 edge_info->rhs = op1;
1937 edge_info = allocate_edge_info (false_edge);
1938 record_conditions (edge_info, inverted, cond);
1940 if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
1942 edge_info->lhs = op0;
1943 edge_info->rhs = op1;
1948 /* ??? TRUTH_NOT_EXPR can create an equivalence too. */
1952 void
1953 dom_opt_dom_walker::before_dom_children (basic_block bb)
1955 gimple_stmt_iterator gsi;
1957 if (dump_file && (dump_flags & TDF_DETAILS))
1958 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1960 /* Push a marker on the stacks of local information so that we know how
1961 far to unwind when we finalize this block. */
1962 avail_exprs_stack.safe_push (NULL);
1963 const_and_copies_stack.safe_push (NULL_TREE);
1965 record_equivalences_from_incoming_edge (bb);
1967 /* PHI nodes can create equivalences too. */
1968 record_equivalences_from_phis (bb);
1970 /* Create equivalences from redundant PHIs. PHIs are only truly
1971 redundant when they exist in the same block, so push another
1972 marker and unwind right afterwards. */
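/* As an example of a redundant PHI pair in one block:

     x_5 = PHI <a_1 (2), b_3 (4)>
     y_6 = PHI <a_1 (2), b_3 (4)>

   the second PHI computes the same value as the first, so uses of
   y_6 can be rewritten to use x_5.  */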
1973 avail_exprs_stack.safe_push (NULL);
1974 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1975 eliminate_redundant_computations (&gsi);
1976 remove_local_expressions_from_table ();
1978 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1979 optimize_stmt (bb, gsi);
1981 /* Now prepare to process dominated blocks. */
1982 record_edge_info (bb);
1983 cprop_into_successor_phis (bb);
1986 /* We have finished processing the dominator children of BB, perform
1987 any finalization actions in preparation for leaving this node in
1988 the dominator tree. */
1990 void
1991 dom_opt_dom_walker::after_dom_children (basic_block bb)
1993 gimple last;
1995 /* If we have an outgoing edge to a block with multiple incoming and
1996 outgoing edges, then we may be able to thread the edge, i.e., we
1997 may be able to statically determine which of the outgoing edges
1998 will be traversed when the incoming edge from BB is traversed. */
1999 if (single_succ_p (bb)
2000 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2001 && potentially_threadable_block (single_succ (bb)))
2003 thread_across_edge (single_succ_edge (bb));
2005 else if ((last = last_stmt (bb))
2006 && gimple_code (last) == GIMPLE_COND
2007 && EDGE_COUNT (bb->succs) == 2
2008 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2009 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2011 edge true_edge, false_edge;
2013 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2015 /* Only try to thread the edge if it reaches a target block with
2016 more than one predecessor and more than one successor. */
2017 if (potentially_threadable_block (true_edge->dest))
2018 thread_across_edge (true_edge);
2020 /* Similarly for the ELSE arm. */
2021 if (potentially_threadable_block (false_edge->dest))
2022 thread_across_edge (false_edge);
2026 /* These remove expressions local to BB from the tables. */
2027 remove_local_expressions_from_table ();
2028 restore_vars_to_original_value ();
2031 /* Search for redundant computations in STMT. If any are found, then
2032 replace them with the variable holding the result of the computation.
2034 If safe, record this expression into the available expression hash
2035 table. */
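/* For instance, if this block or a dominating block already contains

     a_1 = b_2 + c_3;

   then a later occurrence

     d_4 = b_2 + c_3;

   is redundant and can be rewritten as d_4 = a_1.  */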
2037 static void
2038 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2040 tree expr_type;
2041 tree cached_lhs;
2042 tree def;
2043 bool insert = true;
2044 bool assigns_var_p = false;
2046 gimple stmt = gsi_stmt (*gsi);
2048 if (gimple_code (stmt) == GIMPLE_PHI)
2049 def = gimple_phi_result (stmt);
2050 else
2051 def = gimple_get_lhs (stmt);
2053 /* Certain expressions on the RHS can be optimized away, but can not
2054 themselves be entered into the hash tables. */
2055 if (! def
2056 || TREE_CODE (def) != SSA_NAME
2057 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2058 || gimple_vdef (stmt)
2059 /* Do not record equivalences for increments of ivs. This would create
2060 overlapping live ranges for a very questionable gain. */
2061 || simple_iv_increment_p (stmt))
2062 insert = false;
2064 /* Check if the expression has been computed before. */
2065 cached_lhs = lookup_avail_expr (stmt, insert);
2067 opt_stats.num_exprs_considered++;
2069 /* Get the type of the expression we are trying to optimize. */
2070 if (is_gimple_assign (stmt))
2072 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2073 assigns_var_p = true;
2075 else if (gimple_code (stmt) == GIMPLE_COND)
2076 expr_type = boolean_type_node;
2077 else if (is_gimple_call (stmt))
2079 gcc_assert (gimple_call_lhs (stmt));
2080 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2081 assigns_var_p = true;
2083 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2084 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2085 else if (gimple_code (stmt) == GIMPLE_PHI)
2086 /* We can't propagate into a phi, so the logic below doesn't apply.
2087 Instead record an equivalence between the cached LHS and the
2088 PHI result of this statement, provided they are in the same block.
2089 This should be sufficient to kill the redundant phi. */
2091 if (def && cached_lhs)
2092 record_const_or_copy (def, cached_lhs);
2093 return;
2095 else
2096 gcc_unreachable ();
2098 if (!cached_lhs)
2099 return;
2101 /* It is safe to ignore types here since we have already done
2102 type checking in the hashing and equality routines. In fact
2103 type checking here merely gets in the way of constant
2104 propagation. Also, make sure that it is safe to propagate
2105 CACHED_LHS into the expression in STMT. */
2106 if ((TREE_CODE (cached_lhs) != SSA_NAME
2107 && (assigns_var_p
2108 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2109 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2111 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2112 || is_gimple_min_invariant (cached_lhs));
2114 if (dump_file && (dump_flags & TDF_DETAILS))
2116 fprintf (dump_file, " Replaced redundant expr '");
2117 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2118 fprintf (dump_file, "' with '");
2119 print_generic_expr (dump_file, cached_lhs, dump_flags);
2120 fprintf (dump_file, "'\n");
2123 opt_stats.num_re++;
2125 if (assigns_var_p
2126 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2127 cached_lhs = fold_convert (expr_type, cached_lhs);
2129 propagate_tree_value_into_stmt (gsi, cached_lhs);
2131 /* Since it is always necessary to mark the result as modified,
2132 perhaps we should move this into propagate_tree_value_into_stmt
2133 itself. */
2134 gimple_set_modified (gsi_stmt (*gsi), true);
2138 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2139 the available expressions table or the const_and_copies table.
2140 Detect and record those equivalences. */
2141 /* We handle only very simple copy equivalences here. The heavy
2142 lifting is done by eliminate_redundant_computations. */
2144 static void
2145 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2147 tree lhs;
2148 enum tree_code lhs_code;
2150 gcc_assert (is_gimple_assign (stmt));
2152 lhs = gimple_assign_lhs (stmt);
2153 lhs_code = TREE_CODE (lhs);
2155 if (lhs_code == SSA_NAME
2156 && gimple_assign_single_p (stmt))
2158 tree rhs = gimple_assign_rhs1 (stmt);
2160 /* If the RHS of the assignment is a constant or another variable that
2161 may be propagated, register it in the CONST_AND_COPIES table. We
2162 do not need to record unwind data for this, since this is a true
2163 assignment and not an equivalence inferred from a comparison. All
2164 uses of this ssa name are dominated by this assignment, so unwinding
2165 just costs time and space. */
2166 if (may_optimize_p
2167 && (TREE_CODE (rhs) == SSA_NAME
2168 || is_gimple_min_invariant (rhs)))
2170 if (dump_file && (dump_flags & TDF_DETAILS))
2172 fprintf (dump_file, "==== ASGN ");
2173 print_generic_expr (dump_file, lhs, 0);
2174 fprintf (dump_file, " = ");
2175 print_generic_expr (dump_file, rhs, 0);
2176 fprintf (dump_file, "\n");
2179 set_ssa_name_value (lhs, rhs);
2183 /* A memory store, even an aliased store, creates a useful
2184 equivalence. By exchanging the LHS and RHS, creating suitable
2185 vops and recording the result in the available expression table,
2186 we may be able to expose more redundant loads. */
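/* For example, the store

     *p_1 = x_2;

   is entered as if it were the load x_2 = *p_1 (using the store's
   VDEF as the VUSE), so a subsequent load y_3 = *p_1 from the same
   memory state can be replaced by y_3 = x_2.  */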
2187 if (!gimple_has_volatile_ops (stmt)
2188 && gimple_references_memory_p (stmt)
2189 && gimple_assign_single_p (stmt)
2190 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2191 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2192 && !is_gimple_reg (lhs))
2194 tree rhs = gimple_assign_rhs1 (stmt);
2195 gimple new_stmt;
2197 /* Build a new statement with the RHS and LHS exchanged. */
2198 if (TREE_CODE (rhs) == SSA_NAME)
2200 /* NOTE tuples. The call to gimple_build_assign below replaced
2201 a call to build_gimple_modify_stmt, which did not set the
2202 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2203 may cause an SSA validation failure, as the LHS may be a
2204 default-initialized name and should have no definition. I'm
2205 a bit dubious of this, as the artificial statement that we
2206 generate here may in fact be ill-formed, but it is simply
2207 used as an internal device in this pass, and never becomes
2208 part of the CFG. */
2209 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2210 new_stmt = gimple_build_assign (rhs, lhs);
2211 SSA_NAME_DEF_STMT (rhs) = defstmt;
2213 else
2214 new_stmt = gimple_build_assign (rhs, lhs);
2216 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2218 /* Finally enter the statement into the available expression
2219 table. */
2220 lookup_avail_expr (new_stmt, true);
2224 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2225 CONST_AND_COPIES. */
2227 static void
2228 cprop_operand (gimple stmt, use_operand_p op_p)
2230 tree val;
2231 tree op = USE_FROM_PTR (op_p);
2233 /* If the operand has a known constant value or it is known to be a
2234 copy of some other variable, use the value or copy stored in
2235 CONST_AND_COPIES. */
2236 val = SSA_NAME_VALUE (op);
2237 if (val && val != op)
2239 /* Do not replace hard register operands in asm statements. */
2240 if (gimple_code (stmt) == GIMPLE_ASM
2241 && !may_propagate_copy_into_asm (op))
2242 return;
2244 /* Certain operands are not allowed to be copy propagated due
2245 to their interaction with exception handling and some GCC
2246 extensions. */
2247 if (!may_propagate_copy (op, val))
2248 return;
2250 /* Do not propagate copies into simple IV increment statements.
2251 See PR23821 for how this can disturb IV analysis. */
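/* For instance, if i_1 is known to be a copy of j_5, rewriting the
   increment i_2 = i_1 + 1 as i_2 = j_5 + 1 would obscure the
   induction variable from later IV analysis, so only constants are
   substituted here.  */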
2252 if (TREE_CODE (val) != INTEGER_CST
2253 && simple_iv_increment_p (stmt))
2254 return;
2256 /* Dump details. */
2257 if (dump_file && (dump_flags & TDF_DETAILS))
2259 fprintf (dump_file, " Replaced '");
2260 print_generic_expr (dump_file, op, dump_flags);
2261 fprintf (dump_file, "' with %s '",
2262 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2263 print_generic_expr (dump_file, val, dump_flags);
2264 fprintf (dump_file, "'\n");
2267 if (TREE_CODE (val) != SSA_NAME)
2268 opt_stats.num_const_prop++;
2269 else
2270 opt_stats.num_copy_prop++;
2272 propagate_value (op_p, val);
2274 /* And note that we modified this statement. This is now
2275 safe, even if we changed virtual operands since we will
2276 rescan the statement and rewrite its operands again. */
2277 gimple_set_modified (stmt, true);
2281 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2282 known value for that SSA_NAME (or NULL if no value is known).
2284 Propagate values from CONST_AND_COPIES into the uses, vuses and
2285 vdef_ops of STMT. */
2287 static void
2288 cprop_into_stmt (gimple stmt)
2290 use_operand_p op_p;
2291 ssa_op_iter iter;
2293 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2294 cprop_operand (stmt, op_p);
2297 /* Optimize the statement pointed to by iterator SI.
2299 We try to perform some simplistic global redundancy elimination and
2300 constant propagation:
2302 1- To detect global redundancy, we keep track of expressions that have
2303 been computed in this block and its dominators. If we find that the
2304 same expression is computed more than once, we eliminate repeated
2305 computations by using the target of the first one.
2307 2- Constant values and copy assignments. This is used to do very
2308 simplistic constant and copy propagation. When a constant or copy
2309 assignment is found, we map the value on the RHS of the assignment to
2310 the variable in the LHS in the CONST_AND_COPIES table. */
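/* A small example of both effects: after seeing x_1 = 4; the table
   maps x_1 to 4, so a dominated statement y_2 = x_1 + z_3; is first
   rewritten to y_2 = 4 + z_3; and then considered for folding and
   redundancy elimination.  */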
2312 static void
2313 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2315 gimple stmt, old_stmt;
2316 bool may_optimize_p;
2317 bool modified_p = false;
2319 old_stmt = stmt = gsi_stmt (si);
2321 if (dump_file && (dump_flags & TDF_DETAILS))
2323 fprintf (dump_file, "Optimizing statement ");
2324 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2327 if (gimple_code (stmt) == GIMPLE_COND)
2328 canonicalize_comparison (stmt);
2330 update_stmt_if_modified (stmt);
2331 opt_stats.num_stmts++;
2333 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2334 cprop_into_stmt (stmt);
2336 /* If the statement has been modified with constant replacements,
2337 fold its RHS before checking for redundant computations. */
2338 if (gimple_modified_p (stmt))
2340 tree rhs = NULL;
2342 /* Try to fold the statement making sure that STMT is kept
2343 up to date. */
2344 if (fold_stmt (&si))
2346 stmt = gsi_stmt (si);
2347 gimple_set_modified (stmt, true);
2349 if (dump_file && (dump_flags & TDF_DETAILS))
2351 fprintf (dump_file, " Folded to: ");
2352 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2356 /* We only need to consider cases that can yield a gimple operand. */
2357 if (gimple_assign_single_p (stmt))
2358 rhs = gimple_assign_rhs1 (stmt);
2359 else if (gimple_code (stmt) == GIMPLE_GOTO)
2360 rhs = gimple_goto_dest (stmt);
2361 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2362 /* This should never be an ADDR_EXPR. */
2363 rhs = gimple_switch_index (stmt);
2365 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2366 recompute_tree_invariant_for_addr_expr (rhs);
2368 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2369 even if fold_stmt updated the stmt already and thus cleared
2370 gimple_modified_p flag on it. */
2371 modified_p = true;
2374 /* Check for redundant computations.  Do this optimization only for
2375 assignments, calls with an LHS, conditionals and switches that have no side effects. */
2376 may_optimize_p = (!gimple_has_side_effects (stmt)
2377 && (is_gimple_assign (stmt)
2378 || (is_gimple_call (stmt)
2379 && gimple_call_lhs (stmt) != NULL_TREE)
2380 || gimple_code (stmt) == GIMPLE_COND
2381 || gimple_code (stmt) == GIMPLE_SWITCH));
2383 if (may_optimize_p)
2385 if (gimple_code (stmt) == GIMPLE_CALL)
2387 /* Resolve __builtin_constant_p. If it hasn't been
2388 folded to integer_one_node by now, it's fairly
2389 certain that the value simply isn't constant. */
2390 tree callee = gimple_call_fndecl (stmt);
2391 if (callee
2392 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2393 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2395 propagate_tree_value_into_stmt (&si, integer_zero_node);
2396 stmt = gsi_stmt (si);
2400 update_stmt_if_modified (stmt);
2401 eliminate_redundant_computations (&si);
2402 stmt = gsi_stmt (si);
2404 /* Perform simple redundant store elimination. */
2405 if (gimple_assign_single_p (stmt)
2406 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2408 tree lhs = gimple_assign_lhs (stmt);
2409 tree rhs = gimple_assign_rhs1 (stmt);
2410 tree cached_lhs;
2411 gimple new_stmt;
2412 if (TREE_CODE (rhs) == SSA_NAME)
2414 tree tem = SSA_NAME_VALUE (rhs);
2415 if (tem)
2416 rhs = tem;
2418 /* Build a new statement with the RHS and LHS exchanged. */
2419 if (TREE_CODE (rhs) == SSA_NAME)
2421 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2422 new_stmt = gimple_build_assign (rhs, lhs);
2423 SSA_NAME_DEF_STMT (rhs) = defstmt;
2425 else
2426 new_stmt = gimple_build_assign (rhs, lhs);
2427 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2428 cached_lhs = lookup_avail_expr (new_stmt, false);
2429 if (cached_lhs
2430 && rhs == cached_lhs)
2432 basic_block bb = gimple_bb (stmt);
2433 unlink_stmt_vdef (stmt);
2434 if (gsi_remove (&si, true))
2436 bitmap_set_bit (need_eh_cleanup, bb->index);
2437 if (dump_file && (dump_flags & TDF_DETAILS))
2438 fprintf (dump_file, " Flagged to clear EH edges.\n");
2440 release_defs (stmt);
2441 return;
2446 /* Record any additional equivalences created by this statement. */
2447 if (is_gimple_assign (stmt))
2448 record_equivalences_from_stmt (stmt, may_optimize_p);
2450 /* If STMT is a COND_EXPR and it was modified, then we may know
2451 where it goes. If that is the case, then mark the CFG as altered.
2453 This will cause us to later call remove_unreachable_blocks and
2454 cleanup_tree_cfg when it is safe to do so. It is not safe to
2455 clean things up here since removal of edges and such can trigger
2456 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2457 the manager.
2459 That's all fine and good, except that once SSA_NAMEs are released
2460 to the manager, we must not call create_ssa_name until all references
2461 to released SSA_NAMEs have been eliminated.
2463 All references to the deleted SSA_NAMEs can not be eliminated until
2464 we remove unreachable blocks.
2466 We can not remove unreachable blocks until after we have completed
2467 any queued jump threading.
2469 We can not complete any queued jump threads until we have taken
2470 appropriate variables out of SSA form. Taking variables out of
2471 SSA form can call create_ssa_name and thus we lose.
2473 Ultimately I suspect we're going to need to change the interface
2474 into the SSA_NAME manager. */
2475 if (gimple_modified_p (stmt) || modified_p)
2477 tree val = NULL;
2479 update_stmt_if_modified (stmt);
2481 if (gimple_code (stmt) == GIMPLE_COND)
2482 val = fold_binary_loc (gimple_location (stmt),
2483 gimple_cond_code (stmt), boolean_type_node,
2484 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2485 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2486 val = gimple_switch_index (stmt);
2488 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2489 cfg_altered = true;
2491 /* If we simplified a statement in such a way as to be shown that it
2492 cannot trap, update the eh information and the cfg to match. */
2493 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2495 bitmap_set_bit (need_eh_cleanup, bb->index);
2496 if (dump_file && (dump_flags & TDF_DETAILS))
2497 fprintf (dump_file, " Flagged to clear EH edges.\n");
2502 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2503 If found, return its LHS. Otherwise insert STMT in the table and
2504 return NULL_TREE.
2506 Also, when an expression is first inserted in the table, it is also
2507 added to AVAIL_EXPRS_STACK, so that it can be removed when
2508 we finish processing this block and its children. */
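/* Roughly, for a_1 = b_2 + c_3 the table key is the hashable
   expression (PLUS_EXPR, b_2, c_3) and the recorded LHS is a_1; a
   later lookup of the same expression therefore returns a_1 (or its
   current value from CONST_AND_COPIES).  */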
2510 static tree
2511 lookup_avail_expr (gimple stmt, bool insert)
2513 expr_hash_elt **slot;
2514 tree lhs;
2515 tree temp;
2516 struct expr_hash_elt element;
2518 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2519 if (gimple_code (stmt) == GIMPLE_PHI)
2520 lhs = gimple_phi_result (stmt);
2521 else
2522 lhs = gimple_get_lhs (stmt);
2524 initialize_hash_element (stmt, lhs, &element);
2526 if (dump_file && (dump_flags & TDF_DETAILS))
2528 fprintf (dump_file, "LKUP ");
2529 print_expr_hash_elt (dump_file, &element);
2532 /* Don't bother remembering constant assignments and copy operations.
2533 Constants and copy operations are handled by the constant/copy propagator
2534 in optimize_stmt. */
2535 if (element.expr.kind == EXPR_SINGLE
2536 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2537 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2538 return NULL_TREE;
2540 /* Finally try to find the expression in the main expression hash table. */
2541 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2542 if (slot == NULL)
2544 free_expr_hash_elt_contents (&element);
2545 return NULL_TREE;
2547 else if (*slot == NULL)
2549 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2550 *element2 = element;
2551 element2->stamp = element2;
2552 *slot = element2;
2554 if (dump_file && (dump_flags & TDF_DETAILS))
2556 fprintf (dump_file, "2>>> ");
2557 print_expr_hash_elt (dump_file, element2);
2560 avail_exprs_stack.safe_push (element2);
2561 return NULL_TREE;
2563 else
2564 free_expr_hash_elt_contents (&element);
2566 /* Extract the LHS of the assignment so that it can be used as the current
2567 definition of another variable. */
2568 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2570 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2571 use the value from the const_and_copies table. */
2572 if (TREE_CODE (lhs) == SSA_NAME)
2574 temp = SSA_NAME_VALUE (lhs);
2575 if (temp)
2576 lhs = temp;
2579 if (dump_file && (dump_flags & TDF_DETAILS))
2581 fprintf (dump_file, "FIND: ");
2582 print_generic_expr (dump_file, lhs, 0);
2583 fprintf (dump_file, "\n");
2586 return lhs;
2589 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2590 for expressions using the code of the expression and the SSA numbers of
2591 its operands. */
2593 static hashval_t
2594 avail_expr_hash (const void *p)
2596 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2597 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2598 tree vuse;
2599 inchash::hash hstate;
2601 inchash::add_hashable_expr (expr, hstate);
2603 /* If the hash table entry is not associated with a statement, then we
2604 can just hash the expression and not worry about virtual operands
2605 and such. */
2606 if (!stmt)
2607 return hstate.end ();
2609 /* Add the SSA version number of the vuse operand. This is important
2610 because compound variables like arrays are not renamed in the
2611 operands. Rather, the rename is done on the virtual variable
2612 representing all the elements of the array. */
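/* For example, two loads of a[i_1] separated by a store receive
   different virtual use operands, hash differently, and are therefore
   not treated as the same available expression.  */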
2613 if ((vuse = gimple_vuse (stmt)))
2614 inchash::add_expr (vuse, hstate);
2616 return hstate.end ();
2619 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2620 up degenerate PHIs created by or exposed by jump threading. */
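/* A degenerate PHI is one whose arguments are all the same, e.g.

     x_5 = PHI <y_2 (3), y_2 (7)>

   which is equivalent to the copy x_5 = y_2 and can be propagated
   away.  */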
2622 /* Given a statement STMT, which is either a PHI node or an assignment,
2623 remove it from the IL. */
2625 static void
2626 remove_stmt_or_phi (gimple stmt)
2628 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2630 if (gimple_code (stmt) == GIMPLE_PHI)
2631 remove_phi_node (&gsi, true);
2632 else
2634 gsi_remove (&gsi, true);
2635 release_defs (stmt);
2639 /* Given a statement STMT, which is either a PHI node or an assignment,
2640 return the "rhs" of the node.  In the case of a non-degenerate
2641 PHI, NULL is returned.  */
2643 static tree
2644 get_rhs_or_phi_arg (gimple stmt)
2646 if (gimple_code (stmt) == GIMPLE_PHI)
2647 return degenerate_phi_result (stmt);
2648 else if (gimple_assign_single_p (stmt))
2649 return gimple_assign_rhs1 (stmt);
2650 else
2651 gcc_unreachable ();
2655 /* Given a statement STMT, which is either a PHI node or an assignment,
2656 return the "lhs" of the node. */
2658 static tree
2659 get_lhs_or_phi_result (gimple stmt)
2661 if (gimple_code (stmt) == GIMPLE_PHI)
2662 return gimple_phi_result (stmt);
2663 else if (is_gimple_assign (stmt))
2664 return gimple_assign_lhs (stmt);
2665 else
2666 gcc_unreachable ();
2669 /* Propagate RHS into all uses of LHS (when possible).
2671 RHS and LHS are derived from STMT, which is passed in solely so
2672 that we can remove it if propagation is successful.
2674 When propagating into a PHI node or into a statement which turns
2675 into a trivial copy or constant initialization, set the
2676 appropriate bit in INTERESTING_NAMEs so that we will visit those
2677 nodes as well in an effort to pick up secondary optimization
2678 opportunities. */
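/* For example, propagating the constant 5 for x_1 may turn a use

     y_2 = x_1;

   into the trivial initialization y_2 = 5;, so y_2's version is added
   to INTERESTING_NAMES and revisited later.  */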
2680 static void
2681 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2683 /* First verify that propagation is valid. */
2684 if (may_propagate_copy (lhs, rhs))
2686 use_operand_p use_p;
2687 imm_use_iterator iter;
2688 gimple use_stmt;
2689 bool all = true;
2691 /* Dump details. */
2692 if (dump_file && (dump_flags & TDF_DETAILS))
2694 fprintf (dump_file, " Replacing '");
2695 print_generic_expr (dump_file, lhs, dump_flags);
2696 fprintf (dump_file, "' with %s '",
2697 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2698 print_generic_expr (dump_file, rhs, dump_flags);
2699 fprintf (dump_file, "'\n");
2702 /* Walk over every use of LHS and try to replace the use with RHS.
2703 At this point the only reason why such a propagation would not
2704 be successful would be if the use occurs in an ASM_EXPR. */
2705 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2707 /* Leave debug stmts alone. If we succeed in propagating
2708 all non-debug uses, we'll drop the DEF, and propagation
2709 into debug stmts will occur then. */
2710 if (gimple_debug_bind_p (use_stmt))
2711 continue;
2713 /* It's not always safe to propagate into an ASM_EXPR. */
2714 if (gimple_code (use_stmt) == GIMPLE_ASM
2715 && ! may_propagate_copy_into_asm (lhs))
2717 all = false;
2718 continue;
2721 /* It's not ok to propagate into the definition stmt of RHS.
2722 <bb 9>:
2723 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2724 g_67.1_6 = prephitmp.12_36;
2725 goto <bb 9>;
2726 While this is strictly all dead code, we do not want to
2727 deal with it here.  */
2728 if (TREE_CODE (rhs) == SSA_NAME
2729 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2731 all = false;
2732 continue;
2735 /* Dump details. */
2736 if (dump_file && (dump_flags & TDF_DETAILS))
2738 fprintf (dump_file, " Original statement:");
2739 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2742 /* Propagate the RHS into this use of the LHS. */
2743 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2744 propagate_value (use_p, rhs);
2746 /* Special cases to avoid useless calls into the folding
2747 routines, operand scanning, etc.
2749 Propagation into a PHI may cause the PHI to become
2750 degenerate, so mark the PHI as interesting.  No other
2751 actions are necessary. */
2752 if (gimple_code (use_stmt) == GIMPLE_PHI)
2754 tree result;
2756 /* Dump details. */
2757 if (dump_file && (dump_flags & TDF_DETAILS))
2759 fprintf (dump_file, " Updated statement:");
2760 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2763 result = get_lhs_or_phi_result (use_stmt);
2764 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2765 continue;
2768 /* From this point onward we are propagating into a
2769 real statement.  Folding may (or may not) be possible;
2770 we may expose new operands, dead EH edges,
2771 etc. */
2772 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2773 cannot fold a call that simplifies to a constant,
2774 because the GIMPLE_CALL must be replaced by a
2775 GIMPLE_ASSIGN, and there is no way to effect such a
2776 transformation in-place. We might want to consider
2777 using the more general fold_stmt here. */
2779 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2780 fold_stmt_inplace (&gsi);
2783 /* Sometimes propagation can expose new operands to the
2784 renamer. */
2785 update_stmt (use_stmt);
2787 /* Dump details. */
2788 if (dump_file && (dump_flags & TDF_DETAILS))
2790 fprintf (dump_file, " Updated statement:");
2791 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2794 /* If we replaced a variable index with a constant, then
2795 we would need to update the invariant flag for ADDR_EXPRs. */
2796 if (gimple_assign_single_p (use_stmt)
2797 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2798 recompute_tree_invariant_for_addr_expr
2799 (gimple_assign_rhs1 (use_stmt));
2801 /* If we cleaned up EH information from the statement,
2802 mark its containing block as needing EH cleanups. */
2803 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2805 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2806 if (dump_file && (dump_flags & TDF_DETAILS))
2807 fprintf (dump_file, " Flagged to clear EH edges.\n");
2810 /* Propagation may expose new trivial copy/constant propagation
2811 opportunities. */
2812 if (gimple_assign_single_p (use_stmt)
2813 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2814 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2815 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2817 tree result = get_lhs_or_phi_result (use_stmt);
2818 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2821 /* Propagation into these nodes may make certain edges in
2822 the CFG unexecutable.  We want to identify them, as PHI nodes
2823 at the destinations of those unexecutable edges may become
2824 degenerate.  */
2825 else if (gimple_code (use_stmt) == GIMPLE_COND
2826 || gimple_code (use_stmt) == GIMPLE_SWITCH
2827 || gimple_code (use_stmt) == GIMPLE_GOTO)
2829 tree val;
2831 if (gimple_code (use_stmt) == GIMPLE_COND)
2832 val = fold_binary_loc (gimple_location (use_stmt),
2833 gimple_cond_code (use_stmt),
2834 boolean_type_node,
2835 gimple_cond_lhs (use_stmt),
2836 gimple_cond_rhs (use_stmt));
2837 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2838 val = gimple_switch_index (use_stmt);
2839 else
2840 val = gimple_goto_dest (use_stmt);
2842 if (val && is_gimple_min_invariant (val))
2844 basic_block bb = gimple_bb (use_stmt);
2845 edge te = find_taken_edge (bb, val);
2846 edge_iterator ei;
2847 edge e;
2848 gimple_stmt_iterator gsi, psi;
2850 /* Remove all outgoing edges except TE. */
2851 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2853 if (e != te)
2855 /* Mark all the PHI nodes at the destination of
2856 the unexecutable edge as interesting. */
2857 for (psi = gsi_start_phis (e->dest);
2858 !gsi_end_p (psi);
2859 gsi_next (&psi))
2861 gimple phi = gsi_stmt (psi);
2863 tree result = gimple_phi_result (phi);
2864 int version = SSA_NAME_VERSION (result);
2866 bitmap_set_bit (interesting_names, version);
2869 te->probability += e->probability;
2871 te->count += e->count;
2872 remove_edge (e);
2873 cfg_altered = true;
2875 else
2876 ei_next (&ei);
2879 gsi = gsi_last_bb (gimple_bb (use_stmt));
2880 gsi_remove (&gsi, true);
2882 /* And fixup the flags on the single remaining edge. */
2883 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2884 te->flags &= ~EDGE_ABNORMAL;
2885 te->flags |= EDGE_FALLTHRU;
2886 if (te->probability > REG_BR_PROB_BASE)
2887 te->probability = REG_BR_PROB_BASE;
2892 /* Ensure there is nothing else to do. */
2893 gcc_assert (!all || has_zero_uses (lhs));
2895 /* If we were able to propagate away all uses of LHS, then
2896 we can remove STMT. */
2897 if (all)
2898 remove_stmt_or_phi (stmt);
2902 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2903 a statement that is a trivial copy or constant initialization.
2905 Attempt to eliminate STMT by propagating its RHS into all uses of
2906 its LHS. This may in turn set new bits in INTERESTING_NAMES
2907 for nodes we want to revisit later.
2909 All exit paths should clear INTERESTING_NAMES for the result
2910 of STMT. */
2912 static void
2913 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2915 tree lhs = get_lhs_or_phi_result (stmt);
2916 tree rhs;
2917 int version = SSA_NAME_VERSION (lhs);
2919 /* If the LHS of this statement or PHI has no uses, then we can
2920 just eliminate it. This can occur if, for example, the PHI
2921 was created by block duplication due to threading and its only
2922 use was in the conditional at the end of the block which was
2923 deleted. */
2924 if (has_zero_uses (lhs))
2926 bitmap_clear_bit (interesting_names, version);
2927 remove_stmt_or_phi (stmt);
2928 return;
2931 /* Get the RHS of the assignment or PHI node if the PHI is a
2932 degenerate. */
2933 rhs = get_rhs_or_phi_arg (stmt);
2934 if (!rhs)
2936 bitmap_clear_bit (interesting_names, version);
2937 return;
2940 if (!virtual_operand_p (lhs))
2941 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2942 else
2944 gimple use_stmt;
2945 imm_use_iterator iter;
2946 use_operand_p use_p;
2947 /* For virtual operands we have to propagate into all uses as
2948 otherwise we will create overlapping live ranges. */
2949 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2950 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2951 SET_USE (use_p, rhs);
2952 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2953 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2954 remove_stmt_or_phi (stmt);
2957 /* Note that STMT may well have been deleted by now, so do
2958 not access it; instead use the saved version number to clear
2959 STMT's entry in the worklist.  */
2960 bitmap_clear_bit (interesting_names, version);
2963 /* The first phase in degenerate PHI elimination.
2965 Eliminate the degenerate PHIs in BB, then recurse on the
2966 dominator children of BB. */
2968 static void
2969 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2971 gimple_stmt_iterator gsi;
2972 basic_block son;
2974 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2976 gimple phi = gsi_stmt (gsi);
2978 eliminate_const_or_copy (phi, interesting_names);
2981 /* Recurse into the dominator children of BB. */
2982 for (son = first_dom_son (CDI_DOMINATORS, bb);
2983 son;
2984 son = next_dom_son (CDI_DOMINATORS, son))
2985 eliminate_degenerate_phis_1 (son, interesting_names);
2989 /* A very simple pass to eliminate degenerate PHI nodes from the
2990 IL. This is meant to be fast enough to be able to be run several
2991 times in the optimization pipeline.
2993 Certain optimizations, particularly those which duplicate blocks
2994 or remove edges from the CFG can create or expose PHIs which are
2995 trivial copies or constant initializations.
2997 While we could pick up these optimizations in DOM or with the
2998 combination of copy-prop and CCP, those solutions are far too
2999 heavy-weight for our needs.
3001 This implementation has two phases so that we can efficiently
3002 eliminate the first-order degenerate PHIs and second-order
3003 degenerate PHIs.
3005 The first phase performs a dominator walk to identify and eliminate
3006 the vast majority of the degenerate PHIs. When a degenerate PHI
3007 is identified and eliminated any affected statements or PHIs
3008 are put on a worklist.
3010 The second phase eliminates degenerate PHIs and trivial copies
3011 or constant initializations using the worklist. This is how we
3012 pick up the secondary optimization opportunities with minimal
3013 cost. */
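/* Sketch of a second-order opportunity: eliminating

     x_5 = PHI <y_2 (2), y_2 (3)>

   replaces x_5 with y_2 everywhere, which may turn another PHI such as

     z_8 = PHI <x_5 (4), y_2 (5)>

   into a degenerate PHI as well; its version lands on the worklist and
   is handled by the second phase.  */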
3015 namespace {
3017 const pass_data pass_data_phi_only_cprop =
3019 GIMPLE_PASS, /* type */
3020 "phicprop", /* name */
3021 OPTGROUP_NONE, /* optinfo_flags */
3022 TV_TREE_PHI_CPROP, /* tv_id */
3023 ( PROP_cfg | PROP_ssa ), /* properties_required */
3024 0, /* properties_provided */
3025 0, /* properties_destroyed */
3026 0, /* todo_flags_start */
3027 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3030 class pass_phi_only_cprop : public gimple_opt_pass
3032 public:
3033 pass_phi_only_cprop (gcc::context *ctxt)
3034 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3037 /* opt_pass methods: */
3038 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3039 virtual bool gate (function *) { return flag_tree_dom != 0; }
3040 virtual unsigned int execute (function *);
3042 }; // class pass_phi_only_cprop
3044 unsigned int
3045 pass_phi_only_cprop::execute (function *fun)
3047 bitmap interesting_names;
3048 bitmap interesting_names1;
3050 /* Bitmap of blocks which need EH information updated. We can not
3051 update it on-the-fly as doing so invalidates the dominator tree. */
3052 need_eh_cleanup = BITMAP_ALLOC (NULL);
3054 /* INTERESTING_NAMES is effectively our worklist, indexed by
3055 SSA_NAME_VERSION.
3057 A set bit indicates that the statement or PHI node which
3058 defines the SSA_NAME should be (re)examined to determine if
3059 it has become a degenerate PHI or trivial const/copy propagation
3060 opportunity.
3062 Experiments have shown we generally get better compilation
3063 time behavior with bitmaps rather than sbitmaps. */
3064 interesting_names = BITMAP_ALLOC (NULL);
3065 interesting_names1 = BITMAP_ALLOC (NULL);
3067 calculate_dominance_info (CDI_DOMINATORS);
3068 cfg_altered = false;
3070 /* First phase. Eliminate degenerate PHIs via a dominator
3071 walk of the CFG.
3073 Experiments have indicated that we generally get better
3074 compile-time behavior by visiting blocks in the first
3075 phase in dominator order. Presumably this is because walking
3076 in dominator order leaves fewer PHIs for later examination
3077 by the worklist phase. */
3078 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3079 interesting_names);
3081 /* Second phase. Eliminate second order degenerate PHIs as well
3082 as trivial copies or constant initializations identified by
3083 the first phase or this phase. Basically we keep iterating
3084 until our set of INTERESTING_NAMEs is empty. */
3085 while (!bitmap_empty_p (interesting_names))
3087 unsigned int i;
3088 bitmap_iterator bi;
3090 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3091 changed during the loop. Copy it to another bitmap and
3092 use that. */
3093 bitmap_copy (interesting_names1, interesting_names);
3095 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3097 tree name = ssa_name (i);
3099 /* Ignore SSA_NAMEs that have been released because
3100 their defining statement was deleted (unreachable). */
3101 if (name)
3102 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3103 interesting_names);
3107 if (cfg_altered)
3109 free_dominance_info (CDI_DOMINATORS);
3110 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3111 loops_state_set (LOOPS_NEED_FIXUP);
3114 /* Propagation of const and copies may make some EH edges dead. Purge
3115 such edges from the CFG as needed. */
3116 if (!bitmap_empty_p (need_eh_cleanup))
3118 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3119 BITMAP_FREE (need_eh_cleanup);
3122 BITMAP_FREE (interesting_names);
3123 BITMAP_FREE (interesting_names1);
3124 return 0;
3127 } // anon namespace
3129 gimple_opt_pass *
3130 make_pass_phi_only_cprop (gcc::context *ctxt)
3132 return new pass_phi_only_cprop (ctxt);