/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2013 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
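
/* For instance (an illustrative mapping, not an exhaustive one): the
   GIMPLE assignment "x_1 = a_2 + b_3" is recorded as an EXPR_BINARY
   with op PLUS_EXPR and operands a_2/b_3, while "x_1 = foo (a_2)" is
   recorded as an EXPR_CALL carrying the call's argument vector.  */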
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */
struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt value_type;
  typedef expr_hash_elt compare_type;
  static inline hashval_t hash (const value_type *);
  static inline bool equal (const value_type *, const compare_type *);
  static inline void remove (value_type *);
};
inline hashval_t
expr_elt_hasher::hash (const value_type *p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type *p1, const compare_type *p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
              == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}
/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type *element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table <expr_elt_hasher> avail_exprs;
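
/* For example (illustrative GIMPLE): after recording "x_1 = a_2 + b_3"
   here, a later statement "y_4 = a_2 + b_3" with the same VUSE can be
   rewritten as the copy "y_4 = x_1".  */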
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;
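
/* Illustrative stack layout: entering a block pushes a NULL marker;
   recording "x_1 = 5" then pushes the pair (previous value of x_1,
   x_1), with x_1 on top.  Unwinding pops pairs and restores each
   SSA_NAME_VALUE until the NULL marker is reached.  */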
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *, hash_table <expr_elt_hasher>);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c.  */
static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
        val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
                                                expr->ops.binary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.binary.opnd0, val);
          val = iterative_hash_expr (expr->ops.binary.opnd1, val);
        }
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
                                                expr->ops.ternary.opnd1, val);
      else
        {
          val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
          val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
        }
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gimple fn_from;

        val = iterative_hash_object (code, val);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          val = iterative_hash_hashval_t
            ((hashval_t) gimple_call_internal_fn (fn_from), val);
        else
          val = iterative_hash_expr (gimple_call_fn (fn_from), val);
        for (i = 0; i < expr->ops.call.nargs; i++)
          val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gimple fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB (bb)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

static unsigned int
tree_ssa_dominator_optimize (void)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs.create (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through the loop header to the loop body,
     assuming that we update the loop info.  */
  loop_optimizer_init (LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (cfun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB (bb)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK (i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR)
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (cfun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (cfun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  avail_exprs.dispose ();

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}
static bool
gate_dominator (void)
{
  return flag_tree_dom != 0;
}
namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa
    | TODO_verify_ssa
    | TODO_verify_flow ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return tree_ssa_dominator_optimize (); }

}; // class pass_dominator

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
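
/* For instance (illustrative): a condition written as "if (5 < x_1)"
   is canonicalized by the function above to "if (x_1 > 5)", so later
   hash table lookups only ever see one form of the comparison.  */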
/* Remove the expressions recorded for the current block from the
   AVAIL_EXPRS hash table, popping entries off AVAIL_EXPRS_STACK until
   the NULL marker for the block is reached.  */
static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim);
        }

      slot = avail_exprs.find_slot_with_hash (victim, victim->hash, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs.clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
        break;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< COPY ");
          print_generic_expr (dump_file, dest, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
          fprintf (dump_file, "\n");
        }

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        &const_and_copies_stack,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
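
/* For example (illustrative): given "x_3 = PHI <y_2, y_2, x_3>", every
   alternative other than the result itself is y_2, so x_3 must equal
   y_2 and we may record that copy.  */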
static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi) && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
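
/* For instance (illustrative): for a loop header with one preheader
   edge and one latch back edge, the back edge is skipped (its source
   is dominated by its destination) and the preheader edge is
   returned.  */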
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
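          /* For instance (illustrative): if LHS was set by
             "x_2 = (int) c_3" with c_3 of type char, then on an edge
             where x_2 == 10 we can also record c_3 == 10, because 10
             fits in char.  */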
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, avail_exprs);
}
/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, hash_table <expr_elt_hasher> htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs.find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */
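
/* For example (illustrative): build_and_record_new_cond (LE_EXPR, a_1,
   b_2, &edge_info->cond_equivalences) pushes a record meaning
   "a_1 <= b_2 is true" onto the vector.  */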
static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
        y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
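
/* For example (illustrative): if on the edge from BB to a successor we
   know a_1 == 5, then a PHI in the successor such as
   "x_4 = PHI <a_1 (BB), b_2 (other)>" can have its a_1 argument
   replaced with 5.  */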
static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we can not propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gimple phi = gsi_stmt (gsi);

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      restore_vars_to_original_value ();
    }
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          tree index = gimple_switch_index (stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (stmt);
              tree *info = XCNEWVEC (tree, last_basic_block);
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
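          /* For example (illustrative): for "if (x_1 == 0)" with boolean
             x_1, the true edge records x_1 = false and the false edge
             records x_1 = true.  */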
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }

          /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
        }
    }
}
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push (NULL);
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  remove_local_expressions_from_table ();
  restore_vars_to_original_value ();
}
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */
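
/* For example (invented SSA names): with

     a_3 = b_1 + c_2;
     ...
     d_4 = b_1 + c_2;

   the second computation is redundant once the first has been entered
   into the available expression table, so it is rewritten to
   d_4 = a_3;.  */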
static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    expr_type = TREE_TYPE (gimple_switch_index (stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
	record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
	   || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " Replaced redundant expr '");
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (dump_file, "' with '");
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      opt_stats.num_re++;

      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
	 perhaps we should move this into propagate_tree_value_into_stmt
	 itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
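
/* Two illustrative cases (names invented).  A copy "x_1 = y_2;" simply
   records y_2 as the current value of x_1 in CONST_AND_COPIES.  A store
   "*p_1 = x_2;" enters the artificial statement "x_2 = *p_1" into the
   available expression table, so a later load "y_3 = *p_1;" with the
   same virtual use can be rewritten as y_3 = x_2;.  */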
static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
	 may be propagated, register it in the CONST_AND_COPIES table.  We
	 do not need to record unwind data for this, since this is a true
	 assignment and not an equivalence inferred from a comparison.  All
	 uses of this ssa name are dominated by this assignment, so unwinding
	 just costs time and space.  */
      if (may_optimize_p
	  && (TREE_CODE (rhs) == SSA_NAME
	      || is_gimple_min_invariant (rhs)))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "==== ASGN ");
	      print_generic_expr (dump_file, lhs, 0);
	      fprintf (dump_file, " = ");
	      print_generic_expr (dump_file, rhs, 0);
	      fprintf (dump_file, "\n");
	    }

	  set_ssa_name_value (lhs, rhs);
	}
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
	  || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gimple new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  /* NOTE tuples.  The call to gimple_build_assign below replaced
	     a call to build_gimple_modify_stmt, which did not set the
	     SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
	     may cause an SSA validation failure, as the LHS may be a
	     default-initialized name and should have no definition.  I'm
	     a bit dubious of this, as the artificial statement that we
	     generate here may in fact be ill-formed, but it is simply
	     used as an internal device in this pass, and never becomes
	     part of the CFG.  */
	  gimple defstmt = SSA_NAME_DEF_STMT (rhs);
	  new_stmt = gimple_build_assign (rhs, lhs);
	  SSA_NAME_DEF_STMT (rhs) = defstmt;
	}
      else
	new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
	 table.  */
      lookup_avail_expr (new_stmt, true);
    }
}
/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */
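
/* E.g. (invented names): if CONST_AND_COPIES records the value 7 for
   x_1, then the use of x_1 in "y_2 = x_1 + 3" becomes "y_2 = 7 + 3",
   subject to the safety checks below; the caller is then expected to
   fold the modified statement.  */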
static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (op))
	return;

      /* Certain operands are not allowed to be copy propagated due
	 to their interaction with exception handling and some GCC
	 extensions.  */
      if (!may_propagate_copy (op, val))
	return;

      /* Do not propagate addresses that point to volatiles into memory
	 stmts without volatile operands.  */
      if (POINTER_TYPE_P (TREE_TYPE (val))
	  && TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (val)))
	  && gimple_has_mem_ops (stmt)
	  && !gimple_has_volatile_ops (stmt))
	return;

      /* Do not propagate copies if the propagated value is at a deeper loop
	 depth than the propagatee.  Otherwise, this may move loop variant
	 variables outside of their loops and prevent coalescing
	 opportunities.  If the value was loop invariant, it will be hoisted
	 by LICM and exposed for copy propagation.  */
      if (loop_depth_of_name (val) > loop_depth_of_name (op))
	return;

      /* Do not propagate copies into simple IV increment statements.
	 See PR23821 for how this can disturb IV analysis.  */
      if (TREE_CODE (val) != INTEGER_CST
	  && simple_iv_increment_p (stmt))
	return;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " Replaced '");
	  print_generic_expr (dump_file, op, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, val, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      if (TREE_CODE (val) != SSA_NAME)
	opt_stats.num_const_prop++;
      else
	opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
	 safe, even if we changed virtual operands since we will
	 rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}
/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
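
/* A short illustration of both effects (invented SSA names):

     x_1 = y_2;        <- copy recorded in CONST_AND_COPIES
     a_3 = x_1 + 1;    <- use of x_1 replaced, giving a_3 = y_2 + 1
     b_4 = y_2 + 1;    <- redundant; rewritten to b_4 = a_3  */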
static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;

  old_stmt = stmt = gsi_stmt (si);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (stmt);

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
	 up to date.  */
      if (fold_stmt (&si))
	{
	  stmt = gsi_stmt (si);
	  gimple_set_modified (stmt, true);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " Folded to: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	    }
	}

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
	rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
	rhs = gimple_goto_dest (stmt);
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
	/* This should never be an ADDR_EXPR.  */
	rhs = gimple_switch_index (stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
	 even if fold_stmt updated the stmt already and thus cleared
	 gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments, calls with an LHS, conditionals and switches
     that have no side effects.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
		    && (is_gimple_assign (stmt)
			|| (is_gimple_call (stmt)
			    && gimple_call_lhs (stmt) != NULL_TREE)
			|| gimple_code (stmt) == GIMPLE_COND
			|| gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
	{
	  /* Resolve __builtin_constant_p.  If it hasn't been
	     folded to integer_one_node by now, it's fairly
	     certain that the value simply isn't constant.  */
	  tree callee = gimple_call_fndecl (stmt);
	  if (callee
	      && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
	      && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
	    {
	      propagate_tree_value_into_stmt (&si, integer_zero_node);
	      stmt = gsi_stmt (si);
	    }
	}

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
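      /* E.g. (invented names): the store "*p_1 = x_2;" is redundant when
	 the available expression table shows that *p_1 already equals
	 x_2, say because a dominating "x_2 = *p_1;" loaded it and no
	 intervening store changed the virtual use.  */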
      if (gimple_assign_single_p (stmt)
	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  tree rhs = gimple_assign_rhs1 (stmt);
	  tree cached_lhs;
	  gimple new_stmt;
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      tree tem = SSA_NAME_VALUE (rhs);
	      if (tem)
		rhs = tem;
	    }
	  /* Build a new statement with the RHS and LHS exchanged.  */
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (rhs);
	      new_stmt = gimple_build_assign (rhs, lhs);
	      SSA_NAME_DEF_STMT (rhs) = defstmt;
	    }
	  else
	    new_stmt = gimple_build_assign (rhs, lhs);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  cached_lhs = lookup_avail_expr (new_stmt, false);
	  if (cached_lhs
	      && rhs == cached_lhs)
	    {
	      basic_block bb = gimple_bb (stmt);
	      unlink_stmt_vdef (stmt);
	      if (gsi_remove (&si, true))
		{
		  bitmap_set_bit (need_eh_cleanup, bb->index);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, "  Flagged to clear EH edges.\n");
		}
	      release_defs (stmt);
	      return;
	    }
	}
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs cannot be eliminated until
     we remove unreachable blocks.

     We cannot remove unreachable blocks until after we have completed
     any queued jump threading.

     We cannot complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
	val = fold_binary_loc (gimple_location (stmt),
			       gimple_cond_code (stmt), boolean_type_node,
			       gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gimple_code (stmt) == GIMPLE_SWITCH)
	val = gimple_switch_index (stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
	cfg_altered = true;

      /* If we simplified a statement in such a way that it can be shown
	 not to trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	{
	  bitmap_set_bit (need_eh_cleanup, bb->index);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "  Flagged to clear EH edges.\n");
	}
    }
}
/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is also
   added to AVAIL_EXPRS_STACK, so that it can be removed when
   we finish processing this block and its children.  */
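
/* For example (invented names): looking up "a_5 = b_1 + c_2" returns
   x_3 if "x_3 = b_1 + c_2" was entered earlier during this dominator
   walk; otherwise, with INSERT true, the expression is entered with
   LHS a_5 and NULL_TREE is returned.  */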
static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  tree temp;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
	  || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs.find_slot_with_hash (&element, element.hash,
					  (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "2>>> ");
	  print_expr_hash_elt (dump_file, element2);
	}

      avail_exprs_stack.safe_push (element2);
      return NULL_TREE;
    }
  else
    free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = ((struct expr_hash_elt *)*slot)->lhs;

  /* See if the LHS appears in the CONST_AND_COPIES table.  If it does, then
     use the value from the const_and_copies table.  */
  if (TREE_CODE (lhs) == SSA_NAME)
    {
      temp = SSA_NAME_VALUE (lhs);
      if (temp)
	lhs = temp;
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}
/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */
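
/* One consequence worth noting: for expressions associated with a
   statement, the virtual use participates in the hash (see below), so
   two loads of the same location separated by a clobbering store are
   not treated as the same available expression.  */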
static hashval_t
avail_expr_hash (const void *p)
{
  gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  tree vuse;
  hashval_t val = 0;

  val = iterative_hash_hashable_expr (expr, val);

  /* If the hash table entry is not associated with a statement, then we
     can just hash the expression and not worry about virtual operands
     and such.  */
  if (!stmt)
    return val;

  /* Add the SSA version numbers of the vuse operand.  This is important
     because compound variables like arrays are not renamed in the
     operands.  Rather, the rename is done on the virtual variable
     representing all the elements of the array.  */
  if ((vuse = gimple_vuse (stmt)))
    val = iterative_hash_expr (vuse, val);

  return val;
}
/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */
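
/* A degenerate PHI is one whose arguments are all the same, e.g.
   (invented names):

     x_4 = PHI <x_2(3), x_2(4)>

   which is equivalent to the copy x_4 = x_2 and can be eliminated by
   propagating x_2 into every use of x_4.  */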
/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node.  In the case of a non-degenerate
   PHI, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (stmt);
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}


/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}
/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMEs so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */
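
/* Continuing the earlier sketch (invented names): propagating x_2 into
   the uses of x_4 may turn

     y_6 = PHI <x_4(5), x_2(6)>

   into the degenerate PHI "y_6 = PHI <x_2(5), x_2(6)>"; its result is
   marked in INTERESTING_NAMES so that the worklist phase revisits
   it.  */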
static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
{
  /* First verify that propagation is valid and isn't going to move a
     loop variant variable outside its loop.  */
  if (! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      && (TREE_CODE (rhs) != SSA_NAME
	  || ! SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs))
      && may_propagate_copy (lhs, rhs)
      && loop_depth_of_name (lhs) >= loop_depth_of_name (rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, " Replacing '");
	  print_generic_expr (dump_file, lhs, dump_flags);
	  fprintf (dump_file, "' with %s '",
		   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
	  print_generic_expr (dump_file, rhs, dump_flags);
	  fprintf (dump_file, "'\n");
	}

      /* Walk over every use of LHS and try to replace the use with RHS.
	 At this point the only reason why such a propagation would not
	 be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  /* Leave debug stmts alone.  If we succeed in propagating
	     all non-debug uses, we'll drop the DEF, and propagation
	     into debug stmts will occur then.  */
	  if (gimple_debug_bind_p (use_stmt))
	    continue;

	  /* It's not always safe to propagate into an ASM_EXPR.  */
	  if (gimple_code (use_stmt) == GIMPLE_ASM
	      && ! may_propagate_copy_into_asm (lhs))
	    {
	      all = false;
	      continue;
	    }

	  /* It's not ok to propagate into the definition stmt of RHS.
		<bb 9>:
		  # prephitmp.12_36 = PHI <g_67.1_6(9)>
		  g_67.1_6 = prephitmp.12_36;
		  goto <bb 9>;
	     While this is strictly all dead code we do not want to
	     deal with this here.  */
	  if (TREE_CODE (rhs) == SSA_NAME
	      && SSA_NAME_DEF_STMT (rhs) == use_stmt)
	    {
	      all = false;
	      continue;
	    }

	  /* Dump details.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " Original statement:");
	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
	    }

	  /* Propagate the RHS into this use of the LHS.  */
	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	    propagate_value (use_p, rhs);

	  /* Special cases to avoid useless calls into the folding
	     routines, operand scanning, etc.

	     Propagation into a PHI may cause the PHI to become
	     a degenerate, so mark the PHI as interesting.  No other
	     actions are necessary.  */
	  if (gimple_code (use_stmt) == GIMPLE_PHI)
	    {
	      tree result;

	      /* Dump details.  */
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " Updated statement:");
		  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
		}

	      result = get_lhs_or_phi_result (use_stmt);
	      bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
	      continue;
	    }

	  /* From this point onward we are propagating into a
	     real statement.  Folding may (or may not) be possible,
	     we may expose new operands, expose dead EH edges,
	     etc.  */
	  /* NOTE tuples.  In the tuples world, fold_stmt_inplace
	     cannot fold a call that simplifies to a constant,
	     because the GIMPLE_CALL must be replaced by a
	     GIMPLE_ASSIGN, and there is no way to effect such a
	     transformation in-place.  We might want to consider
	     using the more general fold_stmt here.  */
	    {
	      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
	      fold_stmt_inplace (&gsi);
	    }

	  /* Sometimes propagation can expose new operands to the
	     renamer.  */
	  update_stmt (use_stmt);

	  /* Dump details.  */
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " Updated statement:");
	      print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
	    }

	  /* If we replaced a variable index with a constant, then
	     we would need to update the invariant flag for ADDR_EXPRs.  */
	  if (gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
	    recompute_tree_invariant_for_addr_expr
	      (gimple_assign_rhs1 (use_stmt));

	  /* If we cleaned up EH information from the statement,
	     mark its containing block as needing EH cleanups.  */
	  if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
	    {
	      bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file, " Flagged to clear EH edges.\n");
	    }

	  /* Propagation may expose new trivial copy/constant propagation
	     opportunities.  */
	  if (gimple_assign_single_p (use_stmt)
	      && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	      && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
		  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
	    {
	      tree result = get_lhs_or_phi_result (use_stmt);
	      bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
	    }

	  /* Propagation into these nodes may make certain edges in
	     the CFG unexecutable.  We want to identify them, as PHI
	     nodes at the destination of those unexecutable edges may
	     become degenerate.  */
	  else if (gimple_code (use_stmt) == GIMPLE_COND
		   || gimple_code (use_stmt) == GIMPLE_SWITCH
		   || gimple_code (use_stmt) == GIMPLE_GOTO)
	    {
	      tree val;

	      if (gimple_code (use_stmt) == GIMPLE_COND)
		val = fold_binary_loc (gimple_location (use_stmt),
				       gimple_cond_code (use_stmt),
				       boolean_type_node,
				       gimple_cond_lhs (use_stmt),
				       gimple_cond_rhs (use_stmt));
	      else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
		val = gimple_switch_index (use_stmt);
	      else
		val = gimple_goto_dest (use_stmt);

	      if (val && is_gimple_min_invariant (val))
		{
		  basic_block bb = gimple_bb (use_stmt);
		  edge te = find_taken_edge (bb, val);
		  edge_iterator ei;
		  edge e;
		  gimple_stmt_iterator gsi, psi;

		  /* Remove all outgoing edges except TE.  */
		  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
		    {
		      if (e != te)
			{
			  /* Mark all the PHI nodes at the destination of
			     the unexecutable edge as interesting.  */
			  for (psi = gsi_start_phis (e->dest);
			       !gsi_end_p (psi);
			       gsi_next (&psi))
			    {
			      gimple phi = gsi_stmt (psi);

			      tree result = gimple_phi_result (phi);
			      int version = SSA_NAME_VERSION (result);

			      bitmap_set_bit (interesting_names, version);
			    }

			  te->probability += e->probability;

			  te->count += e->count;
			  remove_edge (e);
			  cfg_altered = true;
			}
		      else
			ei_next (&ei);
		    }

		  gsi = gsi_last_bb (gimple_bb (use_stmt));
		  gsi_remove (&gsi, true);

		  /* And fixup the flags on the single remaining edge.  */
		  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
		  te->flags &= ~EDGE_ABNORMAL;
		  te->flags |= EDGE_FALLTHRU;
		  if (te->probability > REG_BR_PROB_BASE)
		    te->probability = REG_BR_PROB_BASE;
		}
	    }
	}

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
	 we can remove STMT.  */
      if (all)
	remove_stmt_or_phi (stmt);
    }
}
/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */
static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
	 otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	  SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
	SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it; instead use the saved version # to clear
     its entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}
/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gimple_stmt_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}
/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough that it can be run several
   times in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   cost.  */
static unsigned int
eliminate_degenerate_phis (void)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We cannot
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR, interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.   */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
	 changed during the loop.  Copy it to another bitmap and
	 use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
	{
	  tree name = ssa_name (i);

	  /* Ignore SSA_NAMEs that have been released because
	     their defining statement was deleted (unreachable).  */
	  if (name)
	    eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
				     interesting_names);
	}
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG schedule loops for fixup by cfgcleanup.  */
      if (current_loops)
	loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}
namespace {

const pass_data pass_data_phi_only_cprop =
{
  GIMPLE_PASS, /* type */
  "phicprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_gate */
  true, /* has_execute */
  TV_TREE_PHI_CPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_verify_ssa
    | TODO_verify_stmts
    | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_phi_only_cprop : public gimple_opt_pass
{
public:
  pass_phi_only_cprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
  bool gate () { return gate_dominator (); }
  unsigned int execute () { return eliminate_degenerate_phis (); }

}; // class pass_phi_only_cprop

} // anon namespace

gimple_opt_pass *
make_pass_phi_only_cprop (gcc::context *ctxt)
{
  return new pass_phi_only_cprop (ctxt);
}