/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2014 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "inchash.h"
#include "function.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"

/* This file implements optimizations on the dominator tree.  */
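
/* A purely illustrative example of the redundancy elimination performed
   here (SSA names hypothetical): given

       a_1 = b_2 + c_3;
       ...
       d_4 = b_2 + c_3;

   the second computation is available from the first, which dominates
   it, so it is rewritten to d_4 = a_1.  Conditionals, switches and
   PHIs feed similar equivalences, and jump threading uses them to
   statically resolve dominated branches.  */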

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gimple fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
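
/* For example (names purely illustrative), the statement

       x_1 = y_2 + z_3;

   would be recorded with kind == EXPR_BINARY, type == TREE_TYPE (x_1),
   ops.binary.op == PLUS_EXPR, ops.binary.opnd0 == y_2 and
   ops.binary.opnd1 == z_3.  */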

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<expr_hash_elt_t> avail_exprs_stack;
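
/* An illustrative sketch of the unwinding discipline (markers are the
   NULL entries):

       entering B1: push NULL, push expr1, push expr2
       entering B2 (dominated by B1): push NULL, push expr3
       leaving B2:  pop expr3, then stop at the NULL marker;
                    expr1 and expr2 remain available
       leaving B1:  pop expr2, pop expr1, stop at the NULL marker

   so an expression is available exactly in the blocks dominated by the
   block that made it available.  */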

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The stmt pointer if this element corresponds to a statement.  */
  gimple stmt;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
				   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  gimple stmt1 = p1->stmt;
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  gimple stmt2 = p2->stmt;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  /* FIXME tuples:
     We add stmts to a hash table and then modify them.  To detect the case
     that we modify a stmt and then search for it, we assume that the hash
     is always modified by that change.
     We have to fully check why this doesn't happen on trunk or rewrite
     this in a more reliable (and easier to understand) way.  */
  if (((const struct expr_hash_elt *)p1)->hash
      != ((const struct expr_hash_elt *)p2)->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    {
      /* Note that STMT1 and/or STMT2 may be NULL.  */
      return ((stmt1 ? gimple_vuse (stmt1) : NULL_TREE)
	      == (stmt2 ? gimple_vuse (stmt2) : NULL_TREE));
    }

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
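
/* Illustrative use of the table (hypothetical SSA names): once
   x_1 = a_2 * b_3 has been entered, a dominated statement

       y_4 = a_2 * b_3;

   is looked up, found, and replaced by y_4 = x_1; likewise a
   conditional recorded as true can fold a dominated copy of the same
   conditional.  */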

/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
			     const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);

/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
			 struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
	{
	case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
	case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
	case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
	case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (code == GIMPLE_CALL)
    {
      size_t nargs = gimple_call_num_args (stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = stmt;

      if (gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE))
	expr->ops.call.pure = true;
      else
	expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
	expr->ops.call.args[i] = gimple_call_arg (stmt, i);
    }
  else if (code == GIMPLE_SWITCH)
    {
      expr->type = TREE_TYPE (gimple_switch_index (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
	expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->stmt = stmt;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
				   tree lhs,
				   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->stmt = NULL;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.
   They are considered equivalent when the expressions
   they denote must necessarily be equal.  The logic is intended
   to follow that of operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
		       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
	  || TREE_CODE (type1) == ERROR_MARK
	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
			      expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
	return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
	   || expr0->ops.unary.op == NON_LVALUE_EXPR)
	  && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
	return false;

      return operand_equal_p (expr0->ops.unary.opnd,
			      expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
	return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
			   expr1->ops.binary.opnd0, 0)
	  && operand_equal_p (expr0->ops.binary.opnd1,
			      expr1->ops.binary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
	      && operand_equal_p (expr0->ops.binary.opnd0,
				  expr1->ops.binary.opnd1, 0)
	      && operand_equal_p (expr0->ops.binary.opnd1,
				  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
	  || !operand_equal_p (expr0->ops.ternary.opnd2,
			       expr1->ops.ternary.opnd2, 0))
	return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
			   expr1->ops.ternary.opnd0, 0)
	  && operand_equal_p (expr0->ops.ternary.opnd1,
			      expr1->ops.ternary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
	      && operand_equal_p (expr0->ops.ternary.opnd0,
				  expr1->ops.ternary.opnd1, 0)
	      && operand_equal_p (expr0->ops.ternary.opnd1,
				  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
	size_t i;

	/* If the calls are to different functions, then they
	   clearly cannot be equal.  */
	if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
					expr1->ops.call.fn_from))
	  return false;

	if (! expr0->ops.call.pure)
	  return false;

	if (expr0->ops.call.nargs != expr1->ops.call.nargs)
	  return false;

	for (i = 0; i < expr0->ops.call.nargs; i++)
	  if (! operand_equal_p (expr0->ops.call.args[i],
				 expr1->ops.call.args[i], 0))
	    return false;

	if (stmt_could_throw_p (expr0->ops.call.fn_from))
	  {
	    int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
	    int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
	    if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
	      return false;
	  }

	return true;
      }

    case EXPR_PHI:
      {
	size_t i;

	if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
	  return false;

	for (i = 0; i < expr0->ops.phi.nargs; i++)
	  if (! operand_equal_p (expr0->ops.phi.args[i],
				 expr1->ops.phi.args[i], 0))
	    return false;

	return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result as the VAL argument.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

static hashval_t
iterative_hash_exprs_commutative (const_tree t1,
				  const_tree t2, hashval_t val)
{
  hashval_t one = iterative_hash_expr (t1, 0);
  hashval_t two = iterative_hash_expr (t2, 0);
  hashval_t t;

  if (one > two)
    t = one, one = two, two = t;
  val = iterative_hash_hashval_t (one, val);
  val = iterative_hash_hashval_t (two, val);

  return val;
}
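
/* Note: this guarantees, e.g., that a_1 + b_2 and b_2 + a_1 receive
   the same hash value, matching the order-insensitive comparison
   performed by hashable_expr_equal_p for commutative codes.  */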

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow iterative_hash_expr in tree.c.  */

static hashval_t
iterative_hash_hashable_expr (const struct hashable_expr *expr, hashval_t val)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      val = iterative_hash_expr (expr->ops.single.rhs, val);
      break;

    case EXPR_UNARY:
      val = iterative_hash_object (expr->ops.unary.op, val);

      /* Make sure to include signedness in the hash computation.
	 Don't hash the type, that can lead to having nodes which
	 compare equal according to operand_equal_p, but which
	 have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
	  || expr->ops.unary.op == NON_LVALUE_EXPR)
	val += TYPE_UNSIGNED (expr->type);

      val = iterative_hash_expr (expr->ops.unary.opnd, val);
      break;

    case EXPR_BINARY:
      val = iterative_hash_object (expr->ops.binary.op, val);
      if (commutative_tree_code (expr->ops.binary.op))
	val = iterative_hash_exprs_commutative (expr->ops.binary.opnd0,
						expr->ops.binary.opnd1, val);
      else
	{
	  val = iterative_hash_expr (expr->ops.binary.opnd0, val);
	  val = iterative_hash_expr (expr->ops.binary.opnd1, val);
	}
      break;

    case EXPR_TERNARY:
      val = iterative_hash_object (expr->ops.ternary.op, val);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
	val = iterative_hash_exprs_commutative (expr->ops.ternary.opnd0,
						expr->ops.ternary.opnd1, val);
      else
	{
	  val = iterative_hash_expr (expr->ops.ternary.opnd0, val);
	  val = iterative_hash_expr (expr->ops.ternary.opnd1, val);
	}
      val = iterative_hash_expr (expr->ops.ternary.opnd2, val);
      break;

    case EXPR_CALL:
      {
	size_t i;
	enum tree_code code = CALL_EXPR;
	gimple fn_from;

	val = iterative_hash_object (code, val);
	fn_from = expr->ops.call.fn_from;
	if (gimple_call_internal_p (fn_from))
	  val = iterative_hash_hashval_t
	    ((hashval_t) gimple_call_internal_fn (fn_from), val);
	else
	  val = iterative_hash_expr (gimple_call_fn (fn_from), val);
	for (i = 0; i < expr->ops.call.nargs; i++)
	  val = iterative_hash_expr (expr->ops.call.args[i], val);
      }
      break;

    case EXPR_PHI:
      {
	size_t i;

	for (i = 0; i < expr->ops.phi.nargs; i++)
	  val = iterative_hash_expr (expr->ops.phi.args[i], val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  return val;
}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  if (element->stmt)
    fprintf (stream, "STMT ");
  else
    fprintf (stream, "COND ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
	size_t i;
	size_t nargs = element->expr.ops.call.nargs;
	gimple fn_from;

	fn_from = element->expr.ops.call.fn_from;
	if (gimple_call_internal_p (fn_from))
	  fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
		 stream);
	else
	  print_generic_expr (stream, gimple_call_fn (fn_from), 0);
	fprintf (stream, " (");
	for (i = 0; i < nargs; i++)
	  {
	    print_generic_expr (stream, element->expr.ops.call.args[i], 0);
	    if (i + 1 < nargs)
	      fprintf (stream, ", ");
	  }
	fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
	size_t i;
	size_t nargs = element->expr.ops.phi.nargs;

	fprintf (stream, "PHI <");
	for (i = 0; i < nargs; i++)
	  {
	    print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
	    if (i + 1 < nargs)
	      fprintf (stream, ", ");
	  }
	fprintf (stream, ">");
      }
      break;
    }
  fprintf (stream, "\n");

  if (element->stmt)
    {
      fprintf (stream, "          ");
      print_gimple_stmt (stream, element->stmt, 0, 0);
    }
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  struct edge_info *edge_info = (struct edge_info *) e->aux;

	  if (edge_info)
	    {
	      edge_info->cond_equivalences.release ();
	      free (edge_info);
	      e->aux = NULL;
	    }
	}
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gimple m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);

  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gimple condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
	  code = swap_tree_comparison (code);

	  gimple_cond_set_code (condstmt, code);
	  gimple_cond_set_lhs (condstmt, op1);
	  gimple_cond_set_rhs (condstmt, op0);

	  update_stmt (condstmt);
	}
    }
}
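
/* For example, a condition written as

       if (5 > j_2)

   is canonicalized into

       if (j_2 < 5)

   so that equivalent comparisons look up consistently in the hash
   table.  */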

/* Remove expressions made available in the current block from AVAIL_EXPRS,
   popping entries off AVAIL_EXPRS_STACK until we reach the NULL marker
   that was pushed when the block was entered.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      expr_hash_elt_t victim = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim == NULL)
	break;

      /* This must precede the actual removal from the hash table,
	 as ELEMENT and the table entry may share a call argument
	 vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< ");
	  print_expr_hash_elt (dump_file, victim);
	}

      slot = avail_exprs->find_slot (victim, NO_INSERT);
      gcc_assert (slot && *slot == victim);
      avail_exprs->clear_slot (slot);
    }
}

/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
	break;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< COPY ");
	  print_generic_expr (dump_file, dest, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
	  fprintf (dump_file, "\n");
	}

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
				  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
	gimple_build_cond (NE_EXPR,
			   integer_zero_node, integer_zero_node,
			   NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push (NULL);
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
			&const_and_copies_stack,
			simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}
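
/* For instance, the degenerate PHI (names purely illustrative)

       x_4 = PHI <y_1 (bb2), y_1 (bb3), x_4 (bb4)>

   lets us record x_4 == y_1: the self-referencing alternative is
   ignored and all remaining alternatives agree.  */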

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;
	  cond_equivalence *eq;

	  if (lhs)
	    record_equality (lhs, rhs);

	  /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	     set via a widening type conversion, then we may be able to record
	     additional equivalences.  */
	  if (lhs
	      && TREE_CODE (lhs) == SSA_NAME
	      && is_gimple_constant (rhs)
	      && TREE_CODE (rhs) == INTEGER_CST)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (lhs);

	      if (defstmt
		  && is_gimple_assign (defstmt)
		  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
		{
		  tree old_rhs = gimple_assign_rhs1 (defstmt);

		  /* If the conversion widens the original value and
		     the constant is in the range of the type of OLD_RHS,
		     then convert the constant and record the equivalence.

		     Note that int_fits_type_p does not check the precision
		     if the upper and lower bounds are OK.  */
		  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
		      && (TYPE_PRECISION (TREE_TYPE (lhs))
			  > TYPE_PRECISION (TREE_TYPE (old_rhs)))
		      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
		    {
		      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
		      record_equality (old_rhs, newval);
		    }
		}
	    }

	  for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	    record_cond (eq);
	}
    }
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
	   opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}

/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "1>>> ");
	  print_expr_hash_elt (dump_file, element);
	}

      avail_exprs_stack.safe_push (element);
    }
  else
    free_expr_hash_elt (element);
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
			   tree op0, tree op1,
			   vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
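
/* An illustrative sample of the recordings above: for if (a_1 < b_2)
   with integral operands, the true edge records

       a_1 <= b_2 == true
       a_1 != b_2 == true
       a_1 <  b_2 == true
       a_1 >= b_2 == false

   while the false edge records a_1 >= b_2 == true and
   a_1 < b_2 == false, derived from the inverted condition.  */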

/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}

/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
	y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   /* ??? When threading over backedges the following is important
	      for correctness.  See PR61757.  */
	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (x)))
      && (TREE_CODE (y) != REAL_CST
	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gimple_stmt_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we cannot propagate it into non-dominated blocks, we can
	 propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;

	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    record_const_or_copy (lhs, rhs);
	}

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
	  gimple phi = gsi_stmt (gsi);

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && (TREE_CODE (new_val) == SSA_NAME
		  || is_gimple_min_invariant (new_val))
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      restore_vars_to_original_value ();
    }
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  tree index = gimple_switch_index (stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
	      int n_labels = gimple_switch_num_labels (stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = allocate_edge_info (e);
		      edge_info->lhs = index;
		      edge_info->rhs = x;
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);
	  enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

	  /* Special case comparing booleans against a constant as we
	     know the value of OP0 on both arms of the branch.  i.e., we
	     can record an equivalence for OP0 rather than COND.  */
	  if ((code == EQ_EXPR || code == NE_EXPR)
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
	      && is_gimple_min_invariant (op1))
	    {
	      if (code == EQ_EXPR)
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_false_node
				    : boolean_true_node);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_true_node
				    : boolean_false_node);
		}
	      else
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_true_node
				    : boolean_false_node);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_false_node
				    : boolean_true_node);
		}
	    }
	  else if (is_gimple_min_invariant (op0)
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op0)))
		    && real_zerop (op0));
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}
	    }
	  else if (TREE_CODE (op0) == SSA_NAME
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (op1)))
		    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}
	    }
	}

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
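
/* E.g., for a boolean x_1 tested by if (x_1 == 0) (hypothetical name),
   the boolean special case above records x_1 == false on the true edge
   and x_1 == true on the false edge, which is stronger than merely
   recording the truth value of the comparison itself.  */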
1955 void
1956 dom_opt_dom_walker::before_dom_children (basic_block bb)
1958 gimple_stmt_iterator gsi;
1960 if (dump_file && (dump_flags & TDF_DETAILS))
1961 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1963 /* Push a marker on the stacks of local information so that we know how
1964 far to unwind when we finalize this block. */
1965 avail_exprs_stack.safe_push (NULL);
1966 const_and_copies_stack.safe_push (NULL_TREE);
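/* A sketch of the unwind protocol (entries are hypothetical): after
   processing this block the stacks might look like

     avail_exprs_stack:       ... NULL e1 e2
     const_and_copies_stack:  ... NULL_TREE x_1 y_2

   and finalization pops entries until it reaches the NULL/NULL_TREE
   marker, undoing everything recorded while this block was on the
   dominator walk.  */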
1968 record_equivalences_from_incoming_edge (bb);
1970 /* PHI nodes can create equivalences too. */
1971 record_equivalences_from_phis (bb);
1973 /* Create equivalences from redundant PHIs. PHIs are only truly
1974 redundant when they exist in the same block, so push another
1975 marker and unwind right afterwards. */
1976 avail_exprs_stack.safe_push (NULL);
1977 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1978 eliminate_redundant_computations (&gsi);
1979 remove_local_expressions_from_table ();
1981 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1982 optimize_stmt (bb, gsi);
1984 /* Now prepare to process dominated blocks. */
1985 record_edge_info (bb);
1986 cprop_into_successor_phis (bb);
1989 /* We have finished processing the dominator children of BB; perform
1990 any finalization actions in preparation for leaving this node in
1991 the dominator tree. */
1993 void
1994 dom_opt_dom_walker::after_dom_children (basic_block bb)
1996 gimple last;
1998 /* If we have an outgoing edge to a block with multiple incoming and
1999 outgoing edges, then we may be able to thread the edge, i.e., we
2000 may be able to statically determine which of the outgoing edges
2001 will be traversed when the incoming edge from BB is traversed. */
2002 if (single_succ_p (bb)
2003 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2004 && potentially_threadable_block (single_succ (bb)))
2006 thread_across_edge (single_succ_edge (bb));
2008 else if ((last = last_stmt (bb))
2009 && gimple_code (last) == GIMPLE_COND
2010 && EDGE_COUNT (bb->succs) == 2
2011 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2012 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2014 edge true_edge, false_edge;
2016 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2018 /* Only try to thread the edge if it reaches a target block with
2019 more than one predecessor and more than one successor. */
2020 if (potentially_threadable_block (true_edge->dest))
2021 thread_across_edge (true_edge);
2023 /* Similarly for the ELSE arm. */
2024 if (potentially_threadable_block (false_edge->dest))
2025 thread_across_edge (false_edge);
2029 /* These remove expressions local to BB from the tables. */
2030 remove_local_expressions_from_table ();
2031 restore_vars_to_original_value ();
2034 /* Search for redundant computations in STMT. If any are found, then
2035 replace them with the variable holding the result of the computation.
2037 If safe, record this expression into the available expression hash
2038 table. */
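/* For instance (hypothetical GIMPLE), given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the second computation hits the hash table entry created by the
   first, and the statement is rewritten to d_4 = a_1.  */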
2040 static void
2041 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2043 tree expr_type;
2044 tree cached_lhs;
2045 tree def;
2046 bool insert = true;
2047 bool assigns_var_p = false;
2049 gimple stmt = gsi_stmt (*gsi);
2051 if (gimple_code (stmt) == GIMPLE_PHI)
2052 def = gimple_phi_result (stmt);
2053 else
2054 def = gimple_get_lhs (stmt);
2056 /* Certain expressions on the RHS can be optimized away, but cannot
2057 themselves be entered into the hash tables. */
2058 if (! def
2059 || TREE_CODE (def) != SSA_NAME
2060 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2061 || gimple_vdef (stmt)
2062 /* Do not record equivalences for increments of ivs. This would create
2063 overlapping live ranges for a very questionable gain. */
2064 || simple_iv_increment_p (stmt))
2065 insert = false;
2067 /* Check if the expression has been computed before. */
2068 cached_lhs = lookup_avail_expr (stmt, insert);
2070 opt_stats.num_exprs_considered++;
2072 /* Get the type of the expression we are trying to optimize. */
2073 if (is_gimple_assign (stmt))
2075 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2076 assigns_var_p = true;
2078 else if (gimple_code (stmt) == GIMPLE_COND)
2079 expr_type = boolean_type_node;
2080 else if (is_gimple_call (stmt))
2082 gcc_assert (gimple_call_lhs (stmt));
2083 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2084 assigns_var_p = true;
2086 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2087 expr_type = TREE_TYPE (gimple_switch_index (stmt));
2088 else if (gimple_code (stmt) == GIMPLE_PHI)
2089 /* We can't propagate into a phi, so the logic below doesn't apply.
2090 Instead record an equivalence between the cached LHS and the
2091 PHI result of this statement, provided they are in the same block.
2092 This should be sufficient to kill the redundant phi. */
2094 if (def && cached_lhs)
2095 record_const_or_copy (def, cached_lhs);
2096 return;
2098 else
2099 gcc_unreachable ();
2101 if (!cached_lhs)
2102 return;
2104 /* It is safe to ignore types here since we have already done
2105 type checking in the hashing and equality routines. In fact
2106 type checking here merely gets in the way of constant
2107 propagation. Also, make sure that it is safe to propagate
2108 CACHED_LHS into the expression in STMT. */
2109 if ((TREE_CODE (cached_lhs) != SSA_NAME
2110 && (assigns_var_p
2111 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2112 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2114 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2115 || is_gimple_min_invariant (cached_lhs));
2117 if (dump_file && (dump_flags & TDF_DETAILS))
2119 fprintf (dump_file, " Replaced redundant expr '");
2120 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2121 fprintf (dump_file, "' with '");
2122 print_generic_expr (dump_file, cached_lhs, dump_flags);
2123 fprintf (dump_file, "'\n");
2126 opt_stats.num_re++;
2128 if (assigns_var_p
2129 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2130 cached_lhs = fold_convert (expr_type, cached_lhs);
2132 propagate_tree_value_into_stmt (gsi, cached_lhs);
2134 /* Since it is always necessary to mark the result as modified,
2135 perhaps we should move this into propagate_tree_value_into_stmt
2136 itself. */
2137 gimple_set_modified (gsi_stmt (*gsi), true);
2141 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2142 the available expressions table or the const_and_copies table.
2143 Detect and record those equivalences. */
2144 /* We handle only very simple copy equivalences here. The heavy
2145 lifting is done by eliminate_redundant_computations. */
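/* E.g. (hypothetical GIMPLE), x_1 = y_2 or x_1 = 42 simply sets
   SSA_NAME_VALUE (x_1) so that later uses of x_1 can be replaced
   by cprop_operand.  */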
2147 static void
2148 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2150 tree lhs;
2151 enum tree_code lhs_code;
2153 gcc_assert (is_gimple_assign (stmt));
2155 lhs = gimple_assign_lhs (stmt);
2156 lhs_code = TREE_CODE (lhs);
2158 if (lhs_code == SSA_NAME
2159 && gimple_assign_single_p (stmt))
2161 tree rhs = gimple_assign_rhs1 (stmt);
2163 /* If the RHS of the assignment is a constant or another variable that
2164 may be propagated, register it in the CONST_AND_COPIES table. We
2165 do not need to record unwind data for this, since this is a true
2166 assignment and not an equivalence inferred from a comparison. All
2167 uses of this ssa name are dominated by this assignment, so unwinding
2168 just costs time and space. */
2169 if (may_optimize_p
2170 && (TREE_CODE (rhs) == SSA_NAME
2171 || is_gimple_min_invariant (rhs)))
2173 if (dump_file && (dump_flags & TDF_DETAILS))
2175 fprintf (dump_file, "==== ASGN ");
2176 print_generic_expr (dump_file, lhs, 0);
2177 fprintf (dump_file, " = ");
2178 print_generic_expr (dump_file, rhs, 0);
2179 fprintf (dump_file, "\n");
2182 set_ssa_name_value (lhs, rhs);
2186 /* A memory store, even an aliased store, creates a useful
2187 equivalence. By exchanging the LHS and RHS, creating suitable
2188 vops and recording the result in the available expression table,
2189 we may be able to expose more redundant loads. */
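/* For example (hypothetical GIMPLE), after the store

     *p_1 = x_2;

   we enter the artificial statement x_2 = *p_1 into the table, so a
   dominated load such as t_3 = *p_1 (with the same virtual operand)
   can be rewritten to t_3 = x_2.  */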
2190 if (!gimple_has_volatile_ops (stmt)
2191 && gimple_references_memory_p (stmt)
2192 && gimple_assign_single_p (stmt)
2193 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2194 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2195 && !is_gimple_reg (lhs))
2197 tree rhs = gimple_assign_rhs1 (stmt);
2198 gimple new_stmt;
2200 /* Build a new statement with the RHS and LHS exchanged. */
2201 if (TREE_CODE (rhs) == SSA_NAME)
2203 /* NOTE tuples. The call to gimple_build_assign below replaced
2204 a call to build_gimple_modify_stmt, which did not set the
2205 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2206 may cause an SSA validation failure, as the LHS may be a
2207 default-initialized name and should have no definition. I'm
2208 a bit dubious of this, as the artificial statement that we
2209 generate here may in fact be ill-formed, but it is simply
2210 used as an internal device in this pass, and never becomes
2211 part of the CFG. */
2212 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2213 new_stmt = gimple_build_assign (rhs, lhs);
2214 SSA_NAME_DEF_STMT (rhs) = defstmt;
2216 else
2217 new_stmt = gimple_build_assign (rhs, lhs);
2219 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2221 /* Finally enter the statement into the available expression
2222 table. */
2223 lookup_avail_expr (new_stmt, true);
2227 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2228 CONST_AND_COPIES. */
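/* E.g. (hypothetical GIMPLE), if SSA_NAME_VALUE (a_1) is 7, then

     c_3 = a_1 + 5;   becomes   c_3 = 7 + 5;

   and the statement is marked modified so that it will be folded
   and rescanned later in optimize_stmt.  */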
2230 static void
2231 cprop_operand (gimple stmt, use_operand_p op_p)
2233 tree val;
2234 tree op = USE_FROM_PTR (op_p);
2236 /* If the operand has a known constant value or it is known to be a
2237 copy of some other variable, use the value or copy stored in
2238 CONST_AND_COPIES. */
2239 val = SSA_NAME_VALUE (op);
2240 if (val && val != op)
2242 /* Do not replace hard register operands in asm statements. */
2243 if (gimple_code (stmt) == GIMPLE_ASM
2244 && !may_propagate_copy_into_asm (op))
2245 return;
2247 /* Certain operands are not allowed to be copy propagated due
2248 to their interaction with exception handling and some GCC
2249 extensions. */
2250 if (!may_propagate_copy (op, val))
2251 return;
2253 /* Do not propagate copies into simple IV increment statements.
2254 See PR23821 for how this can disturb IV analysis. */
2255 if (TREE_CODE (val) != INTEGER_CST
2256 && simple_iv_increment_p (stmt))
2257 return;
2259 /* Dump details. */
2260 if (dump_file && (dump_flags & TDF_DETAILS))
2262 fprintf (dump_file, " Replaced '");
2263 print_generic_expr (dump_file, op, dump_flags);
2264 fprintf (dump_file, "' with %s '",
2265 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2266 print_generic_expr (dump_file, val, dump_flags);
2267 fprintf (dump_file, "'\n");
2270 if (TREE_CODE (val) != SSA_NAME)
2271 opt_stats.num_const_prop++;
2272 else
2273 opt_stats.num_copy_prop++;
2275 propagate_value (op_p, val);
2277 /* And note that we modified this statement. This is now
2278 safe, even if we changed virtual operands since we will
2279 rescan the statement and rewrite its operands again. */
2280 gimple_set_modified (stmt, true);
2284 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2285 known value for that SSA_NAME (or NULL if no value is known).
2287 Propagate values from CONST_AND_COPIES into the uses, vuses and
2288 vdef_ops of STMT. */
2290 static void
2291 cprop_into_stmt (gimple stmt)
2293 use_operand_p op_p;
2294 ssa_op_iter iter;
2296 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2297 cprop_operand (stmt, op_p);
2300 /* Optimize the statement pointed to by iterator SI.
2302 We try to perform some simplistic global redundancy elimination and
2303 constant propagation:
2305 1- To detect global redundancy, we keep track of expressions that have
2306 been computed in this block and its dominators. If we find that the
2307 same expression is computed more than once, we eliminate repeated
2308 computations by using the target of the first one.
2310 2- Constant values and copy assignments. This is used to do very
2311 simplistic constant and copy propagation. When a constant or copy
2312 assignment is found, we map the value on the RHS of the assignment to
2313 the variable in the LHS in the CONST_AND_COPIES table. */
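/* A small before/after sketch combining both effects (hypothetical
   GIMPLE):

     before:  x_1 = 4;           after:  x_1 = 4;
              y_2 = x_1 + 1;             y_2 = 5;
              z_3 = x_1 + 1;             z_3 = 5;

   Constant propagation rewrites the uses of x_1, folding simplifies
   the sums, and redundancy elimination would catch z_3 even if
   folding did not.  */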
2315 static void
2316 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2318 gimple stmt, old_stmt;
2319 bool may_optimize_p;
2320 bool modified_p = false;
2322 old_stmt = stmt = gsi_stmt (si);
2324 if (dump_file && (dump_flags & TDF_DETAILS))
2326 fprintf (dump_file, "Optimizing statement ");
2327 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2330 if (gimple_code (stmt) == GIMPLE_COND)
2331 canonicalize_comparison (stmt);
2333 update_stmt_if_modified (stmt);
2334 opt_stats.num_stmts++;
2336 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2337 cprop_into_stmt (stmt);
2339 /* If the statement has been modified with constant replacements,
2340 fold its RHS before checking for redundant computations. */
2341 if (gimple_modified_p (stmt))
2343 tree rhs = NULL;
2345 /* Try to fold the statement making sure that STMT is kept
2346 up to date. */
2347 if (fold_stmt (&si))
2349 stmt = gsi_stmt (si);
2350 gimple_set_modified (stmt, true);
2352 if (dump_file && (dump_flags & TDF_DETAILS))
2354 fprintf (dump_file, " Folded to: ");
2355 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2359 /* We only need to consider cases that can yield a gimple operand. */
2360 if (gimple_assign_single_p (stmt))
2361 rhs = gimple_assign_rhs1 (stmt);
2362 else if (gimple_code (stmt) == GIMPLE_GOTO)
2363 rhs = gimple_goto_dest (stmt);
2364 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2365 /* This should never be an ADDR_EXPR. */
2366 rhs = gimple_switch_index (stmt);
2368 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2369 recompute_tree_invariant_for_addr_expr (rhs);
2371 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2372 even if fold_stmt updated the stmt already and thus cleared
2373 gimple_modified_p flag on it. */
2374 modified_p = true;
2377 /* Check for redundant computations. Do this optimization only for
2378 side-effect-free assignments, calls with an LHS, conditionals and switches. */
2379 may_optimize_p = (!gimple_has_side_effects (stmt)
2380 && (is_gimple_assign (stmt)
2381 || (is_gimple_call (stmt)
2382 && gimple_call_lhs (stmt) != NULL_TREE)
2383 || gimple_code (stmt) == GIMPLE_COND
2384 || gimple_code (stmt) == GIMPLE_SWITCH));
2386 if (may_optimize_p)
2388 if (gimple_code (stmt) == GIMPLE_CALL)
2390 /* Resolve __builtin_constant_p. If it hasn't been
2391 folded to integer_one_node by now, it's fairly
2392 certain that the value simply isn't constant. */
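/* E.g. (hypothetical): t_1 = __builtin_constant_p (x_2), where x_2
   is not known to be constant, is replaced by t_1 = 0 here.  */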
2393 tree callee = gimple_call_fndecl (stmt);
2394 if (callee
2395 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2396 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2398 propagate_tree_value_into_stmt (&si, integer_zero_node);
2399 stmt = gsi_stmt (si);
2403 update_stmt_if_modified (stmt);
2404 eliminate_redundant_computations (&si);
2405 stmt = gsi_stmt (si);
2407 /* Perform simple redundant store elimination. */
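/* For example (hypothetical GIMPLE):

     t_1 = *p_2;
     ...
     *p_2 = t_1;

   The store writes back the value just loaded, so looking up the
   exchanged statement t_1 = *p_2 finds t_1 itself and the store
   can be removed.  */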
2408 if (gimple_assign_single_p (stmt)
2409 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2411 tree lhs = gimple_assign_lhs (stmt);
2412 tree rhs = gimple_assign_rhs1 (stmt);
2413 tree cached_lhs;
2414 gimple new_stmt;
2415 if (TREE_CODE (rhs) == SSA_NAME)
2417 tree tem = SSA_NAME_VALUE (rhs);
2418 if (tem)
2419 rhs = tem;
2421 /* Build a new statement with the RHS and LHS exchanged. */
2422 if (TREE_CODE (rhs) == SSA_NAME)
2424 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2425 new_stmt = gimple_build_assign (rhs, lhs);
2426 SSA_NAME_DEF_STMT (rhs) = defstmt;
2428 else
2429 new_stmt = gimple_build_assign (rhs, lhs);
2430 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2431 cached_lhs = lookup_avail_expr (new_stmt, false);
2432 if (cached_lhs
2433 && rhs == cached_lhs)
2435 basic_block bb = gimple_bb (stmt);
2436 unlink_stmt_vdef (stmt);
2437 if (gsi_remove (&si, true))
2439 bitmap_set_bit (need_eh_cleanup, bb->index);
2440 if (dump_file && (dump_flags & TDF_DETAILS))
2441 fprintf (dump_file, " Flagged to clear EH edges.\n");
2443 release_defs (stmt);
2444 return;
2449 /* Record any additional equivalences created by this statement. */
2450 if (is_gimple_assign (stmt))
2451 record_equivalences_from_stmt (stmt, may_optimize_p);
2453 /* If STMT is a COND_EXPR and it was modified, then we may know
2454 where it goes. If that is the case, then mark the CFG as altered.
2456 This will cause us to later call remove_unreachable_blocks and
2457 cleanup_tree_cfg when it is safe to do so. It is not safe to
2458 clean things up here since removal of edges and such can trigger
2459 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2460 the manager.
2462 That's all fine and good, except that once SSA_NAMEs are released
2463 to the manager, we must not call create_ssa_name until all references
2464 to released SSA_NAMEs have been eliminated.
2466 All references to the deleted SSA_NAMEs cannot be eliminated until
2467 we remove unreachable blocks.
2469 We cannot remove unreachable blocks until after we have completed
2470 any queued jump threading.
2472 We cannot complete any queued jump threads until we have taken
2473 appropriate variables out of SSA form. Taking variables out of
2474 SSA form can call create_ssa_name and thus we lose.
2476 Ultimately I suspect we're going to need to change the interface
2477 into the SSA_NAME manager. */
2478 if (gimple_modified_p (stmt) || modified_p)
2480 tree val = NULL;
2482 update_stmt_if_modified (stmt);
2484 if (gimple_code (stmt) == GIMPLE_COND)
2485 val = fold_binary_loc (gimple_location (stmt),
2486 gimple_cond_code (stmt), boolean_type_node,
2487 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2488 else if (gimple_code (stmt) == GIMPLE_SWITCH)
2489 val = gimple_switch_index (stmt);
2491 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2492 cfg_altered = true;
2494 /* If we simplified a statement in such a way that it can be shown
2495 not to trap, update the EH information and the CFG to match. */
2496 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2498 bitmap_set_bit (need_eh_cleanup, bb->index);
2499 if (dump_file && (dump_flags & TDF_DETAILS))
2500 fprintf (dump_file, " Flagged to clear EH edges.\n");
2505 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2506 If found, return its LHS. Otherwise insert STMT in the table and
2507 return NULL_TREE.
2509 Also, when an expression is first inserted in the table, it is
2510 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2511 we finish processing this block and its children. */
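/* Usage sketch (hypothetical GIMPLE): the first
   lookup_avail_expr (stmt, true) for a_1 = b_2 + c_3 finds no entry,
   records one and returns NULL_TREE; a later lookup for
   d_4 = b_2 + c_3 returns a_1.  */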
2513 static tree
2514 lookup_avail_expr (gimple stmt, bool insert)
2516 expr_hash_elt **slot;
2517 tree lhs;
2518 tree temp;
2519 struct expr_hash_elt element;
2521 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2522 if (gimple_code (stmt) == GIMPLE_PHI)
2523 lhs = gimple_phi_result (stmt);
2524 else
2525 lhs = gimple_get_lhs (stmt);
2527 initialize_hash_element (stmt, lhs, &element);
2529 if (dump_file && (dump_flags & TDF_DETAILS))
2531 fprintf (dump_file, "LKUP ");
2532 print_expr_hash_elt (dump_file, &element);
2535 /* Don't bother remembering constant assignments and copy operations.
2536 Constants and copy operations are handled by the constant/copy propagator
2537 in optimize_stmt. */
2538 if (element.expr.kind == EXPR_SINGLE
2539 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2540 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2541 return NULL_TREE;
2543 /* Finally try to find the expression in the main expression hash table. */
2544 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2545 if (slot == NULL)
2547 free_expr_hash_elt_contents (&element);
2548 return NULL_TREE;
2550 else if (*slot == NULL)
2552 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2553 *element2 = element;
2554 element2->stamp = element2;
2555 *slot = element2;
2557 if (dump_file && (dump_flags & TDF_DETAILS))
2559 fprintf (dump_file, "2>>> ");
2560 print_expr_hash_elt (dump_file, element2);
2563 avail_exprs_stack.safe_push (element2);
2564 return NULL_TREE;
2566 else
2567 free_expr_hash_elt_contents (&element);
2569 /* Extract the LHS of the assignment so that it can be used as the current
2570 definition of another variable. */
2571 lhs = ((struct expr_hash_elt *)*slot)->lhs;
2573 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2574 use the value from the const_and_copies table. */
2575 if (TREE_CODE (lhs) == SSA_NAME)
2577 temp = SSA_NAME_VALUE (lhs);
2578 if (temp)
2579 lhs = temp;
2582 if (dump_file && (dump_flags & TDF_DETAILS))
2584 fprintf (dump_file, "FIND: ");
2585 print_generic_expr (dump_file, lhs, 0);
2586 fprintf (dump_file, "\n");
2589 return lhs;
2592 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2593 for expressions using the code of the expression and the SSA numbers of
2594 its operands. */
2596 static hashval_t
2597 avail_expr_hash (const void *p)
2599 gimple stmt = ((const struct expr_hash_elt *)p)->stmt;
2600 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2601 tree vuse;
2602 hashval_t val = 0;
2604 val = iterative_hash_hashable_expr (expr, val);
2606 /* If the hash table entry is not associated with a statement, then we
2607 can just hash the expression and not worry about virtual operands
2608 and such. */
2609 if (!stmt)
2610 return val;
2612 /* Add the SSA version number of the vuse operand. This is important
2613 because compound variables like arrays are not renamed in the
2614 operands. Rather, the rename is done on the virtual variable
2615 representing all the elements of the array. */
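/* E.g. (hypothetical GIMPLE): two loads t_1 = a[i_2] only hash the
   same when they share the same virtual operand, say .MEM_7; an
   intervening store creates a new virtual definition, so the stale
   table entry no longer matches.  */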
2616 if ((vuse = gimple_vuse (stmt)))
2617 val = iterative_hash_expr (vuse, val);
2619 return val;
2622 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2623 up degenerate PHIs created or exposed by jump threading. */
2625 /* Given a statement STMT, which is either a PHI node or an assignment,
2626 remove it from the IL. */
2628 static void
2629 remove_stmt_or_phi (gimple stmt)
2631 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2633 if (gimple_code (stmt) == GIMPLE_PHI)
2634 remove_phi_node (&gsi, true);
2635 else
2637 gsi_remove (&gsi, true);
2638 release_defs (stmt);
2642 /* Given a statement STMT, which is either a PHI node or an assignment,
2643 return the "rhs" of the node. In the case of a non-degenerate
2644 PHI, NULL is returned. */
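/* E.g. (hypothetical GIMPLE): x_3 = PHI <a_1(2), a_1(5)> is
   degenerate and yields a_1, whereas x_3 = PHI <a_1(2), b_4(5)>
   yields NULL.  */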
2646 static tree
2647 get_rhs_or_phi_arg (gimple stmt)
2649 if (gimple_code (stmt) == GIMPLE_PHI)
2650 return degenerate_phi_result (stmt);
2651 else if (gimple_assign_single_p (stmt))
2652 return gimple_assign_rhs1 (stmt);
2653 else
2654 gcc_unreachable ();
2658 /* Given a statement STMT, which is either a PHI node or an assignment,
2659 return the "lhs" of the node. */
2661 static tree
2662 get_lhs_or_phi_result (gimple stmt)
2664 if (gimple_code (stmt) == GIMPLE_PHI)
2665 return gimple_phi_result (stmt);
2666 else if (is_gimple_assign (stmt))
2667 return gimple_assign_lhs (stmt);
2668 else
2669 gcc_unreachable ();
2672 /* Propagate RHS into all uses of LHS (when possible).
2674 RHS and LHS are derived from STMT, which is passed in solely so
2675 that we can remove it if propagation is successful.
2677 When propagating into a PHI node or into a statement which turns
2678 into a trivial copy or constant initialization, set the
2679 appropriate bit in INTERESTING_NAMEs so that we will visit those
2680 nodes as well in an effort to pick up secondary optimization
2681 opportunities. */
2683 static void
2684 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2686 /* First verify that propagation is valid. */
2687 if (may_propagate_copy (lhs, rhs))
2689 use_operand_p use_p;
2690 imm_use_iterator iter;
2691 gimple use_stmt;
2692 bool all = true;
2694 /* Dump details. */
2695 if (dump_file && (dump_flags & TDF_DETAILS))
2697 fprintf (dump_file, " Replacing '");
2698 print_generic_expr (dump_file, lhs, dump_flags);
2699 fprintf (dump_file, "' with %s '",
2700 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2701 print_generic_expr (dump_file, rhs, dump_flags);
2702 fprintf (dump_file, "'\n");
2705 /* Walk over every use of LHS and try to replace the use with RHS.
2706 At this point the only reason why such a propagation would not
2707 be successful would be if the use occurs in an ASM_EXPR. */
2708 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2710 /* Leave debug stmts alone. If we succeed in propagating
2711 all non-debug uses, we'll drop the DEF, and propagation
2712 into debug stmts will occur then. */
2713 if (gimple_debug_bind_p (use_stmt))
2714 continue;
2716 /* It's not always safe to propagate into an ASM_EXPR. */
2717 if (gimple_code (use_stmt) == GIMPLE_ASM
2718 && ! may_propagate_copy_into_asm (lhs))
2720 all = false;
2721 continue;
2724 /* It's not ok to propagate into the definition stmt of RHS.
2725 <bb 9>:
2726 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2727 g_67.1_6 = prephitmp.12_36;
2728 goto <bb 9>;
2729 While this is strictly all dead code we do not want to
2730 deal with this here. */
2731 if (TREE_CODE (rhs) == SSA_NAME
2732 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2734 all = false;
2735 continue;
2738 /* Dump details. */
2739 if (dump_file && (dump_flags & TDF_DETAILS))
2741 fprintf (dump_file, " Original statement:");
2742 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2745 /* Propagate the RHS into this use of the LHS. */
2746 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2747 propagate_value (use_p, rhs);
2749 /* Special cases to avoid useless calls into the folding
2750 routines, operand scanning, etc.
2752 Propagation into a PHI may cause the PHI to become
2753 a degenerate, so mark the PHI as interesting. No other
2754 actions are necessary. */
2755 if (gimple_code (use_stmt) == GIMPLE_PHI)
2757 tree result;
2759 /* Dump details. */
2760 if (dump_file && (dump_flags & TDF_DETAILS))
2762 fprintf (dump_file, " Updated statement:");
2763 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2766 result = get_lhs_or_phi_result (use_stmt);
2767 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2768 continue;
2771 /* From this point onward we are propagating into a
2772 real statement. Folding may (or may not) be possible;
2773 we may expose new operands, expose dead EH edges,
2774 etc. */
2775 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2776 cannot fold a call that simplifies to a constant,
2777 because the GIMPLE_CALL must be replaced by a
2778 GIMPLE_ASSIGN, and there is no way to effect such a
2779 transformation in-place. We might want to consider
2780 using the more general fold_stmt here. */
2782 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2783 fold_stmt_inplace (&gsi);
2786 /* Sometimes propagation can expose new operands to the
2787 renamer. */
2788 update_stmt (use_stmt);
2790 /* Dump details. */
2791 if (dump_file && (dump_flags & TDF_DETAILS))
2793 fprintf (dump_file, " Updated statement:");
2794 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2797 /* If we replaced a variable index with a constant, then
2798 we would need to update the invariant flag for ADDR_EXPRs. */
2799 if (gimple_assign_single_p (use_stmt)
2800 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2801 recompute_tree_invariant_for_addr_expr
2802 (gimple_assign_rhs1 (use_stmt));
2804 /* If we cleaned up EH information from the statement,
2805 mark its containing block as needing EH cleanups. */
2806 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2808 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2809 if (dump_file && (dump_flags & TDF_DETAILS))
2810 fprintf (dump_file, " Flagged to clear EH edges.\n");
2813 /* Propagation may expose new trivial copy/constant propagation
2814 opportunities. */
2815 if (gimple_assign_single_p (use_stmt)
2816 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2817 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2818 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2820 tree result = get_lhs_or_phi_result (use_stmt);
2821 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2824 /* Propagation into these nodes may make certain edges in
2825 the CFG unexecutable. We want to identify them, as PHI nodes
2826 at the destination of those unexecutable edges may become
2827 degenerate. */
2828 else if (gimple_code (use_stmt) == GIMPLE_COND
2829 || gimple_code (use_stmt) == GIMPLE_SWITCH
2830 || gimple_code (use_stmt) == GIMPLE_GOTO)
2832 tree val;
2834 if (gimple_code (use_stmt) == GIMPLE_COND)
2835 val = fold_binary_loc (gimple_location (use_stmt),
2836 gimple_cond_code (use_stmt),
2837 boolean_type_node,
2838 gimple_cond_lhs (use_stmt),
2839 gimple_cond_rhs (use_stmt));
2840 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2841 val = gimple_switch_index (use_stmt);
2842 else
2843 val = gimple_goto_dest (use_stmt);
2845 if (val && is_gimple_min_invariant (val))
2847 basic_block bb = gimple_bb (use_stmt);
2848 edge te = find_taken_edge (bb, val);
2849 edge_iterator ei;
2850 edge e;
2851 gimple_stmt_iterator gsi, psi;
2853 /* Remove all outgoing edges except TE. */
2854 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2856 if (e != te)
2858 /* Mark all the PHI nodes at the destination of
2859 the unexecutable edge as interesting. */
2860 for (psi = gsi_start_phis (e->dest);
2861 !gsi_end_p (psi);
2862 gsi_next (&psi))
2864 gimple phi = gsi_stmt (psi);
2866 tree result = gimple_phi_result (phi);
2867 int version = SSA_NAME_VERSION (result);
2869 bitmap_set_bit (interesting_names, version);
2872 te->probability += e->probability;
2874 te->count += e->count;
2875 remove_edge (e);
2876 cfg_altered = true;
2878 else
2879 ei_next (&ei);
2882 gsi = gsi_last_bb (gimple_bb (use_stmt));
2883 gsi_remove (&gsi, true);
2885 /* And fixup the flags on the single remaining edge. */
2886 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2887 te->flags &= ~EDGE_ABNORMAL;
2888 te->flags |= EDGE_FALLTHRU;
2889 if (te->probability > REG_BR_PROB_BASE)
2890 te->probability = REG_BR_PROB_BASE;
2895 /* Ensure there is nothing else to do. */
2896 gcc_assert (!all || has_zero_uses (lhs));
2898 /* If we were able to propagate away all uses of LHS, then
2899 we can remove STMT. */
2900 if (all)
2901 remove_stmt_or_phi (stmt);
2905 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2906 a statement that is a trivial copy or constant initialization.
2908 Attempt to eliminate T by propagating its RHS into all uses of
2909 its LHS. This may in turn set new bits in INTERESTING_NAMES
2910 for nodes we want to revisit later.
2912 All exit paths should clear INTERESTING_NAMES for the result
2913 of STMT. */
2915 static void
2916 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2918 tree lhs = get_lhs_or_phi_result (stmt);
2919 tree rhs;
2920 int version = SSA_NAME_VERSION (lhs);
2922 /* If the LHS of this statement or PHI has no uses, then we can
2923 just eliminate it. This can occur if, for example, the PHI
2924 was created by block duplication due to threading and its only
2925 use was in the conditional at the end of the block which was
2926 deleted. */
2927 if (has_zero_uses (lhs))
2929 bitmap_clear_bit (interesting_names, version);
2930 remove_stmt_or_phi (stmt);
2931 return;
2934 /* Get the RHS of the assignment or PHI node if the PHI is a
2935 degenerate. */
2936 rhs = get_rhs_or_phi_arg (stmt);
2937 if (!rhs)
2939 bitmap_clear_bit (interesting_names, version);
2940 return;
2943 if (!virtual_operand_p (lhs))
2944 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
2945 else
2947 gimple use_stmt;
2948 imm_use_iterator iter;
2949 use_operand_p use_p;
2950 /* For virtual operands we have to propagate into all uses as
2951 otherwise we will create overlapping live ranges. */
2952 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2953 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2954 SET_USE (use_p, rhs);
2955 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
2956 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
2957 remove_stmt_or_phi (stmt);
2960 /* Note that STMT may well have been deleted by now, so do
2961 not access it, instead use the saved version # to clear
2962 T's entry in the worklist. */
2963 bitmap_clear_bit (interesting_names, version);
2966 /* The first phase in degenerate PHI elimination.
2968 Eliminate the degenerate PHIs in BB, then recurse on the
2969 dominator children of BB. */
2971 static void
2972 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
2974 gimple_stmt_iterator gsi;
2975 basic_block son;
2977 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2979 gimple phi = gsi_stmt (gsi);
2981 eliminate_const_or_copy (phi, interesting_names);
2984 /* Recurse into the dominator children of BB. */
2985 for (son = first_dom_son (CDI_DOMINATORS, bb);
2986 son;
2987 son = next_dom_son (CDI_DOMINATORS, son))
2988 eliminate_degenerate_phis_1 (son, interesting_names);
2992 /* A very simple pass to eliminate degenerate PHI nodes from the
2993 IL. This is meant to be fast enough to be run several
2994 times in the optimization pipeline.
2996 Certain optimizations, particularly those which duplicate blocks
2997 or remove edges from the CFG can create or expose PHIs which are
2998 trivial copies or constant initializations.
3000 While we could pick up these optimizations in DOM or with the
3001 combination of copy-prop and CCP, those solutions are far too
3002 heavy-weight for our needs.
3004 This implementation has two phases so that we can efficiently
3005 eliminate first-order degenerate PHIs and second-order
3006 degenerate PHIs.
3008 The first phase performs a dominator walk to identify and eliminate
3009 the vast majority of the degenerate PHIs. When a degenerate PHI
3010 is identified and eliminated any affected statements or PHIs
3011 are put on a worklist.
3013 The second phase eliminates degenerate PHIs and trivial copies
3014 or constant initializations using the worklist. This is how we
3015 pick up the secondary optimization opportunities with minimal
3016 cost. */
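/* A sketch of first- vs. second-order degeneracy (hypothetical
   GIMPLE): eliminating the first-order PHI

     x_2 = PHI <a_1(3), a_1(4)>

   replaces uses of x_2 with a_1, which may turn a downstream

     y_5 = PHI <x_2(6), a_1(7)>

   into the second-order degenerate y_5 = PHI <a_1(6), a_1(7)>;
   the worklist picks that up in the second phase.  */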
3018 namespace {
3020 const pass_data pass_data_phi_only_cprop =
3022 GIMPLE_PASS, /* type */
3023 "phicprop", /* name */
3024 OPTGROUP_NONE, /* optinfo_flags */
3025 TV_TREE_PHI_CPROP, /* tv_id */
3026 ( PROP_cfg | PROP_ssa ), /* properties_required */
3027 0, /* properties_provided */
3028 0, /* properties_destroyed */
3029 0, /* todo_flags_start */
3030 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3033 class pass_phi_only_cprop : public gimple_opt_pass
3035 public:
3036 pass_phi_only_cprop (gcc::context *ctxt)
3037 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3040 /* opt_pass methods: */
3041 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3042 virtual bool gate (function *) { return flag_tree_dom != 0; }
3043 virtual unsigned int execute (function *);
3045 }; // class pass_phi_only_cprop
3047 unsigned int
3048 pass_phi_only_cprop::execute (function *fun)
3050 bitmap interesting_names;
3051 bitmap interesting_names1;
3053 /* Bitmap of blocks which need EH information updated. We cannot
3054 update it on-the-fly, as doing so invalidates the dominator tree. */
3055 need_eh_cleanup = BITMAP_ALLOC (NULL);
3057 /* INTERESTING_NAMES is effectively our worklist, indexed by
3058 SSA_NAME_VERSION.
3060 A set bit indicates that the statement or PHI node which
3061 defines the SSA_NAME should be (re)examined to determine if
3062 it has become a degenerate PHI or trivial const/copy propagation
3063 opportunity.
3065 Experiments have shown that we generally get better
3066 compilation-time behavior with bitmaps than with sbitmaps. */
3067 interesting_names = BITMAP_ALLOC (NULL);
3068 interesting_names1 = BITMAP_ALLOC (NULL);
3070 calculate_dominance_info (CDI_DOMINATORS);
3071 cfg_altered = false;
3073 /* First phase. Eliminate degenerate PHIs via a dominator
3074 walk of the CFG.
3076 Experiments have indicated that we generally get better
3077 compile-time behavior by visiting blocks in the first
3078 phase in dominator order. Presumably this is because walking
3079 in dominator order leaves fewer PHIs for later examination
3080 by the worklist phase. */
3081 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3082 interesting_names);
3084 /* Second phase. Eliminate second order degenerate PHIs as well
3085 as trivial copies or constant initializations identified by
3086 the first phase or this phase. Basically we keep iterating
3087 until our set of INTERESTING_NAMEs is empty. */
3088 while (!bitmap_empty_p (interesting_names))
3090 unsigned int i;
3091 bitmap_iterator bi;
3093 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3094 changed during the loop. Copy it to another bitmap and
3095 use that. */
3096 bitmap_copy (interesting_names1, interesting_names);
3098 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3100 tree name = ssa_name (i);
3102 /* Ignore SSA_NAMEs that have been released because
3103 their defining statement was deleted (unreachable). */
3104 if (name)
3105 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3106 interesting_names);
3110 if (cfg_altered)
3112 free_dominance_info (CDI_DOMINATORS);
3113 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3114 loops_state_set (LOOPS_NEED_FIXUP);
3117 /* Propagation of const and copies may make some EH edges dead. Purge
3118 such edges from the CFG as needed. */
3119 if (!bitmap_empty_p (need_eh_cleanup))
3121 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3122 BITMAP_FREE (need_eh_cleanup);
3125 BITMAP_FREE (interesting_names);
3126 BITMAP_FREE (interesting_names1);
3127 return 0;
3130 } // anon namespace
3132 gimple_opt_pass *
3133 make_pass_phi_only_cprop (gcc::context *ctxt)
3135 return new pass_phi_only_cprop (ctxt);