/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
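
/* Illustrative example (editorial note, not part of the original
   sources): the GIMPLE assignment

     x_1 = a_2 + b_3;

   would be represented as a hashable_expr with kind == EXPR_BINARY,
   type == TREE_TYPE (x_1), ops.binary.op == PLUS_EXPR, and
   ops.binary.opnd0/opnd1 pointing at a_2 and b_3.  */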
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
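
/* Illustrative example (editorial note, not part of the original
   sources): for a block ending in

     if (x_1 == 42)

   the true edge would carry an edge_info with lhs == x_1 and
   rhs == 42, and both outgoing edges would carry cond_equivalences
   recording the comparison itself as true or false respectively.  */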
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
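
/* Illustrative sketch of the unwinding discipline (editorial note, not
   part of the original sources):

     (NULL, NULL)       <- marker pushed on entry to block B
     (elt1, NULL)       <- expression made available in B
     (elt2, old_elt)    <- expression in B that shadowed OLD_ELT

   When B has been processed we pop until the marker, restoring OLD_ELT
   where present and clearing the hash table slot otherwise.  */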
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

/* Unwindable const/copy equivalences.  */
static const_and_copies *const_and_copies;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
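
/* Illustrative example (editorial note, not part of the original
   sources): a_1 + b_2 and b_2 + a_1 compare equal here because
   PLUS_EXPR is a commutative tree code, whereas a_1 - b_2 and
   b_2 - a_1 do not, since MINUS_EXPR is not.  */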
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash state HSTATE.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical HSTATE.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto the
   vector *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
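
/* Illustrative usage (editorial note, not part of the original
   sources): a caller that knows a_1 < b_2 holds can record the weaker
   condition a_1 <= b_2 with

     build_and_record_new_cond (LE_EXPR, a_1, b_2,
                                &edge_info->cond_equivalences);

   which pushes a cond_equivalence whose value is boolean_true_node.  */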
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions themselves.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
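
/* Illustrative example (editorial note, not part of the original
   sources): for an integral comparison a_1 < b_2 on the true edge, the
   switch above records a_1 <= b_2 and a_1 != b_2 as true, and the tail
   of the function then records a_1 < b_2 itself as true and the
   inverted condition as false.  */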
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  I.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
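
/* Illustrative example (editorial note, not part of the original
   sources): for

     switch (i_1) { case 5: ... }

   the edge to the "case 5" label (which has a CASE_LOW but no
   CASE_HIGH, and a target block reached by no other label) gets an
   edge_info with lhs == i_1 and rhs == 5, allowing i_1 to be replaced
   by 5 in the region dominated by that edge.  */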
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies = new class const_and_copies (dump_file, dump_flags);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  Once jump threading is improved, LOOPS_HAVE_PREHEADERS
     won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
/* Pop entries off AVAIL_EXPRS_STACK and remove the corresponding
   expressions from the global hash table AVAIL_EXPRS, stopping when the
   unwinding marker (a NULL entry) is reached.  Entries that shadowed an
   older table entry have that entry restored.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        const_and_copies->record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        const_and_copies,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* Valueize t.  */
          if (TREE_CODE (t) == SSA_NAME)
            {
              tree tmp = SSA_NAME_VALUE (t);
              t = tmp ? tmp : t;
            }

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
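
/* Illustrative example (editorial note, not part of the original
   sources): for

     x_3 = PHI <y_2 (bb1), y_2 (bb2), x_3 (back edge)>

   every alternative is either y_2 or the result itself, so x_3 is
   equivalent to y_2 and SSA_NAME_VALUE (x_3) is set accordingly.  */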
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
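
/* Illustrative example (editorial note, not part of the original
   sources): given

     short s_2;
     int i_1 = (int) s_2;
     if (i_1 == 5) ...

   the true edge records i_1 == 5; since the conversion widens s_2 and
   the constant 5 fits in short, the code above also records s_2 == 5.  */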
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as an assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y, false))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X.  This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) < loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}
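
/* Illustrative example (editorial note, not part of the original
   sources): when honoring signed zeros, x_1 == 0.0 does not let us
   record x_1 = 0.0, because x_1 may actually be -0.0; the equivalence
   is only recorded when the real constant is known to be nonzero.  */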
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            const_and_copies->record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}
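
/* Illustrative example (editorial note, not part of the original
   sources): if the edge from BB to a successor carries the equivalence
   x_1 = 7 and the successor contains

     y_2 = PHI <x_1 (BB), ...>

   then the x_1 argument is replaced by 7, even though BB does not
   dominate the successor block.  */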
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
1962 /* We have finished processing the dominator children of BB, perform
1963 any finalization actions in preparation for leaving this node in
1964 the dominator tree. */
1966 void
1967 dom_opt_dom_walker::after_dom_children (basic_block bb)
1969 gimple last;
1971 /* If we have an outgoing edge to a block with multiple incoming and
1972 outgoing edges, then we may be able to thread the edge, i.e., we
1973 may be able to statically determine which of the outgoing edges
1974 will be traversed when the incoming edge from BB is traversed. */
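  /* For instance (hypothetical names): if walking into BB recorded
     x_1 == 0 and BB's lone successor ends in "if (x_1 == 0)", then the
     true arm is statically known and the edge can be threaded past
     that test.  */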
1975 if (single_succ_p (bb)
1976 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1977 && potentially_threadable_block (single_succ (bb)))
1979 thread_across_edge (single_succ_edge (bb));
1981 else if ((last = last_stmt (bb))
1982 && gimple_code (last) == GIMPLE_COND
1983 && EDGE_COUNT (bb->succs) == 2
1984 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1985 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1987 edge true_edge, false_edge;
1989 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1991 /* Only try to thread the edge if it reaches a target block with
1992 more than one predecessor and more than one successor. */
1993 if (potentially_threadable_block (true_edge->dest))
1994 thread_across_edge (true_edge);
1996 /* Similarly for the ELSE arm. */
1997 if (potentially_threadable_block (false_edge->dest))
1998 thread_across_edge (false_edge);
2002 /* These remove expressions local to BB from the tables. */
2003 remove_local_expressions_from_table ();
2004 const_and_copies->pop_to_marker ();
2007 /* Search for redundant computations in STMT. If any are found, then
2008 replace them with the variable holding the result of the computation.
2010 If safe, record this expression into the available expression hash
2011 table. */
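/* For example, with made-up names:

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   The second computation is redundant; uses of d_4 can be served by
   a_1 instead.  */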
2013 static void
2014 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2016 tree expr_type;
2017 tree cached_lhs;
2018 tree def;
2019 bool insert = true;
2020 bool assigns_var_p = false;
2022 gimple stmt = gsi_stmt (*gsi);
2024 if (gimple_code (stmt) == GIMPLE_PHI)
2025 def = gimple_phi_result (stmt);
2026 else
2027 def = gimple_get_lhs (stmt);
2029 /* Certain expressions on the RHS can be optimized away, but cannot
2030 themselves be entered into the hash tables. */
2031 if (! def
2032 || TREE_CODE (def) != SSA_NAME
2033 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2034 || gimple_vdef (stmt)
2035 /* Do not record equivalences for increments of ivs. This would create
2036 overlapping live ranges for a very questionable gain. */
2037 || simple_iv_increment_p (stmt))
2038 insert = false;
2040 /* Check if the expression has been computed before. */
2041 cached_lhs = lookup_avail_expr (stmt, insert);
2043 opt_stats.num_exprs_considered++;
2045 /* Get the type of the expression we are trying to optimize. */
2046 if (is_gimple_assign (stmt))
2048 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2049 assigns_var_p = true;
2051 else if (gimple_code (stmt) == GIMPLE_COND)
2052 expr_type = boolean_type_node;
2053 else if (is_gimple_call (stmt))
2055 gcc_assert (gimple_call_lhs (stmt));
2056 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2057 assigns_var_p = true;
2059 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2060 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2061 else if (gimple_code (stmt) == GIMPLE_PHI)
2062 /* We can't propagate into a phi, so the logic below doesn't apply.
2063 Instead record an equivalence between the cached LHS and the
2064 PHI result of this statement, provided they are in the same block.
2065 This should be sufficient to kill the redundant phi. */
2067 if (def && cached_lhs)
2068 const_and_copies->record_const_or_copy (def, cached_lhs);
2069 return;
2071 else
2072 gcc_unreachable ();
2074 if (!cached_lhs)
2075 return;
2077 /* It is safe to ignore types here since we have already done
2078 type checking in the hashing and equality routines. In fact
2079 type checking here merely gets in the way of constant
2080 propagation. Also, make sure that it is safe to propagate
2081 CACHED_LHS into the expression in STMT. */
2082 if ((TREE_CODE (cached_lhs) != SSA_NAME
2083 && (assigns_var_p
2084 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2085 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2087 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2088 || is_gimple_min_invariant (cached_lhs));
2090 if (dump_file && (dump_flags & TDF_DETAILS))
2092 fprintf (dump_file, " Replaced redundant expr '");
2093 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2094 fprintf (dump_file, "' with '");
2095 print_generic_expr (dump_file, cached_lhs, dump_flags);
2096 fprintf (dump_file, "'\n");
2099 opt_stats.num_re++;
2101 if (assigns_var_p
2102 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2103 cached_lhs = fold_convert (expr_type, cached_lhs);
2105 propagate_tree_value_into_stmt (gsi, cached_lhs);
2107 /* Since it is always necessary to mark the result as modified,
2108 perhaps we should move this into propagate_tree_value_into_stmt
2109 itself. */
2110 gimple_set_modified (gsi_stmt (*gsi), true);
2114 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2115 the available expressions table or the const_and_copies table.
2116 Detect and record those equivalences. */
2117 /* We handle only very simple copy equivalences here. The heavy
2118 lifting is done by eliminate_redundant_computations. */
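/* E.g. (illustrative): "x_1 = y_2;" records the copy x_1 -> y_2 and
   "x_1 = 42;" records the constant x_1 -> 42 in CONST_AND_COPIES.  */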
2120 static void
2121 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2123 tree lhs;
2124 enum tree_code lhs_code;
2126 gcc_assert (is_gimple_assign (stmt));
2128 lhs = gimple_assign_lhs (stmt);
2129 lhs_code = TREE_CODE (lhs);
2131 if (lhs_code == SSA_NAME
2132 && gimple_assign_single_p (stmt))
2134 tree rhs = gimple_assign_rhs1 (stmt);
2136 /* If the RHS of the assignment is a constant or another variable that
2137 may be propagated, register it in the CONST_AND_COPIES table. We
2138 do not need to record unwind data for this, since this is a true
2139 assignment and not an equivalence inferred from a comparison. All
2140 uses of this ssa name are dominated by this assignment, so unwinding
2141 just costs time and space. */
2142 if (may_optimize_p
2143 && (TREE_CODE (rhs) == SSA_NAME
2144 || is_gimple_min_invariant (rhs)))
2146 /* Valueize rhs. */
2147 if (TREE_CODE (rhs) == SSA_NAME)
2149 tree tmp = SSA_NAME_VALUE (rhs);
2150 rhs = tmp ? tmp : rhs;
2153 if (dump_file && (dump_flags & TDF_DETAILS))
2155 fprintf (dump_file, "==== ASGN ");
2156 print_generic_expr (dump_file, lhs, 0);
2157 fprintf (dump_file, " = ");
2158 print_generic_expr (dump_file, rhs, 0);
2159 fprintf (dump_file, "\n");
2162 set_ssa_name_value (lhs, rhs);
2166 /* Make sure we can propagate &x + CST. */
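  /* Illustratively, "p_1 = &x p+ 4" is re-expressed below as roughly
     "p_1 = &MEM[(void *)&x + 4B]" (dump notation approximate) so the
     whole address can be propagated as a unit.  */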
2167 if (lhs_code == SSA_NAME
2168 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2169 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2170 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2172 tree op0 = gimple_assign_rhs1 (stmt);
2173 tree op1 = gimple_assign_rhs2 (stmt);
2174 tree new_rhs
2175 = build_fold_addr_expr (fold_build2 (MEM_REF,
2176 TREE_TYPE (TREE_TYPE (op0)),
2177 unshare_expr (op0),
2178 fold_convert (ptr_type_node,
2179 op1)));
2180 if (dump_file && (dump_flags & TDF_DETAILS))
2182 fprintf (dump_file, "==== ASGN ");
2183 print_generic_expr (dump_file, lhs, 0);
2184 fprintf (dump_file, " = ");
2185 print_generic_expr (dump_file, new_rhs, 0);
2186 fprintf (dump_file, "\n");
2189 set_ssa_name_value (lhs, new_rhs);
2192 /* A memory store, even an aliased store, creates a useful
2193 equivalence. By exchanging the LHS and RHS, creating suitable
2194 vops and recording the result in the available expression table,
2195 we may be able to expose more redundant loads. */
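  /* Sketch with made-up names: after "*p_1 = x_2;" the artificial
     statement "x_2 = *p_1;" is entered into the table, so a dominated
     load from *p_1 can be replaced by x_2.  */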
2196 if (!gimple_has_volatile_ops (stmt)
2197 && gimple_references_memory_p (stmt)
2198 && gimple_assign_single_p (stmt)
2199 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2200 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2201 && !is_gimple_reg (lhs))
2203 tree rhs = gimple_assign_rhs1 (stmt);
2204 gassign *new_stmt;
2206 /* Build a new statement with the RHS and LHS exchanged. */
2207 if (TREE_CODE (rhs) == SSA_NAME)
2209 /* NOTE tuples. The call to gimple_build_assign below replaced
2210 a call to build_gimple_modify_stmt, which did not set the
2211 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2212 may cause an SSA validation failure, as the LHS may be a
2213 default-initialized name and should have no definition. I'm
2214 a bit dubious of this, as the artificial statement that we
2215 generate here may in fact be ill-formed, but it is simply
2216 used as an internal device in this pass, and never becomes
2217 part of the CFG. */
2218 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2219 new_stmt = gimple_build_assign (rhs, lhs);
2220 SSA_NAME_DEF_STMT (rhs) = defstmt;
2222 else
2223 new_stmt = gimple_build_assign (rhs, lhs);
2225 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2227 /* Finally enter the statement into the available expression
2228 table. */
2229 lookup_avail_expr (new_stmt, true);
2233 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2234 CONST_AND_COPIES. */
2236 static void
2237 cprop_operand (gimple stmt, use_operand_p op_p)
2239 tree val;
2240 tree op = USE_FROM_PTR (op_p);
2242 /* If the operand has a known constant value or it is known to be a
2243 copy of some other variable, use the value or copy stored in
2244 CONST_AND_COPIES. */
2245 val = SSA_NAME_VALUE (op);
2246 if (val && val != op)
2248 /* Do not replace hard register operands in asm statements. */
2249 if (gimple_code (stmt) == GIMPLE_ASM
2250 && !may_propagate_copy_into_asm (op))
2251 return;
2253 /* Certain operands are not allowed to be copy propagated due
2254 to their interaction with exception handling and some GCC
2255 extensions. */
2256 if (!may_propagate_copy (op, val))
2257 return;
2259 /* Do not propagate copies into BIVs.
2260 See PR23821 and PR62217 for how this can disturb IV and
2261 number of iteration analysis. */
2262 if (TREE_CODE (val) != INTEGER_CST)
2264 gimple def = SSA_NAME_DEF_STMT (op);
2265 if (gimple_code (def) == GIMPLE_PHI
2266 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2267 return;
2270 /* Dump details. */
2271 if (dump_file && (dump_flags & TDF_DETAILS))
2273 fprintf (dump_file, " Replaced '");
2274 print_generic_expr (dump_file, op, dump_flags);
2275 fprintf (dump_file, "' with %s '",
2276 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2277 print_generic_expr (dump_file, val, dump_flags);
2278 fprintf (dump_file, "'\n");
2281 if (TREE_CODE (val) != SSA_NAME)
2282 opt_stats.num_const_prop++;
2283 else
2284 opt_stats.num_copy_prop++;
2286 propagate_value (op_p, val);
2288 /* And note that we modified this statement. This is now
2289 safe, even if we changed virtual operands since we will
2290 rescan the statement and rewrite its operands again. */
2291 gimple_set_modified (stmt, true);
2295 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2296 known value for that SSA_NAME (or NULL if no value is known).
2298 Propagate values from CONST_AND_COPIES into the use operands of
2299 STMT. */
2301 static void
2302 cprop_into_stmt (gimple stmt)
2304 use_operand_p op_p;
2305 ssa_op_iter iter;
2307 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2308 cprop_operand (stmt, op_p);
2311 /* Optimize the statement pointed to by iterator SI.
2313 We try to perform some simplistic global redundancy elimination and
2314 constant propagation:
2316 1- To detect global redundancy, we keep track of expressions that have
2317 been computed in this block and its dominators. If we find that the
2318 same expression is computed more than once, we eliminate repeated
2319 computations by using the target of the first one.
2321 2- Constant values and copy assignments. This is used to do very
2322 simplistic constant and copy propagation. When a constant or copy
2323 assignment is found, we map the value on the RHS of the assignment to
2324 the variable in the LHS in the CONST_AND_COPIES table. */
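/* E.g. (made-up names): after "x_1 = 7;" a later "y_2 = x_1 + z_3;"
   has x_1 replaced by 7 and is then folded before the redundancy
   check runs.  */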
2326 static void
2327 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2329 gimple stmt, old_stmt;
2330 bool may_optimize_p;
2331 bool modified_p = false;
2332 bool was_noreturn;
2334 old_stmt = stmt = gsi_stmt (si);
2335 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2337 if (dump_file && (dump_flags & TDF_DETAILS))
2339 fprintf (dump_file, "Optimizing statement ");
2340 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2343 if (gimple_code (stmt) == GIMPLE_COND)
2344 canonicalize_comparison (as_a <gcond *> (stmt));
2346 update_stmt_if_modified (stmt);
2347 opt_stats.num_stmts++;
2349 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2350 cprop_into_stmt (stmt);
2352 /* If the statement has been modified with constant replacements,
2353 fold its RHS before checking for redundant computations. */
2354 if (gimple_modified_p (stmt))
2356 tree rhs = NULL;
2358 /* Try to fold the statement making sure that STMT is kept
2359 up to date. */
2360 if (fold_stmt (&si))
2362 stmt = gsi_stmt (si);
2363 gimple_set_modified (stmt, true);
2365 if (dump_file && (dump_flags & TDF_DETAILS))
2367 fprintf (dump_file, " Folded to: ");
2368 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2372 /* We only need to consider cases that can yield a gimple operand. */
2373 if (gimple_assign_single_p (stmt))
2374 rhs = gimple_assign_rhs1 (stmt);
2375 else if (gimple_code (stmt) == GIMPLE_GOTO)
2376 rhs = gimple_goto_dest (stmt);
2377 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2378 /* This should never be an ADDR_EXPR. */
2379 rhs = gimple_switch_index (swtch_stmt);
2381 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2382 recompute_tree_invariant_for_addr_expr (rhs);
2384 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2385 even if fold_stmt updated the stmt already and thus cleared
2386 gimple_modified_p flag on it. */
2387 modified_p = true;
2390 /* Check for redundant computations. Do this optimization only for
2391 side-effect-free assignments, calls with a LHS, conditionals and switches. */
2392 may_optimize_p = (!gimple_has_side_effects (stmt)
2393 && (is_gimple_assign (stmt)
2394 || (is_gimple_call (stmt)
2395 && gimple_call_lhs (stmt) != NULL_TREE)
2396 || gimple_code (stmt) == GIMPLE_COND
2397 || gimple_code (stmt) == GIMPLE_SWITCH));
2399 if (may_optimize_p)
2401 if (gimple_code (stmt) == GIMPLE_CALL)
2403 /* Resolve __builtin_constant_p. If it hasn't been
2404 folded to integer_one_node by now, it's fairly
2405 certain that the value simply isn't constant. */
2406 tree callee = gimple_call_fndecl (stmt);
2407 if (callee
2408 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2409 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2411 propagate_tree_value_into_stmt (&si, integer_zero_node);
2412 stmt = gsi_stmt (si);
2416 update_stmt_if_modified (stmt);
2417 eliminate_redundant_computations (&si);
2418 stmt = gsi_stmt (si);
2420 /* Perform simple redundant store elimination. */
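  /* Sketch (hypothetical names): if "x_2 = *p_1;" still describes the
     current memory state, a subsequent "*p_1 = x_2;" stores back the
     value already present and can be removed.  */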
2421 if (gimple_assign_single_p (stmt)
2422 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2424 tree lhs = gimple_assign_lhs (stmt);
2425 tree rhs = gimple_assign_rhs1 (stmt);
2426 tree cached_lhs;
2427 gassign *new_stmt;
2428 if (TREE_CODE (rhs) == SSA_NAME)
2430 tree tem = SSA_NAME_VALUE (rhs);
2431 if (tem)
2432 rhs = tem;
2434 /* Build a new statement with the RHS and LHS exchanged. */
2435 if (TREE_CODE (rhs) == SSA_NAME)
2437 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2438 new_stmt = gimple_build_assign (rhs, lhs);
2439 SSA_NAME_DEF_STMT (rhs) = defstmt;
2441 else
2442 new_stmt = gimple_build_assign (rhs, lhs);
2443 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2444 cached_lhs = lookup_avail_expr (new_stmt, false);
2445 if (cached_lhs
2446 && rhs == cached_lhs)
2448 basic_block bb = gimple_bb (stmt);
2449 unlink_stmt_vdef (stmt);
2450 if (gsi_remove (&si, true))
2452 bitmap_set_bit (need_eh_cleanup, bb->index);
2453 if (dump_file && (dump_flags & TDF_DETAILS))
2454 fprintf (dump_file, " Flagged to clear EH edges.\n");
2456 release_defs (stmt);
2457 return;
2462 /* Record any additional equivalences created by this statement. */
2463 if (is_gimple_assign (stmt))
2464 record_equivalences_from_stmt (stmt, may_optimize_p);
2466 /* If STMT is a COND_EXPR and it was modified, then we may know
2467 where it goes. If that is the case, then mark the CFG as altered.
2469 This will cause us to later call remove_unreachable_blocks and
2470 cleanup_tree_cfg when it is safe to do so. It is not safe to
2471 clean things up here since removal of edges and such can trigger
2472 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2473 the manager.
2475 That's all fine and good, except that once SSA_NAMEs are released
2476 to the manager, we must not call create_ssa_name until all references
2477 to released SSA_NAMEs have been eliminated.
2479 All references to the deleted SSA_NAMEs can not be eliminated until
2480 we remove unreachable blocks.
2482 We can not remove unreachable blocks until after we have completed
2483 any queued jump threading.
2485 We can not complete any queued jump threads until we have taken
2486 appropriate variables out of SSA form. Taking variables out of
2487 SSA form can call create_ssa_name and thus we lose.
2489 Ultimately I suspect we're going to need to change the interface
2490 into the SSA_NAME manager. */
2491 if (gimple_modified_p (stmt) || modified_p)
2493 tree val = NULL;
2495 update_stmt_if_modified (stmt);
2497 if (gimple_code (stmt) == GIMPLE_COND)
2498 val = fold_binary_loc (gimple_location (stmt),
2499 gimple_cond_code (stmt), boolean_type_node,
2500 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2501 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2502 val = gimple_switch_index (swtch_stmt);
2504 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2505 cfg_altered = true;
2507 /* If we simplified a statement in such a way as to be shown that it
2508 cannot trap, update the eh information and the cfg to match. */
2509 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2511 bitmap_set_bit (need_eh_cleanup, bb->index);
2512 if (dump_file && (dump_flags & TDF_DETAILS))
2513 fprintf (dump_file, " Flagged to clear EH edges.\n");
2516 if (!was_noreturn
2517 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2518 need_noreturn_fixup.safe_push (stmt);
2522 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2523 the desired memory state. */
2525 static void *
2526 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2528 tree vuse2 = (tree) data;
2529 if (vuse1 == vuse2)
2530 return data;
2532 /* This bounds the stmt walks we perform on reference lookups
2533 to O(1) instead of O(N) where N is the number of dominating
2534 stores leading to a candidate. We re-use the SCCVN param
2535 for this as it is basically the same complexity. */
2536 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2537 return (void *)-1;
2539 return NULL;
2542 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2543 If found, return its LHS. Otherwise insert STMT in the table and
2544 return NULL_TREE.
2546 Also, when an expression is first inserted in the table, it is
2547 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2548 we finish processing this block and its children. */
2550 static tree
2551 lookup_avail_expr (gimple stmt, bool insert)
2553 expr_hash_elt **slot;
2554 tree lhs;
2555 tree temp;
2556 struct expr_hash_elt element;
2558 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2559 if (gimple_code (stmt) == GIMPLE_PHI)
2560 lhs = gimple_phi_result (stmt);
2561 else
2562 lhs = gimple_get_lhs (stmt);
2564 initialize_hash_element (stmt, lhs, &element);
2566 if (dump_file && (dump_flags & TDF_DETAILS))
2568 fprintf (dump_file, "LKUP ");
2569 print_expr_hash_elt (dump_file, &element);
2572 /* Don't bother remembering constant assignments and copy operations.
2573 Constants and copy operations are handled by the constant/copy propagator
2574 in optimize_stmt. */
2575 if (element.expr.kind == EXPR_SINGLE
2576 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2577 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2578 return NULL_TREE;
2580 /* Finally try to find the expression in the main expression hash table. */
2581 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2582 if (slot == NULL)
2584 free_expr_hash_elt_contents (&element);
2585 return NULL_TREE;
2587 else if (*slot == NULL)
2589 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2590 *element2 = element;
2591 element2->stamp = element2;
2592 *slot = element2;
2594 if (dump_file && (dump_flags & TDF_DETAILS))
2596 fprintf (dump_file, "2>>> ");
2597 print_expr_hash_elt (dump_file, element2);
2600 avail_exprs_stack.safe_push
2601 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2602 return NULL_TREE;
2605 /* If we found a redundant memory operation, do an alias walk to
2606 check if we can re-use it. */
2607 if (gimple_vuse (stmt) != (*slot)->vop)
2609 tree vuse1 = (*slot)->vop;
2610 tree vuse2 = gimple_vuse (stmt);
2611 /* If we have a load of a register and a candidate in the
2612 hash with vuse1, then try to reach its stmt by walking
2613 up the virtual use-def chain using walk_non_aliased_vuses.
2614 But don't do this when removing expressions from the hash. */
2615 ao_ref ref;
2616 if (!(vuse1 && vuse2
2617 && gimple_assign_single_p (stmt)
2618 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2619 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2620 && walk_non_aliased_vuses (&ref, vuse2,
2621 vuse_eq, NULL, NULL, vuse1) != NULL))
2623 if (insert)
2625 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2626 *element2 = element;
2627 element2->stamp = element2;
2629 /* Insert the expr into the hash by replacing the current
2630 entry and recording the value to restore in the
2631 avail_exprs_stack. */
2632 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2633 *slot = element2;
2634 if (dump_file && (dump_flags & TDF_DETAILS))
2636 fprintf (dump_file, "2>>> ");
2637 print_expr_hash_elt (dump_file, *slot);
2640 return NULL_TREE;
2644 free_expr_hash_elt_contents (&element);
2646 /* Extract the LHS of the assignment so that it can be used as the current
2647 definition of another variable. */
2648 lhs = (*slot)->lhs;
2650 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2651 use the value from the const_and_copies table. */
2652 if (TREE_CODE (lhs) == SSA_NAME)
2654 temp = SSA_NAME_VALUE (lhs);
2655 if (temp)
2656 lhs = temp;
2659 if (dump_file && (dump_flags & TDF_DETAILS))
2661 fprintf (dump_file, "FIND: ");
2662 print_generic_expr (dump_file, lhs, 0);
2663 fprintf (dump_file, "\n");
2666 return lhs;
2669 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2670 for expressions using the code of the expression and the SSA numbers of
2671 its operands. */
2673 static hashval_t
2674 avail_expr_hash (const void *p)
2676 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2677 inchash::hash hstate;
2679 inchash::add_hashable_expr (expr, hstate);
2681 return hstate.end ();
2684 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2685 up degenerate PHIs created by or exposed by jump threading. */
2687 /* Given a statement STMT, which is either a PHI node or an assignment,
2688 remove it from the IL. */
2690 static void
2691 remove_stmt_or_phi (gimple stmt)
2693 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2695 if (gimple_code (stmt) == GIMPLE_PHI)
2696 remove_phi_node (&gsi, true);
2697 else
2699 gsi_remove (&gsi, true);
2700 release_defs (stmt);
2704 /* Given a statement STMT, which is either a PHI node or an assignment,
2705 return the "rhs" of the node. In the case of a non-degenerate
2706 PHI, NULL is returned. */
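/* A degenerate PHI has all of its arguments equal, e.g. (illustrative)
   "x_1 = PHI <y_2(3), y_2(4)>", whose "rhs" is y_2.  */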
2708 static tree
2709 get_rhs_or_phi_arg (gimple stmt)
2711 if (gimple_code (stmt) == GIMPLE_PHI)
2712 return degenerate_phi_result (as_a <gphi *> (stmt));
2713 else if (gimple_assign_single_p (stmt))
2714 return gimple_assign_rhs1 (stmt);
2715 else
2716 gcc_unreachable ();
2720 /* Given a statement STMT, which is either a PHI node or an assignment,
2721 return the "lhs" of the node. */
2723 static tree
2724 get_lhs_or_phi_result (gimple stmt)
2726 if (gimple_code (stmt) == GIMPLE_PHI)
2727 return gimple_phi_result (stmt);
2728 else if (is_gimple_assign (stmt))
2729 return gimple_assign_lhs (stmt);
2730 else
2731 gcc_unreachable ();
2734 /* Propagate RHS into all uses of LHS (when possible).
2736 RHS and LHS are derived from STMT, which is passed in solely so
2737 that we can remove it if propagation is successful.
2739 When propagating into a PHI node or into a statement which turns
2740 into a trivial copy or constant initialization, set the
2741 appropriate bit in INTERESTING_NAMEs so that we will visit those
2742 nodes as well in an effort to pick up secondary optimization
2743 opportunities. */
2745 static void
2746 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2748 /* First verify that propagation is valid. */
2749 if (may_propagate_copy (lhs, rhs))
2751 use_operand_p use_p;
2752 imm_use_iterator iter;
2753 gimple use_stmt;
2754 bool all = true;
2756 /* Dump details. */
2757 if (dump_file && (dump_flags & TDF_DETAILS))
2759 fprintf (dump_file, " Replacing '");
2760 print_generic_expr (dump_file, lhs, dump_flags);
2761 fprintf (dump_file, "' with %s '",
2762 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2763 print_generic_expr (dump_file, rhs, dump_flags);
2764 fprintf (dump_file, "'\n");
2767 /* Walk over every use of LHS and try to replace the use with RHS.
2768 At this point the only reason why such a propagation would not
2769 be successful would be if the use occurs in an ASM_EXPR. */
2770 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2772 /* Leave debug stmts alone. If we succeed in propagating
2773 all non-debug uses, we'll drop the DEF, and propagation
2774 into debug stmts will occur then. */
2775 if (gimple_debug_bind_p (use_stmt))
2776 continue;
2778 /* It's not always safe to propagate into an ASM_EXPR. */
2779 if (gimple_code (use_stmt) == GIMPLE_ASM
2780 && ! may_propagate_copy_into_asm (lhs))
2782 all = false;
2783 continue;
2786 /* It's not ok to propagate into the definition stmt of RHS.
2787 <bb 9>:
2788 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2789 g_67.1_6 = prephitmp.12_36;
2790 goto <bb 9>;
2791 While this is strictly all dead code, we do not want to
2792 deal with it here. */
2793 if (TREE_CODE (rhs) == SSA_NAME
2794 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2796 all = false;
2797 continue;
2800 /* Dump details. */
2801 if (dump_file && (dump_flags & TDF_DETAILS))
2803 fprintf (dump_file, " Original statement:");
2804 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2807 /* Propagate the RHS into this use of the LHS. */
2808 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2809 propagate_value (use_p, rhs);
2811 /* Special cases to avoid useless calls into the folding
2812 routines, operand scanning, etc.
2814 Propagation into a PHI may cause the PHI to become
2815 degenerate, so mark the PHI as interesting. No other
2816 actions are necessary. */
2817 if (gimple_code (use_stmt) == GIMPLE_PHI)
2819 tree result;
2821 /* Dump details. */
2822 if (dump_file && (dump_flags & TDF_DETAILS))
2824 fprintf (dump_file, " Updated statement:");
2825 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2828 result = get_lhs_or_phi_result (use_stmt);
2829 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2830 continue;
2833 /* From this point onward we are propagating into a
2834 real statement. Folding may (or may not) be possible,
2835 we may expose new operands, expose dead EH edges,
2836 etc. */
2837 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2838 cannot fold a call that simplifies to a constant,
2839 because the GIMPLE_CALL must be replaced by a
2840 GIMPLE_ASSIGN, and there is no way to effect such a
2841 transformation in-place. We might want to consider
2842 using the more general fold_stmt here. */
2844 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2845 fold_stmt_inplace (&gsi);
2848 /* Sometimes propagation can expose new operands to the
2849 renamer. */
2850 update_stmt (use_stmt);
2852 /* Dump details. */
2853 if (dump_file && (dump_flags & TDF_DETAILS))
2855 fprintf (dump_file, " Updated statement:");
2856 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2859 /* If we replaced a variable index with a constant, then
2860 we would need to update the invariant flag for ADDR_EXPRs. */
2861 if (gimple_assign_single_p (use_stmt)
2862 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2863 recompute_tree_invariant_for_addr_expr
2864 (gimple_assign_rhs1 (use_stmt));
2866 /* If we cleaned up EH information from the statement,
2867 mark its containing block as needing EH cleanups. */
2868 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2870 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2871 if (dump_file && (dump_flags & TDF_DETAILS))
2872 fprintf (dump_file, " Flagged to clear EH edges.\n");
2875 /* Propagation may expose new trivial copy/constant propagation
2876 opportunities. */
2877 if (gimple_assign_single_p (use_stmt)
2878 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2879 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2880 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2882 tree result = get_lhs_or_phi_result (use_stmt);
2883 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2886 /* Propagation into these nodes may make certain edges in
2887 the CFG unexecutable. We want to identify them, as PHI nodes
2888 at the destination of those unexecutable edges may become
2889 degenerate. */
2890 else if (gimple_code (use_stmt) == GIMPLE_COND
2891 || gimple_code (use_stmt) == GIMPLE_SWITCH
2892 || gimple_code (use_stmt) == GIMPLE_GOTO)
2894 tree val;
2896 if (gimple_code (use_stmt) == GIMPLE_COND)
2897 val = fold_binary_loc (gimple_location (use_stmt),
2898 gimple_cond_code (use_stmt),
2899 boolean_type_node,
2900 gimple_cond_lhs (use_stmt),
2901 gimple_cond_rhs (use_stmt));
2902 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2903 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2904 else
2905 val = gimple_goto_dest (use_stmt);
2907 if (val && is_gimple_min_invariant (val))
2909 basic_block bb = gimple_bb (use_stmt);
2910 edge te = find_taken_edge (bb, val);
2911 if (!te)
2912 continue;
2914 edge_iterator ei;
2915 edge e;
2916 gimple_stmt_iterator gsi;
2917 gphi_iterator psi;
2919 /* Remove all outgoing edges except TE. */
2920 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2922 if (e != te)
2924 /* Mark all the PHI nodes at the destination of
2925 the unexecutable edge as interesting. */
2926 for (psi = gsi_start_phis (e->dest);
2927 !gsi_end_p (psi);
2928 gsi_next (&psi))
2930 gphi *phi = psi.phi ();
2932 tree result = gimple_phi_result (phi);
2933 int version = SSA_NAME_VERSION (result);
2935 bitmap_set_bit (interesting_names, version);
2938 te->probability += e->probability;
2940 te->count += e->count;
2941 remove_edge (e);
2942 cfg_altered = true;
2944 else
2945 ei_next (&ei);
2948 gsi = gsi_last_bb (gimple_bb (use_stmt));
2949 gsi_remove (&gsi, true);
2951 /* And fixup the flags on the single remaining edge. */
2952 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2953 te->flags &= ~EDGE_ABNORMAL;
2954 te->flags |= EDGE_FALLTHRU;
2955 if (te->probability > REG_BR_PROB_BASE)
2956 te->probability = REG_BR_PROB_BASE;
2961 /* Ensure there is nothing else to do. */
2962 gcc_assert (!all || has_zero_uses (lhs));
2964 /* If we were able to propagate away all uses of LHS, then
2965 we can remove STMT. */
2966 if (all)
2967 remove_stmt_or_phi (stmt);
2971 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2972 a statement that is a trivial copy or constant initialization.
2974 Attempt to eliminate STMT by propagating its RHS into all uses of
2975 its LHS. This may in turn set new bits in INTERESTING_NAMES
2976 for nodes we want to revisit later.
2978 All exit paths should clear INTERESTING_NAMES for the result
2979 of STMT. */
2981 static void
2982 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2984 tree lhs = get_lhs_or_phi_result (stmt);
2985 tree rhs;
2986 int version = SSA_NAME_VERSION (lhs);
2988 /* If the LHS of this statement or PHI has no uses, then we can
2989 just eliminate it. This can occur if, for example, the PHI
2990 was created by block duplication due to threading and its only
2991 use was in the conditional at the end of the block which was
2992 deleted. */
2993 if (has_zero_uses (lhs))
2995 bitmap_clear_bit (interesting_names, version);
2996 remove_stmt_or_phi (stmt);
2997 return;
3000 /* Get the RHS of the assignment or PHI node if the PHI is a
3001 degenerate. */
3002 rhs = get_rhs_or_phi_arg (stmt);
3003 if (!rhs)
3005 bitmap_clear_bit (interesting_names, version);
3006 return;
3009 if (!virtual_operand_p (lhs))
3010 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3011 else
3013 gimple use_stmt;
3014 imm_use_iterator iter;
3015 use_operand_p use_p;
3016 /* For virtual operands we have to propagate into all uses as
3017 otherwise we will create overlapping live ranges. */
3018 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3019 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3020 SET_USE (use_p, rhs);
3021 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3022 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3023 remove_stmt_or_phi (stmt);
3026 /* Note that STMT may well have been deleted by now, so do
3027 not access it; instead use the saved version number to clear
3028 STMT's entry in the worklist. */
3029 bitmap_clear_bit (interesting_names, version);
3032 /* The first phase in degenerate PHI elimination.
3034 Eliminate the degenerate PHIs in BB, then recurse on the
3035 dominator children of BB. */
3037 static void
3038 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3040 gphi_iterator gsi;
3041 basic_block son;
3043 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3045 gphi *phi = gsi.phi ();
3047 eliminate_const_or_copy (phi, interesting_names);
3050 /* Recurse into the dominator children of BB. */
3051 for (son = first_dom_son (CDI_DOMINATORS, bb);
3052 son;
3053 son = next_dom_son (CDI_DOMINATORS, son))
3054 eliminate_degenerate_phis_1 (son, interesting_names);
3058 /* A very simple pass to eliminate degenerate PHI nodes from the
3059 IL. This is meant to be fast enough to be able to be run several
3060 times in the optimization pipeline.
3062 Certain optimizations, particularly those which duplicate blocks
3063 or remove edges from the CFG can create or expose PHIs which are
3064 trivial copies or constant initializations.
3066 While we could pick up these optimizations in DOM or with the
3067 combination of copy-prop and CCP, those solutions are far too
3068 heavy-weight for our needs.
3070 This implementation has two phases so that we can efficiently
3071 eliminate the first order degenerate PHIs and second order
3072 degenerate PHIs.
3074 The first phase performs a dominator walk to identify and eliminate
3075 the vast majority of the degenerate PHIs. When a degenerate PHI
3076 is identified and eliminated any affected statements or PHIs
3077 are put on a worklist.
3079 The second phase eliminates degenerate PHIs and trivial copies
3080 or constant initializations using the worklist. This is how we
3081 pick up the secondary optimization opportunities with minimal
3082 cost. */
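/* An illustrative second order case (made-up names): eliminating
   "x_1 = PHI <y_2(2), y_2(3)>" rewrites a use such as
   "z_4 = PHI <x_1(5), y_2(6)>" into "z_4 = PHI <y_2(5), y_2(6)>",
   which is itself now degenerate and is caught via the worklist.  */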
3084 namespace {
3086 const pass_data pass_data_phi_only_cprop =
3088 GIMPLE_PASS, /* type */
3089 "phicprop", /* name */
3090 OPTGROUP_NONE, /* optinfo_flags */
3091 TV_TREE_PHI_CPROP, /* tv_id */
3092 ( PROP_cfg | PROP_ssa ), /* properties_required */
3093 0, /* properties_provided */
3094 0, /* properties_destroyed */
3095 0, /* todo_flags_start */
3096 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3099 class pass_phi_only_cprop : public gimple_opt_pass
3101 public:
3102 pass_phi_only_cprop (gcc::context *ctxt)
3103 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3106 /* opt_pass methods: */
3107 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3108 virtual bool gate (function *) { return flag_tree_dom != 0; }
3109 virtual unsigned int execute (function *);
3111 }; // class pass_phi_only_cprop
3113 unsigned int
3114 pass_phi_only_cprop::execute (function *fun)
3116 bitmap interesting_names;
3117 bitmap interesting_names1;
3119 /* Bitmap of blocks which need EH information updated. We can not
3120 update it on-the-fly as doing so invalidates the dominator tree. */
3121 need_eh_cleanup = BITMAP_ALLOC (NULL);
3123 /* INTERESTING_NAMES is effectively our worklist, indexed by
3124 SSA_NAME_VERSION.
3126 A set bit indicates that the statement or PHI node which
3127 defines the SSA_NAME should be (re)examined to determine if
3128 it has become a degenerate PHI or trivial const/copy propagation
3129 opportunity.
3131 Experiments have shown we generally get better compilation
3132 time behavior with bitmaps rather than sbitmaps. */
3133 interesting_names = BITMAP_ALLOC (NULL);
3134 interesting_names1 = BITMAP_ALLOC (NULL);
3136 calculate_dominance_info (CDI_DOMINATORS);
3137 cfg_altered = false;
3139 /* First phase. Eliminate degenerate PHIs via a dominator
3140 walk of the CFG.
3142 Experiments have indicated that we generally get better
3143 compile-time behavior by visiting blocks in the first
3144 phase in dominator order. Presumably this is because walking
3145 in dominator order leaves fewer PHIs for later examination
3146 by the worklist phase. */
3147 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3148 interesting_names);
3150 /* Second phase. Eliminate second order degenerate PHIs as well
3151 as trivial copies or constant initializations identified by
3152 the first phase or this phase. Basically we keep iterating
3153 until our set of INTERESTING_NAMEs is empty. */
3154 while (!bitmap_empty_p (interesting_names))
3156 unsigned int i;
3157 bitmap_iterator bi;
3159 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3160 changed during the loop. Copy it to another bitmap and
3161 use that. */
3162 bitmap_copy (interesting_names1, interesting_names);
3164 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3166 tree name = ssa_name (i);
3168 /* Ignore SSA_NAMEs that have been released because
3169 their defining statement was deleted (unreachable). */
3170 if (name)
3171 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3172 interesting_names);
3176 if (cfg_altered)
3178 free_dominance_info (CDI_DOMINATORS);
3179 /* If we changed the CFG, schedule loops for fixup by cfgcleanup. */
3180 loops_state_set (LOOPS_NEED_FIXUP);
3183 /* Propagation of const and copies may make some EH edges dead. Purge
3184 such edges from the CFG as needed. */
3185 if (!bitmap_empty_p (need_eh_cleanup))
3187 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3188 BITMAP_FREE (need_eh_cleanup);
3191 BITMAP_FREE (interesting_names);
3192 BITMAP_FREE (interesting_names1);
3193 return 0;
3196 } // anon namespace
3198 gimple_opt_pass *
3199 make_pass_phi_only_cprop (gcc::context *ctxt)
3201 return new pass_phi_only_cprop (ctxt);