/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "gimple.h"
#include "hard-reg-set.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
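
/* For example, an assignment such as a_1 = b_2 + c_3 is recorded as an
   EXPR_BINARY with ops.binary.op == PLUS_EXPR and the two SSA names as
   opnd0 and opnd1, while a call x_4 = foo (b_2) is recorded as an
   EXPR_CALL carrying the call statement and its argument vector.  */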

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

struct cond_equivalence
{
  struct hashable_expr cond;
  tree value;
};
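
/* As an illustration, when traversing the true edge out of
   if (a_1 < b_2) we record a cond_equivalence whose COND is the
   hashable form of a_1 < b_2 and whose VALUE is boolean_true_node.  */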

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
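
/* For instance, while processing a block that makes a_2 + b_3
   available, the stack might look like (bottom to top):

     (NULL, NULL)              <- marker pushed on block entry
     (elt for a_2 + b_3, NULL) <- expression made available here

   Unwinding pops entries until the (NULL, NULL) marker is reached,
   removing or restoring the corresponding hash table slots.  */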

/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher : pointer_hash <expr_hash_elt>
{
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

/* Unwindable const/copy equivalences.  */
static const_and_copies *const_and_copies;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);

/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
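
/* For instance, under this scheme a_1 + b_2 and b_2 + a_1 hash to the
   same value, so a table lookup of either form can find an entry that
   was recorded for the other.  */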

/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p,
                           bool val = true)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = val ? boolean_true_node : boolean_false_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (EQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences, false);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions as well.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
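
/* As a concrete example, for an integer comparison a_1 < b_2 the
   switch above records the derived conditions a_1 <= b_2 == true,
   a_1 != b_2 == true and a_1 == b_2 == false, followed by the
   original condition and its inverse.  */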

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch, i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies = new class const_and_copies (dump_file, dump_flags);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fix up by visiting a dominating
     now-noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Unwind entries recorded in AVAIL_EXPRS_STACK, removing expressions
   from the global hash table until the marker entry pushed on block
   entry is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
        return tem;
    }
  return t;
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs)
        record_equality (lhs, rhs);

      /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
         set via a widening type conversion, then we may be able to record
         additional equivalences.  */
      if (lhs
          && TREE_CODE (lhs) == SSA_NAME
          && TREE_CODE (rhs) == INTEGER_CST)
        {
          gimple defstmt = SSA_NAME_DEF_STMT (lhs);

          if (defstmt
              && is_gimple_assign (defstmt)
              && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
            {
              tree old_rhs = gimple_assign_rhs1 (defstmt);

              /* If the conversion widens the original value and
                 the constant is in the range of the type of OLD_RHS,
                 then convert the constant and record the equivalence.

                 Note that int_fits_type_p does not check the precision
                 if the upper and lower bounds are OK.  */
              if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                  && (TYPE_PRECISION (TREE_TYPE (lhs))
                      > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                  && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                {
                  tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                  record_equality (old_rhs, newval);
                }
            }
        }

      /* If LHS is an SSA_NAME with a new equivalency, then see whether
         stmts with uses of that LHS that dominate the edge destination
         simplify and allow further equivalences to be recorded.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
            {
              gimple use_stmt = USE_STMT (use_p);

              /* Only bother to record more equivalences for lhs that
                 can be directly used by e->dest.
                 ??? If the code gets re-organized to a worklist to
                 catch more indirect opportunities and it is made to
                 handle PHIs then this should only consider use_stmts
                 in basic-blocks we have already visited.  */
              if (e->dest == gimple_bb (use_stmt)
                  || !dominated_by_p (CDI_DOMINATORS,
                                      e->dest, gimple_bb (use_stmt)))
                continue;
              tree lhs2 = gimple_get_lhs (use_stmt);
              if (lhs2 && TREE_CODE (lhs2) == SSA_NAME)
                {
                  tree res
                    = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
                                                      no_follow_ssa_edges);
                  if (res
                      && (TREE_CODE (res) == SSA_NAME
                          || is_gimple_min_invariant (res)))
                    record_equality (lhs2, res);
                }
            }
        }

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
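
/* For example, if the edge gives l_1 == 7 and l_1 was defined by the
   widening conversion l_1 = (int) s_2 where s_2 has a narrower integral
   type that can represent 7, the code above also records s_2 == 7.  */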

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        const_and_copies,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          t = dom_valueize (t);

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
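
/* For instance, given x_3 = PHI <y_4, y_4, x_3>, every alternative is
   either y_4 or the result itself, so x_3 is recorded as having the
   value y_4 and dominated uses of x_3 can be replaced by y_4.  */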

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
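
/* A typical case is a loop header: it has two predecessors, the
   preheader and the latch, but since the latch edge is a back edge
   (its source is dominated by the header), the function returns the
   preheader edge alone.  */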

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e);
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}

/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}

/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}

/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y, false))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X.  This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) < loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}
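
/* As an example, after traversing the true arm of if (x_1 == 5) we
   reach here with the operands of the EQ_EXPR and record x_1 = 5, so
   dominated uses of x_1 can be replaced by the constant.  */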

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
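
/* This is the pattern produced for a loop counter: e.g., the increment
   in "for (i = 0; i < n; i++)" becomes i_2 = i_1 + 1, where i_1 is the
   PHI result at the loop header and i_2 feeds back into that PHI.  */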

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            const_and_copies->record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}
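
/* For example, if BB ends in if (a_1 == 0) and a successor PHI has the
   argument a_1 on the edge taken when the test is true, that argument
   can be replaced by 0 in the PHI even though the successor block
   itself is not dominated by the edge.  */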

void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}

/* We have finished processing the dominator children of BB; perform
   any finalization actions in preparation for leaving this node in
   the dominator tree.  */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  gimple last;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_p (bb)
      && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
      && potentially_threadable_block (single_succ (bb)))
    {
      thread_across_edge (single_succ_edge (bb));
    }
  else if ((last = last_stmt (bb))
           && gimple_code (last) == GIMPLE_COND
           && EDGE_COUNT (bb->succs) == 2
           && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
           && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
         more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
        thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
        thread_across_edge (false_edge);
    }

  /* These remove expressions local to BB from the tables.  */
  remove_local_expressions_from_table ();
  const_and_copies->pop_to_marker ();
}

/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into the available expression hash
   table.  */
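
/* For example (a hypothetical fragment, assuming b_2 + c_3 was already
   made available by a dominating block):

     x_5 = b_2 + c_3;   <- recorded in the available expression table
     ...
     y_6 = b_2 + c_3;   <- redundant; rewritten as y_6 = x_5

   The second computation is replaced by a use of x_5.  */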

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_PHI)
    def = gimple_phi_result (stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but can not
     themselves be entered into the hash tables.  */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (stmt)
      /* Do not record equivalences for increments of ivs.  This would create
         overlapping live ranges for a very questionable gain.  */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  */
  cached_lhs = lookup_avail_expr (stmt, insert);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize.  */
  if (is_gimple_assign (stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi.  */
    {
      if (def && cached_lhs)
        const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT.  */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
           || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
                           || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced redundant expr '");
          print_gimple_expr (dump_file, stmt, 0, dump_flags);
          fprintf (dump_file, "' with '");
          print_generic_expr (dump_file, cached_lhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      opt_stats.num_re++;

      if (assigns_var_p
          && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
        cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
         perhaps we should move this into propagate_tree_value_into_stmt
         itself.  */
      gimple_set_modified (gsi_stmt (*gsi), true);
    }
}

/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences.  */
/* We handle only very simple copy equivalences here.  The heavy
   lifting is done by eliminate_redundant_computations.  */
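
/* For instance (a hypothetical fragment), the copy

     x_5 = y_2;

   records the equivalence x_5 == y_2 in CONST_AND_COPIES, so later
   uses of x_5 can be rewritten to use y_2 directly.  */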

static void
record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);

      /* If the RHS of the assignment is a constant or another variable that
         may be propagated, register it in the CONST_AND_COPIES table.  We
         do not need to record unwind data for this, since this is a true
         assignment and not an equivalence inferred from a comparison.  All
         uses of this ssa name are dominated by this assignment, so unwinding
         just costs time and space.  */
      if (may_optimize_p
          && (TREE_CODE (rhs) == SSA_NAME
              || is_gimple_min_invariant (rhs)))
        {
          rhs = dom_valueize (rhs);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "==== ASGN ");
              print_generic_expr (dump_file, lhs, 0);
              fprintf (dump_file, " = ");
              print_generic_expr (dump_file, rhs, 0);
              fprintf (dump_file, "\n");
            }

          set_ssa_name_value (lhs, rhs);
        }
    }

  /* Make sure we can propagate &x + CST.  */
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (stmt);
      tree op1 = gimple_assign_rhs2 (stmt);
      tree new_rhs
        = build_fold_addr_expr (fold_build2 (MEM_REF,
                                             TREE_TYPE (TREE_TYPE (op0)),
                                             unshare_expr (op0),
                                             fold_convert (ptr_type_node,
                                                           op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "==== ASGN ");
          print_generic_expr (dump_file, lhs, 0);
          fprintf (dump_file, " = ");
          print_generic_expr (dump_file, new_rhs, 0);
          fprintf (dump_file, "\n");
        }

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads.  */
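  /* E.g., after the (hypothetical) store

       *p_1 = x_2;

     we record the artificial load x_2 = *p_1, so a subsequent

       y_3 = *p_1;

     can be recognized as redundant and rewritten as y_3 = x_2.  */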
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
          || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged.  */
      if (TREE_CODE (rhs) == SSA_NAME)
        {
          /* NOTE tuples.  The call to gimple_build_assign below replaced
             a call to build_gimple_modify_stmt, which did not set the
             SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
             may cause an SSA validation failure, as the LHS may be a
             default-initialized name and should have no definition.  I'm
             a bit dubious of this, as the artificial statement that we
             generate here may in fact be ill-formed, but it is simply
             used as an internal device in this pass, and never becomes
             part of the CFG.  */
          gimple defstmt = SSA_NAME_DEF_STMT (rhs);
          new_stmt = gimple_build_assign (rhs, lhs);
          SSA_NAME_DEF_STMT (rhs) = defstmt;
        }
      else
        new_stmt = gimple_build_assign (rhs, lhs);

      gimple_set_vuse (new_stmt, gimple_vdef (stmt));

      /* Finally enter the statement into the available expression
         table.  */
      lookup_avail_expr (new_stmt, true);
    }
}

/* Replace *OP_P in STMT with any known equivalent value for *OP_P from
   CONST_AND_COPIES.  */

static void
cprop_operand (gimple stmt, use_operand_p op_p)
{
  tree val;
  tree op = USE_FROM_PTR (op_p);

  /* If the operand has a known constant value or it is known to be a
     copy of some other variable, use the value or copy stored in
     CONST_AND_COPIES.  */
  val = SSA_NAME_VALUE (op);
  if (val && val != op)
    {
      /* Do not replace hard register operands in asm statements.  */
      if (gimple_code (stmt) == GIMPLE_ASM
          && !may_propagate_copy_into_asm (op))
        return;

      /* Certain operands are not allowed to be copy propagated due
         to their interaction with exception handling and some GCC
         extensions.  */
      if (!may_propagate_copy (op, val))
        return;

      /* Do not propagate copies into BIVs.
         See PR23821 and PR62217 for how this can disturb IV and
         number of iteration analysis.  */
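      /* E.g., if OP is the (hypothetical) induction variable i_1 defined
         by a loop header PHI

           i_1 = PHI <0(2), i_2(4)>

         and i_1 is known to be a copy of j_5, rewriting i_2 = i_1 + 1 as
         i_2 = j_5 + 1 would obscure the induction variable from the IV
         and number-of-iterations analyses.  */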
      if (TREE_CODE (val) != INTEGER_CST)
        {
          gimple def = SSA_NAME_DEF_STMT (op);
          if (gimple_code (def) == GIMPLE_PHI
              && gimple_bb (def)->loop_father->header == gimple_bb (def))
            return;
        }

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replaced '");
          print_generic_expr (dump_file, op, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, val, dump_flags);
          fprintf (dump_file, "'\n");
        }

      if (TREE_CODE (val) != SSA_NAME)
        opt_stats.num_const_prop++;
      else
        opt_stats.num_copy_prop++;

      propagate_value (op_p, val);

      /* And note that we modified this statement.  This is now
         safe, even if we changed virtual operands since we will
         rescan the statement and rewrite its operands again.  */
      gimple_set_modified (stmt, true);
    }
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the uses, vuses and
   vdef_ops of STMT.  */

static void
cprop_into_stmt (gimple stmt)
{
  use_operand_p op_p;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
    cprop_operand (stmt, op_p);
}

/* Optimize the statement pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.  */
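
/* A small, purely hypothetical example combining both effects:

     a_1 = 4;
     b_2 = a_1 + c_3;   <- constant propagation gives b_2 = 4 + c_3
     ...
     d_4 = 4 + c_3;     <- redundant with b_2; rewritten as d_4 = b_2  */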

static void
optimize_stmt (basic_block bb, gimple_stmt_iterator si)
{
  gimple stmt, old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;

  old_stmt = stmt = gsi_stmt (si);
  was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Optimizing statement ");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (gimple_code (stmt) == GIMPLE_COND)
    canonicalize_comparison (as_a <gcond *> (stmt));

  update_stmt_if_modified (stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs.  */
  cprop_into_stmt (stmt);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations.  */
  if (gimple_modified_p (stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
         up to date.  */
      if (fold_stmt (&si))
        {
          stmt = gsi_stmt (si);
          gimple_set_modified (stmt, true);

          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "  Folded to: ");
              print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
            }
        }

      /* We only need to consider cases that can yield a gimple operand.  */
      if (gimple_assign_single_p (stmt))
        rhs = gimple_assign_rhs1 (stmt);
      else if (gimple_code (stmt) == GIMPLE_GOTO)
        rhs = gimple_goto_dest (stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        /* This should never be an ADDR_EXPR.  */
        rhs = gimple_switch_index (swtch_stmt);

      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
        recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
         even if fold_stmt updated the stmt already and thus cleared
         gimple_modified_p flag on it.  */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals.  */
  may_optimize_p = (!gimple_has_side_effects (stmt)
                    && (is_gimple_assign (stmt)
                        || (is_gimple_call (stmt)
                            && gimple_call_lhs (stmt) != NULL_TREE)
                        || gimple_code (stmt) == GIMPLE_COND
                        || gimple_code (stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (stmt) == GIMPLE_CALL)
        {
          /* Resolve __builtin_constant_p.  If it hasn't been
             folded to integer_one_node by now, it's fairly
             certain that the value simply isn't constant.  */
          tree callee = gimple_call_fndecl (stmt);
          if (callee
              && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
            {
              propagate_tree_value_into_stmt (&si, integer_zero_node);
              stmt = gsi_stmt (si);
            }
        }

      update_stmt_if_modified (stmt);
      eliminate_redundant_computations (&si);
      stmt = gsi_stmt (si);

      /* Perform simple redundant store elimination.  */
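      /* E.g., given the (hypothetical) sequence

           x_1 = *p_2;
           ...
           *p_2 = x_1;   <- stores back the value just loaded

         the second statement is a no-op and can be removed, provided
         the memory state is unchanged in between.  */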
      if (gimple_assign_single_p (stmt)
          && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
        {
          tree lhs = gimple_assign_lhs (stmt);
          tree rhs = gimple_assign_rhs1 (stmt);
          tree cached_lhs;
          gassign *new_stmt;
          rhs = dom_valueize (rhs);
          /* Build a new statement with the RHS and LHS exchanged.  */
          if (TREE_CODE (rhs) == SSA_NAME)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (rhs);
              new_stmt = gimple_build_assign (rhs, lhs);
              SSA_NAME_DEF_STMT (rhs) = defstmt;
            }
          else
            new_stmt = gimple_build_assign (rhs, lhs);
          gimple_set_vuse (new_stmt, gimple_vuse (stmt));
          cached_lhs = lookup_avail_expr (new_stmt, false);
          if (cached_lhs
              && rhs == cached_lhs)
            {
              basic_block bb = gimple_bb (stmt);
              unlink_stmt_vdef (stmt);
              if (gsi_remove (&si, true))
                {
                  bitmap_set_bit (need_eh_cleanup, bb->index);
                  if (dump_file && (dump_flags & TDF_DETAILS))
                    fprintf (dump_file, "  Flagged to clear EH edges.\n");
                }
              release_defs (stmt);
              return;
            }
        }
    }

  /* Record any additional equivalences created by this statement.  */
  if (is_gimple_assign (stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p);

  /* If STMT is a COND_EXPR and it was modified, then we may know
     where it goes.  If that is the case, then mark the CFG as altered.

     This will cause us to later call remove_unreachable_blocks and
     cleanup_tree_cfg when it is safe to do so.  It is not safe to
     clean things up here since removal of edges and such can trigger
     the removal of PHI nodes, which in turn can release SSA_NAMEs to
     the manager.

     That's all fine and good, except that once SSA_NAMEs are released
     to the manager, we must not call create_ssa_name until all references
     to released SSA_NAMEs have been eliminated.

     All references to the deleted SSA_NAMEs can not be eliminated until
     we remove unreachable blocks.

     We can not remove unreachable blocks until after we have completed
     any queued jump threading.

     We can not complete any queued jump threads until we have taken
     appropriate variables out of SSA form.  Taking variables out of
     SSA form can call create_ssa_name and thus we lose.

     Ultimately I suspect we're going to need to change the interface
     into the SSA_NAME manager.  */
  if (gimple_modified_p (stmt) || modified_p)
    {
      tree val = NULL;

      update_stmt_if_modified (stmt);

      if (gimple_code (stmt) == GIMPLE_COND)
        val = fold_binary_loc (gimple_location (stmt),
                               gimple_cond_code (stmt), boolean_type_node,
                               gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
        val = gimple_switch_index (swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
        cfg_altered = true;

      /* If we simplified a statement in such a way as to be shown that it
         cannot trap, update the eh information and the cfg to match.  */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
        {
          bitmap_set_bit (need_eh_cleanup, bb->index);
          if (dump_file && (dump_flags & TDF_DETAILS))
            fprintf (dump_file, "  Flagged to clear EH edges.\n");
        }

      if (!was_noreturn
          && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
        need_noreturn_fixup.safe_push (stmt);
    }
}

/* Helper for walk_non_aliased_vuses.  Determine if we arrived at
   the desired memory state.  */

static void *
vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
{
  tree vuse2 = (tree) data;
  if (vuse1 == vuse2)
    return data;

  /* This bounds the stmt walks we perform on reference lookups
     to O(1) instead of O(N) where N is the number of dominating
     stores leading to a candidate.  We re-use the SCCVN param
     for this as it is basically the same complexity.  */
  if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
    return (void *)-1;

  return NULL;
}

/* Search for an existing instance of STMT in the AVAIL_EXPRS table.
   If found, return its LHS.  Otherwise insert STMT in the table and
   return NULL_TREE.

   Also, when an expression is first inserted in the table, it is
   added to AVAIL_EXPRS_STACK, so that it can be removed when we
   finish processing this block and its children.  */

static tree
lookup_avail_expr (gimple stmt, bool insert)
{
  expr_hash_elt **slot;
  tree lhs;
  struct expr_hash_elt element;

  /* Get LHS of phi, assignment, or call; else NULL_TREE.  */
  if (gimple_code (stmt) == GIMPLE_PHI)
    lhs = gimple_phi_result (stmt);
  else
    lhs = gimple_get_lhs (stmt);

  initialize_hash_element (stmt, lhs, &element);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "LKUP ");
      print_expr_hash_elt (dump_file, &element);
    }

  /* Don't bother remembering constant assignments and copy operations.
     Constants and copy operations are handled by the constant/copy propagator
     in optimize_stmt.  */
  if (element.expr.kind == EXPR_SINGLE
      && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
          || is_gimple_min_invariant (element.expr.ops.single.rhs)))
    return NULL_TREE;

  /* Finally try to find the expression in the main expression hash table.  */
  slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
  if (slot == NULL)
    {
      free_expr_hash_elt_contents (&element);
      return NULL_TREE;
    }
  else if (*slot == NULL)
    {
      struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
      *element2 = element;
      element2->stamp = element2;
      *slot = element2;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "2>>> ");
          print_expr_hash_elt (dump_file, element2);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
      return NULL_TREE;
    }

  /* If we found a redundant memory operation do an alias walk to
     check if we can re-use it.  */
  if (gimple_vuse (stmt) != (*slot)->vop)
    {
      tree vuse1 = (*slot)->vop;
      tree vuse2 = gimple_vuse (stmt);
      /* If we have a load of a register and a candidate in the
         hash with vuse1 then try to reach its stmt by walking
         up the virtual use-def chain using walk_non_aliased_vuses.
         But don't do this when removing expressions from the hash.  */
      ao_ref ref;
      if (!(vuse1 && vuse2
            && gimple_assign_single_p (stmt)
            && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
            && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
            && walk_non_aliased_vuses (&ref, vuse2,
                                       vuse_eq, NULL, NULL, vuse1) != NULL))
        {
          if (insert)
            {
              struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
              *element2 = element;
              element2->stamp = element2;

              /* Insert the expr into the hash by replacing the current
                 entry and recording the value to restore in the
                 avail_exprs_stack.  */
              avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
              *slot = element2;
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "2>>> ");
                  print_expr_hash_elt (dump_file, *slot);
                }
            }
          return NULL_TREE;
        }
    }

  free_expr_hash_elt_contents (&element);

  /* Extract the LHS of the assignment so that it can be used as the current
     definition of another variable.  */
  lhs = (*slot)->lhs;

  lhs = dom_valueize (lhs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "FIND: ");
      print_generic_expr (dump_file, lhs, 0);
      fprintf (dump_file, "\n");
    }

  return lhs;
}

/* Hashing and equality functions for AVAIL_EXPRS.  We compute a value number
   for expressions using the code of the expression and the SSA numbers of
   its operands.  */

static hashval_t
avail_expr_hash (const void *p)
{
  const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
  inchash::hash hstate;

  inchash::add_hashable_expr (expr, hstate);

  return hstate.end ();
}

/* PHI-ONLY copy and constant propagation.  This pass is meant to clean
   up degenerate PHIs created by or exposed by jump threading.  */
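
/* A degenerate PHI is one whose arguments are all the same value (or
   the PHI result itself).  E.g., in this hypothetical fragment

     x_4 = PHI <x_1(2), x_1(3)>

   every argument is x_1, so x_4 is really just a copy of x_1 and all
   uses of x_4 can be replaced by x_1.  */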

/* Given a statement STMT, which is either a PHI node or an assignment,
   remove it from the IL.  */

static void
remove_stmt_or_phi (gimple stmt)
{
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);

  if (gimple_code (stmt) == GIMPLE_PHI)
    remove_phi_node (&gsi, true);
  else
    {
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "rhs" of the node.  In the case of a non-degenerate
   PHI, NULL is returned.  */

static tree
get_rhs_or_phi_arg (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return degenerate_phi_result (as_a <gphi *> (stmt));
  else if (gimple_assign_single_p (stmt))
    return gimple_assign_rhs1 (stmt);
  else
    gcc_unreachable ();
}

/* Given a statement STMT, which is either a PHI node or an assignment,
   return the "lhs" of the node.  */

static tree
get_lhs_or_phi_result (gimple stmt)
{
  if (gimple_code (stmt) == GIMPLE_PHI)
    return gimple_phi_result (stmt);
  else if (is_gimple_assign (stmt))
    return gimple_assign_lhs (stmt);
  else
    gcc_unreachable ();
}

/* Propagate RHS into all uses of LHS (when possible).

   RHS and LHS are derived from STMT, which is passed in solely so
   that we can remove it if propagation is successful.

   When propagating into a PHI node or into a statement which turns
   into a trivial copy or constant initialization, set the
   appropriate bit in INTERESTING_NAMES so that we will visit those
   nodes as well in an effort to pick up secondary optimization
   opportunities.  */
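
/* Sketch of the secondary effect (hypothetical names): propagating
   x_1 for x_4 in

     y_6 = PHI <x_4(5), x_1(7)>

   yields y_6 = PHI <x_1(5), x_1(7)>, which is now degenerate; y_6 is
   therefore marked in INTERESTING_NAMES so it is revisited later.  */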

static void
propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
{
  /* First verify that propagation is valid.  */
  if (may_propagate_copy (lhs, rhs))
    {
      use_operand_p use_p;
      imm_use_iterator iter;
      gimple use_stmt;
      bool all = true;

      /* Dump details.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "  Replacing '");
          print_generic_expr (dump_file, lhs, dump_flags);
          fprintf (dump_file, "' with %s '",
                   (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
          print_generic_expr (dump_file, rhs, dump_flags);
          fprintf (dump_file, "'\n");
        }

      /* Walk over every use of LHS and try to replace the use with RHS.
         At this point the only reason why such a propagation would not
         be successful would be if the use occurs in an ASM_EXPR.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        {
          /* Leave debug stmts alone.  If we succeed in propagating
             all non-debug uses, we'll drop the DEF, and propagation
             into debug stmts will occur then.  */
          if (gimple_debug_bind_p (use_stmt))
            continue;

          /* It's not always safe to propagate into an ASM_EXPR.  */
          if (gimple_code (use_stmt) == GIMPLE_ASM
              && ! may_propagate_copy_into_asm (lhs))
            {
              all = false;
              continue;
            }

          /* It's not ok to propagate into the definition stmt of RHS.
               <bb 9>:
                 # prephitmp.12_36 = PHI <g_67.1_6(9)>
                 g_67.1_6 = prephitmp.12_36;
                 goto <bb 9>;
             While this is strictly all dead code we do not want to
             deal with this here.  */
          if (TREE_CODE (rhs) == SSA_NAME
              && SSA_NAME_DEF_STMT (rhs) == use_stmt)
            {
              all = false;
              continue;
            }

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Original statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* Propagate the RHS into this use of the LHS.  */
          FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
            propagate_value (use_p, rhs);

          /* Special cases to avoid useless calls into the folding
             routines, operand scanning, etc.

             Propagation into a PHI may cause the PHI to become
             a degenerate, so mark the PHI as interesting.  No other
             actions are necessary.  */
          if (gimple_code (use_stmt) == GIMPLE_PHI)
            {
              tree result;

              /* Dump details.  */
              if (dump_file && (dump_flags & TDF_DETAILS))
                {
                  fprintf (dump_file, "    Updated statement:");
                  print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
                }

              result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
              continue;
            }

          /* From this point onward we are propagating into a
             real statement.  Folding may (or may not) be possible,
             we may expose new operands, expose dead EH edges,
             etc.  */
          /* NOTE tuples.  In the tuples world, fold_stmt_inplace
             cannot fold a call that simplifies to a constant,
             because the GIMPLE_CALL must be replaced by a
             GIMPLE_ASSIGN, and there is no way to effect such a
             transformation in-place.  We might want to consider
             using the more general fold_stmt here.  */
            {
              gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
              fold_stmt_inplace (&gsi);
            }

          /* Sometimes propagation can expose new operands to the
             renamer.  */
          update_stmt (use_stmt);

          /* Dump details.  */
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
              fprintf (dump_file, "    Updated statement:");
              print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
            }

          /* If we replaced a variable index with a constant, then
             we would need to update the invariant flag for ADDR_EXPRs.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
            recompute_tree_invariant_for_addr_expr
              (gimple_assign_rhs1 (use_stmt));

          /* If we cleaned up EH information from the statement,
             mark its containing block as needing EH cleanups.  */
          if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
            {
              bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
              if (dump_file && (dump_flags & TDF_DETAILS))
                fprintf (dump_file, "  Flagged to clear EH edges.\n");
            }

          /* Propagation may expose new trivial copy/constant propagation
             opportunities.  */
          if (gimple_assign_single_p (use_stmt)
              && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
              && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
                  || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
            {
              tree result = get_lhs_or_phi_result (use_stmt);
              bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
            }

          /* Propagation into these nodes may make certain edges in
             the CFG unexecutable.  We want to identify them, as PHI
             nodes at the destination of those unexecutable edges may
             become degenerate.  */
          else if (gimple_code (use_stmt) == GIMPLE_COND
                   || gimple_code (use_stmt) == GIMPLE_SWITCH
                   || gimple_code (use_stmt) == GIMPLE_GOTO)
            {
              tree val;

              if (gimple_code (use_stmt) == GIMPLE_COND)
                val = fold_binary_loc (gimple_location (use_stmt),
                                       gimple_cond_code (use_stmt),
                                       boolean_type_node,
                                       gimple_cond_lhs (use_stmt),
                                       gimple_cond_rhs (use_stmt));
              else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
                val = gimple_switch_index (as_a <gswitch *> (use_stmt));
              else
                val = gimple_goto_dest (use_stmt);

              if (val && is_gimple_min_invariant (val))
                {
                  basic_block bb = gimple_bb (use_stmt);
                  edge te = find_taken_edge (bb, val);
                  if (!te)
                    continue;

                  edge_iterator ei;
                  edge e;
                  gimple_stmt_iterator gsi;
                  gphi_iterator psi;

                  /* Remove all outgoing edges except TE.  */
                  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
                    {
                      if (e != te)
                        {
                          /* Mark all the PHI nodes at the destination of
                             the unexecutable edge as interesting.  */
                          for (psi = gsi_start_phis (e->dest);
                               !gsi_end_p (psi);
                               gsi_next (&psi))
                            {
                              gphi *phi = psi.phi ();

                              tree result = gimple_phi_result (phi);
                              int version = SSA_NAME_VERSION (result);

                              bitmap_set_bit (interesting_names, version);
                            }

                          te->probability += e->probability;

                          te->count += e->count;
                          remove_edge (e);
                          cfg_altered = true;
                        }
                      else
                        ei_next (&ei);
                    }

                  gsi = gsi_last_bb (gimple_bb (use_stmt));
                  gsi_remove (&gsi, true);

                  /* And fixup the flags on the single remaining edge.  */
                  te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
                  te->flags &= ~EDGE_ABNORMAL;
                  te->flags |= EDGE_FALLTHRU;
                  if (te->probability > REG_BR_PROB_BASE)
                    te->probability = REG_BR_PROB_BASE;
                }
            }
        }

      /* Ensure there is nothing else to do.  */
      gcc_assert (!all || has_zero_uses (lhs));

      /* If we were able to propagate away all uses of LHS, then
         we can remove STMT.  */
      if (all)
        remove_stmt_or_phi (stmt);
    }
}

/* STMT is either a PHI node (potentially a degenerate PHI node) or
   a statement that is a trivial copy or constant initialization.

   Attempt to eliminate STMT by propagating its RHS into all uses of
   its LHS.  This may in turn set new bits in INTERESTING_NAMES
   for nodes we want to revisit later.

   All exit paths should clear INTERESTING_NAMES for the result
   of STMT.  */

static void
eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
{
  tree lhs = get_lhs_or_phi_result (stmt);
  tree rhs;
  int version = SSA_NAME_VERSION (lhs);

  /* If the LHS of this statement or PHI has no uses, then we can
     just eliminate it.  This can occur if, for example, the PHI
     was created by block duplication due to threading and its only
     use was in the conditional at the end of the block which was
     deleted.  */
  if (has_zero_uses (lhs))
    {
      bitmap_clear_bit (interesting_names, version);
      remove_stmt_or_phi (stmt);
      return;
    }

  /* Get the RHS of the assignment or PHI node if the PHI is a
     degenerate.  */
  rhs = get_rhs_or_phi_arg (stmt);
  if (!rhs)
    {
      bitmap_clear_bit (interesting_names, version);
      return;
    }

  if (!virtual_operand_p (lhs))
    propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
  else
    {
      gimple use_stmt;
      imm_use_iterator iter;
      use_operand_p use_p;
      /* For virtual operands we have to propagate into all uses as
         otherwise we will create overlapping life-ranges.  */
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
        FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
          SET_USE (use_p, rhs);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
        SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
      remove_stmt_or_phi (stmt);
    }

  /* Note that STMT may well have been deleted by now, so do
     not access it, instead use the saved version # to clear
     STMT's entry in the worklist.  */
  bitmap_clear_bit (interesting_names, version);
}

/* The first phase in degenerate PHI elimination.

   Eliminate the degenerate PHIs in BB, then recurse on the
   dominator children of BB.  */

static void
eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
{
  gphi_iterator gsi;
  basic_block son;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      eliminate_const_or_copy (phi, interesting_names);
    }

  /* Recurse into the dominator children of BB.  */
  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    eliminate_degenerate_phis_1 (son, interesting_names);
}

/* A very simple pass to eliminate degenerate PHI nodes from the
   IL.  This is meant to be fast enough to be run several times
   in the optimization pipeline.

   Certain optimizations, particularly those which duplicate blocks
   or remove edges from the CFG, can create or expose PHIs which are
   trivial copies or constant initializations.

   While we could pick up these optimizations in DOM or with the
   combination of copy-prop and CCP, those solutions are far too
   heavy-weight for our needs.

   This implementation has two phases so that we can efficiently
   eliminate the first order degenerate PHIs and second order
   degenerate PHIs.

   The first phase performs a dominator walk to identify and eliminate
   the vast majority of the degenerate PHIs.  When a degenerate PHI
   is identified and eliminated any affected statements or PHIs
   are put on a worklist.

   The second phase eliminates degenerate PHIs and trivial copies
   or constant initializations using the worklist.  This is how we
   pick up the secondary optimization opportunities with minimal
   cost.  */
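
/* As a hypothetical illustration of "second order" degenerates:
   eliminating the first order degenerate

     x_5 = PHI <x_1(3), x_1(4)>

   replaces all uses of x_5 with x_1, which may turn

     y_7 = PHI <x_5(6), x_1(8)>

   into y_7 = PHI <x_1(6), x_1(8)>.  That second order degenerate is
   picked up from the worklist rather than by another full walk.  */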

namespace {

const pass_data pass_data_phi_only_cprop =
{
  GIMPLE_PASS, /* type */
  "phicprop", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_PHI_CPROP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_phi_only_cprop : public gimple_opt_pass
{
public:
  pass_phi_only_cprop (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_phi_only_cprop

unsigned int
pass_phi_only_cprop::execute (function *fun)
{
  bitmap interesting_names;
  bitmap interesting_names1;

  /* Bitmap of blocks which need EH information updated.  We can not
     update it on-the-fly as doing so invalidates the dominator tree.  */
  need_eh_cleanup = BITMAP_ALLOC (NULL);

  /* INTERESTING_NAMES is effectively our worklist, indexed by
     SSA_NAME_VERSION.

     A set bit indicates that the statement or PHI node which
     defines the SSA_NAME should be (re)examined to determine if
     it has become a degenerate PHI or trivial const/copy propagation
     opportunity.

     Experiments have shown we generally get better compilation
     time behavior with bitmaps rather than sbitmaps.  */
  interesting_names = BITMAP_ALLOC (NULL);
  interesting_names1 = BITMAP_ALLOC (NULL);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* First phase.  Eliminate degenerate PHIs via a dominator
     walk of the CFG.

     Experiments have indicated that we generally get better
     compile-time behavior by visiting blocks in the first
     phase in dominator order.  Presumably this is because walking
     in dominator order leaves fewer PHIs for later examination
     by the worklist phase.  */
  eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
                               interesting_names);

  /* Second phase.  Eliminate second order degenerate PHIs as well
     as trivial copies or constant initializations identified by
     the first phase or this phase.  Basically we keep iterating
     until our set of INTERESTING_NAMEs is empty.   */
  while (!bitmap_empty_p (interesting_names))
    {
      unsigned int i;
      bitmap_iterator bi;

      /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
         changed during the loop.  Copy it to another bitmap and
         use that.  */
      bitmap_copy (interesting_names1, interesting_names);

      EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
        {
          tree name = ssa_name (i);

          /* Ignore SSA_NAMEs that have been released because
             their defining statement was deleted (unreachable).  */
          if (name)
            eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
                                     interesting_names);
        }
    }

  if (cfg_altered)
    {
      free_dominance_info (CDI_DOMINATORS);
      /* If we changed the CFG schedule loops for fixup by cfgcleanup.  */
      loops_state_set (LOOPS_NEED_FIXUP);
    }

  /* Propagation of const and copies may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      BITMAP_FREE (need_eh_cleanup);
    }

  BITMAP_FREE (interesting_names);
  BITMAP_FREE (interesting_names1);
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_phi_only_cprop (gcc::context *ctxt)
{
  return new pass_phi_only_cprop (ctxt);
}