/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "tree.h"
#include "gimple.h"
#include "hard-reg-set.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"

/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};

/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;

/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */
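
/* For example (illustrative GIMPLE, names hypothetical): given

     switch (i_1) <default: L0, case 42: L2>

   the edge leading to the block for L2 gets an edge_info with
   LHS = i_1 and RHS = 42, since traversing that edge implies
   i_1 == 42.  */
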
struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};

/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
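
/* For instance (illustrative), while a block is being processed the stack
   might hold, from bottom to top:

     (NULL, NULL)  (elt for "a_1 + b_2", NULL)  (elt for "c_3 < d_4", shadowed)

   When the block is finished, entries are popped and either removed from
   the hash table or replaced by the entry they shadowed, until the
   (NULL, NULL) marker is reached.  */
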
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};

/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher : pointer_hash <expr_hash_elt>
{
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}

/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
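
/* As an illustrative example (SSA names hypothetical), given

     x_1 = a_2 + b_3;
     ...
     y_4 = a_2 + b_3;

   the first statement makes "a_2 + b_3" available with value x_1, so the
   dominated occurrence is rewritten to y_4 = x_1.  */
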
/* Unwindable const/copy equivalences.  */
static const_and_copies *const_and_copies;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);

/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}

/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}

/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If both types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}

/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
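
/* For example, "a_1 + b_2" and "b_2 + a_1" receive the same hash here,
   mirroring hashable_expr_equal_p, which accepts commutative operands
   in either order.  */
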
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value in HSTATE.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical initial HSTATE.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}

/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}

/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}

/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}

/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}

/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to *P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p,
                           bool val = true)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = val ? boolean_true_node : boolean_false_node;
  p->safe_push (c);
}

/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (EQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences, false);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original true and false conditions into the first
     two slots.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}

/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
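          /* For example (illustrative): for "if (b_1 == 0)" we record
             b_1 = false on the true edge and b_1 = true on the false
             edge, instead of recording a value for the condition.  */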
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}

class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator

unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies = new class const_and_copies (dump_file, dump_flags);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in the future; then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}

/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */
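
/* For example (illustrative): "if (5 > i_2)" is rewritten as
   "if (i_2 < 5)" so that equivalent conditions hash and compare
   consistently in the expression tables.  */
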
static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}

/* Initialize local stacks for this optimizer and record equivalences
   upon entry to BB.  Equivalences can come from the edge traversed to
   reach BB or they may come from PHI nodes at the start of BB.  */

/* Pop entries off AVAIL_EXPRS_STACK, removing each expression from the
   global hash table or restoring the entry it shadowed, until we hit
   the marker for the current block.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}

/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}

/* Valueize hook for gimple_fold_stmt_to_constant_1.  */

static tree
dom_valueize (tree t)
{
  if (TREE_CODE (t) == SSA_NAME)
    {
      tree tem = SSA_NAME_VALUE (t);
      if (tem)
        return tem;
    }
  return t;
}

/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs)
        record_equality (lhs, rhs);

      /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
         set via a widening type conversion, then we may be able to record
         additional equivalences.  */
      if (lhs
          && TREE_CODE (lhs) == SSA_NAME
          && TREE_CODE (rhs) == INTEGER_CST)
        {
          gimple defstmt = SSA_NAME_DEF_STMT (lhs);

          if (defstmt
              && is_gimple_assign (defstmt)
              && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
            {
              tree old_rhs = gimple_assign_rhs1 (defstmt);

              /* If the conversion widens the original value and
                 the constant is in the range of the type of OLD_RHS,
                 then convert the constant and record the equivalence.

                 Note that int_fits_type_p does not check the precision
                 if the upper and lower bounds are OK.  */
              if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                  && (TYPE_PRECISION (TREE_TYPE (lhs))
                      > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                  && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                {
                  tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                  record_equality (old_rhs, newval);
                }
            }
        }
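
      /* For example (illustrative, names hypothetical): if LHS was
         defined as

           lhs_1 = (int) s_2;   (s_2 of a narrower type, e.g. short)

         and this edge implies lhs_1 == 5, then the equivalence
         s_2 == (short) 5 is recorded as well, since 5 fits in the
         narrower type.  */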

      /* If LHS is an SSA_NAME with a new equivalency, then see whether
         stmts with uses of that LHS that dominate the edge destination
         simplify, allowing further equivalences to be recorded.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        {
          use_operand_p use_p;
          imm_use_iterator iter;
          FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
            {
              gimple use_stmt = USE_STMT (use_p);

              /* Only bother to record more equivalences for lhs that
                 can be directly used by e->dest.
                 ??? If the code gets re-organized to a worklist to
                 catch more indirect opportunities and it is made to
                 handle PHIs then this should only consider use_stmts
                 in basic-blocks we have already visited.  */
              if (e->dest == gimple_bb (use_stmt)
                  || !dominated_by_p (CDI_DOMINATORS,
                                      e->dest, gimple_bb (use_stmt)))
                continue;
              tree lhs2 = gimple_get_lhs (use_stmt);
              if (lhs2 && TREE_CODE (lhs2) == SSA_NAME)
                {
                  tree res
                    = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
                                                      no_follow_ssa_edges);
                  if (res
                      && (TREE_CODE (res) == SSA_NAME
                          || is_gimple_min_invariant (res)))
                    record_equality (lhs2, res);
                }
            }
        }

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}

/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
        gimple_build_cond (NE_EXPR,
                           integer_zero_node, integer_zero_node,
                           NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        const_and_copies,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}

/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
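
/* For example (illustrative, SSA names hypothetical):

     x_4 = PHI <y_2, y_2, x_4>

   Ignoring the self-referencing x_4 argument, every alternative is y_2,
   so x_4 can be treated as equivalent to y_2.  */
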
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* Valueize t.  */
          if (TREE_CODE (t) == SSA_NAME)
            {
              tree tmp = SSA_NAME_VALUE (t);
              t = tmp ? tmp : t;
            }

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    record_temporary_equivalences (e);
}

/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}

/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}

/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}

/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y, false))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X.  This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) < loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}

/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}

/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            const_and_copies->record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}

void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}

1979 /* We have finished processing the dominator children of BB, perform
1980 any finalization actions in preparation for leaving this node in
1981 the dominator tree. */
1983 void
1984 dom_opt_dom_walker::after_dom_children (basic_block bb)
1986 gimple last;
1988 /* If we have an outgoing edge to a block with multiple incoming and
1989 outgoing edges, then we may be able to thread the edge, i.e., we
1990 may be able to statically determine which of the outgoing edges
1991 will be traversed when the incoming edge from BB is traversed. */
1992 if (single_succ_p (bb)
1993 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1994 && potentially_threadable_block (single_succ (bb)))
1996 thread_across_edge (single_succ_edge (bb));
1998 else if ((last = last_stmt (bb))
1999 && gimple_code (last) == GIMPLE_COND
2000 && EDGE_COUNT (bb->succs) == 2
2001 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2002 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2004 edge true_edge, false_edge;
2006 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2008 /* Only try to thread the edge if it reaches a target block with
2009 more than one predecessor and more than one successor. */
2010 if (potentially_threadable_block (true_edge->dest))
2011 thread_across_edge (true_edge);
2013 /* Similarly for the ELSE arm. */
2014 if (potentially_threadable_block (false_edge->dest))
2015 thread_across_edge (false_edge);
2019 /* These remove expressions local to BB from the tables. */
2020 remove_local_expressions_from_table ();
2021 const_and_copies->pop_to_marker ();
2024 /* Search for redundant computations in STMT. If any are found, then
2025 replace them with the variable holding the result of the computation.
2027 If safe, record this expression into the available expression hash
2028 table. */
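/* Illustrative example (hypothetical SSA names): given

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   the second computation hits the hash table entry created by the
   first, and its statement is rewritten to d_4 = a_1.  */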
2030 static void
2031 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2033 tree expr_type;
2034 tree cached_lhs;
2035 tree def;
2036 bool insert = true;
2037 bool assigns_var_p = false;
2039 gimple stmt = gsi_stmt (*gsi);
2041 if (gimple_code (stmt) == GIMPLE_PHI)
2042 def = gimple_phi_result (stmt);
2043 else
2044 def = gimple_get_lhs (stmt);
2046 /* Certain expressions on the RHS can be optimized away, but cannot
2047 themselves be entered into the hash tables. */
2048 if (! def
2049 || TREE_CODE (def) != SSA_NAME
2050 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2051 || gimple_vdef (stmt)
2052 /* Do not record equivalences for increments of ivs. This would create
2053 overlapping live ranges for a very questionable gain. */
2054 || simple_iv_increment_p (stmt))
2055 insert = false;
2057 /* Check if the expression has been computed before. */
2058 cached_lhs = lookup_avail_expr (stmt, insert);
2060 opt_stats.num_exprs_considered++;
2062 /* Get the type of the expression we are trying to optimize. */
2063 if (is_gimple_assign (stmt))
2065 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2066 assigns_var_p = true;
2068 else if (gimple_code (stmt) == GIMPLE_COND)
2069 expr_type = boolean_type_node;
2070 else if (is_gimple_call (stmt))
2072 gcc_assert (gimple_call_lhs (stmt));
2073 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2074 assigns_var_p = true;
2076 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2077 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2078 else if (gimple_code (stmt) == GIMPLE_PHI)
2079 /* We can't propagate into a phi, so the logic below doesn't apply.
2080 Instead record an equivalence between the cached LHS and the
2081 PHI result of this statement, provided they are in the same block.
2082 This should be sufficient to kill the redundant phi. */
2084 if (def && cached_lhs)
2085 const_and_copies->record_const_or_copy (def, cached_lhs);
2086 return;
2088 else
2089 gcc_unreachable ();
2091 if (!cached_lhs)
2092 return;
2094 /* It is safe to ignore types here since we have already done
2095 type checking in the hashing and equality routines. In fact
2096 type checking here merely gets in the way of constant
2097 propagation. Also, make sure that it is safe to propagate
2098 CACHED_LHS into the expression in STMT. */
2099 if ((TREE_CODE (cached_lhs) != SSA_NAME
2100 && (assigns_var_p
2101 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2102 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2104 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2105 || is_gimple_min_invariant (cached_lhs));
2107 if (dump_file && (dump_flags & TDF_DETAILS))
2109 fprintf (dump_file, " Replaced redundant expr '");
2110 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2111 fprintf (dump_file, "' with '");
2112 print_generic_expr (dump_file, cached_lhs, dump_flags);
2113 fprintf (dump_file, "'\n");
2116 opt_stats.num_re++;
2118 if (assigns_var_p
2119 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2120 cached_lhs = fold_convert (expr_type, cached_lhs);
2122 propagate_tree_value_into_stmt (gsi, cached_lhs);
2124 /* Since it is always necessary to mark the result as modified,
2125 perhaps we should move this into propagate_tree_value_into_stmt
2126 itself. */
2127 gimple_set_modified (gsi_stmt (*gsi), true);
2131 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2132 the available expressions table or the const_and_copies table.
2133 Detect and record those equivalences. */
2134 /* We handle only very simple copy equivalences here. The heavy
2135 lifting is done by eliminate_redundant_computations. */
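/* For instance (illustrative only), the assignment

     x_1 = y_2;

   records y_2 as the current value of x_1 in CONST_AND_COPIES, so
   dominated uses of x_1 can simply be rewritten to y_2.  */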
2137 static void
2138 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2140 tree lhs;
2141 enum tree_code lhs_code;
2143 gcc_assert (is_gimple_assign (stmt));
2145 lhs = gimple_assign_lhs (stmt);
2146 lhs_code = TREE_CODE (lhs);
2148 if (lhs_code == SSA_NAME
2149 && gimple_assign_single_p (stmt))
2151 tree rhs = gimple_assign_rhs1 (stmt);
2153 /* If the RHS of the assignment is a constant or another variable that
2154 may be propagated, register it in the CONST_AND_COPIES table. We
2155 do not need to record unwind data for this, since this is a true
2156 assignment and not an equivalence inferred from a comparison. All
2157 uses of this ssa name are dominated by this assignment, so unwinding
2158 just costs time and space. */
2159 if (may_optimize_p
2160 && (TREE_CODE (rhs) == SSA_NAME
2161 || is_gimple_min_invariant (rhs)))
2163 /* Valueize rhs. */
2164 if (TREE_CODE (rhs) == SSA_NAME)
2166 tree tmp = SSA_NAME_VALUE (rhs);
2167 rhs = tmp ? tmp : rhs;
2170 if (dump_file && (dump_flags & TDF_DETAILS))
2172 fprintf (dump_file, "==== ASGN ");
2173 print_generic_expr (dump_file, lhs, 0);
2174 fprintf (dump_file, " = ");
2175 print_generic_expr (dump_file, rhs, 0);
2176 fprintf (dump_file, "\n");
2179 set_ssa_name_value (lhs, rhs);
2183 /* Make sure we can propagate &x + CST. */
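/* For instance (illustrative only), given

     p_1 = &a + 4;

   we record the equivalent form p_1 = &MEM[(void *)&a + 4B] (dump
   syntax), i.e. an ADDR_EXPR wrapping a MEM_REF, so that later
   dereferences of p_1 can be resolved.  */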
2184 if (lhs_code == SSA_NAME
2185 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2186 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2187 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2189 tree op0 = gimple_assign_rhs1 (stmt);
2190 tree op1 = gimple_assign_rhs2 (stmt);
2191 tree new_rhs
2192 = build_fold_addr_expr (fold_build2 (MEM_REF,
2193 TREE_TYPE (TREE_TYPE (op0)),
2194 unshare_expr (op0),
2195 fold_convert (ptr_type_node,
2196 op1)));
2197 if (dump_file && (dump_flags & TDF_DETAILS))
2199 fprintf (dump_file, "==== ASGN ");
2200 print_generic_expr (dump_file, lhs, 0);
2201 fprintf (dump_file, " = ");
2202 print_generic_expr (dump_file, new_rhs, 0);
2203 fprintf (dump_file, "\n");
2206 set_ssa_name_value (lhs, new_rhs);
2209 /* A memory store, even an aliased store, creates a useful
2210 equivalence. By exchanging the LHS and RHS, creating suitable
2211 vops and recording the result in the available expression table,
2212 we may be able to expose more redundant loads. */
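/* Illustrative sketch (hypothetical SSA names): after the store

     *p_1 = x_2;

   we enter the artificial load x_2 = *p_1 into the table, so that a
   later load

     y_3 = *p_1;

   can be recognized as redundant and rewritten to y_3 = x_2.  */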
2213 if (!gimple_has_volatile_ops (stmt)
2214 && gimple_references_memory_p (stmt)
2215 && gimple_assign_single_p (stmt)
2216 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2217 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2218 && !is_gimple_reg (lhs))
2220 tree rhs = gimple_assign_rhs1 (stmt);
2221 gassign *new_stmt;
2223 /* Build a new statement with the RHS and LHS exchanged. */
2224 if (TREE_CODE (rhs) == SSA_NAME)
2226 /* NOTE tuples. The call to gimple_build_assign below replaced
2227 a call to build_gimple_modify_stmt, which did not set the
2228 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2229 may cause an SSA validation failure, as the LHS may be a
2230 default-initialized name and should have no definition. I'm
2231 a bit dubious of this, as the artificial statement that we
2232 generate here may in fact be ill-formed, but it is simply
2233 used as an internal device in this pass, and never becomes
2234 part of the CFG. */
2235 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2236 new_stmt = gimple_build_assign (rhs, lhs);
2237 SSA_NAME_DEF_STMT (rhs) = defstmt;
2239 else
2240 new_stmt = gimple_build_assign (rhs, lhs);
2242 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2244 /* Finally enter the statement into the available expression
2245 table. */
2246 lookup_avail_expr (new_stmt, true);
2250 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2251 CONST_AND_COPIES. */
2253 static void
2254 cprop_operand (gimple stmt, use_operand_p op_p)
2256 tree val;
2257 tree op = USE_FROM_PTR (op_p);
2259 /* If the operand has a known constant value or it is known to be a
2260 copy of some other variable, use the value or copy stored in
2261 CONST_AND_COPIES. */
2262 val = SSA_NAME_VALUE (op);
2263 if (val && val != op)
2265 /* Do not replace hard register operands in asm statements. */
2266 if (gimple_code (stmt) == GIMPLE_ASM
2267 && !may_propagate_copy_into_asm (op))
2268 return;
2270 /* Certain operands are not allowed to be copy propagated due
2271 to their interaction with exception handling and some GCC
2272 extensions. */
2273 if (!may_propagate_copy (op, val))
2274 return;
2276 /* Do not propagate copies into BIVs.
2277 See PR23821 and PR62217 for how this can disturb IV and
2278 number of iteration analysis. */
2279 if (TREE_CODE (val) != INTEGER_CST)
2281 gimple def = SSA_NAME_DEF_STMT (op);
2282 if (gimple_code (def) == GIMPLE_PHI
2283 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2284 return;
2287 /* Dump details. */
2288 if (dump_file && (dump_flags & TDF_DETAILS))
2290 fprintf (dump_file, " Replaced '");
2291 print_generic_expr (dump_file, op, dump_flags);
2292 fprintf (dump_file, "' with %s '",
2293 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2294 print_generic_expr (dump_file, val, dump_flags);
2295 fprintf (dump_file, "'\n");
2298 if (TREE_CODE (val) != SSA_NAME)
2299 opt_stats.num_const_prop++;
2300 else
2301 opt_stats.num_copy_prop++;
2303 propagate_value (op_p, val);
2305 /* And note that we modified this statement. This is now
2306 safe, even if we changed virtual operands since we will
2307 rescan the statement and rewrite its operands again. */
2308 gimple_set_modified (stmt, true);
2312 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2313 known value for that SSA_NAME (or NULL if no value is known).
2315 Propagate values from CONST_AND_COPIES into the uses, vuses and
2316 vdef_ops of STMT. */
2318 static void
2319 cprop_into_stmt (gimple stmt)
2321 use_operand_p op_p;
2322 ssa_op_iter iter;
2324 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2325 cprop_operand (stmt, op_p);
2328 /* Optimize the statement pointed to by iterator SI.
2330 We try to perform some simplistic global redundancy elimination and
2331 constant propagation:
2333 1- To detect global redundancy, we keep track of expressions that have
2334 been computed in this block and its dominators. If we find that the
2335 same expression is computed more than once, we eliminate repeated
2336 computations by using the target of the first one.
2338 2- Constant values and copy assignments. This is used to do very
2339 simplistic constant and copy propagation. When a constant or copy
2340 assignment is found, we map the value on the RHS of the assignment to
2341 the variable in the LHS in the CONST_AND_COPIES table. */
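/* For example (illustrative fragment, hypothetical SSA names), given

     a_1 = b_2;
     c_3 = a_1 + 1;
     d_4 = b_2 + 1;

   item 2 records a_1 = b_2 and rewrites the second statement to
   c_3 = b_2 + 1; item 1 then finds d_4 = b_2 + 1 redundant and
   replaces it with d_4 = c_3.  */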
2343 static void
2344 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2346 gimple stmt, old_stmt;
2347 bool may_optimize_p;
2348 bool modified_p = false;
2349 bool was_noreturn;
2351 old_stmt = stmt = gsi_stmt (si);
2352 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2354 if (dump_file && (dump_flags & TDF_DETAILS))
2356 fprintf (dump_file, "Optimizing statement ");
2357 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2360 if (gimple_code (stmt) == GIMPLE_COND)
2361 canonicalize_comparison (as_a <gcond *> (stmt));
2363 update_stmt_if_modified (stmt);
2364 opt_stats.num_stmts++;
2366 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2367 cprop_into_stmt (stmt);
2369 /* If the statement has been modified with constant replacements,
2370 fold its RHS before checking for redundant computations. */
2371 if (gimple_modified_p (stmt))
2373 tree rhs = NULL;
2375 /* Try to fold the statement making sure that STMT is kept
2376 up to date. */
2377 if (fold_stmt (&si))
2379 stmt = gsi_stmt (si);
2380 gimple_set_modified (stmt, true);
2382 if (dump_file && (dump_flags & TDF_DETAILS))
2384 fprintf (dump_file, " Folded to: ");
2385 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2389 /* We only need to consider cases that can yield a gimple operand. */
2390 if (gimple_assign_single_p (stmt))
2391 rhs = gimple_assign_rhs1 (stmt);
2392 else if (gimple_code (stmt) == GIMPLE_GOTO)
2393 rhs = gimple_goto_dest (stmt);
2394 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2395 /* This should never be an ADDR_EXPR. */
2396 rhs = gimple_switch_index (swtch_stmt);
2398 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2399 recompute_tree_invariant_for_addr_expr (rhs);
2401 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2402 even if fold_stmt updated the stmt already and thus cleared
2403 gimple_modified_p flag on it. */
2404 modified_p = true;
2407 /* Check for redundant computations. Do this optimization only for
2408 side-effect-free assignments, calls with an LHS, conditionals and switches. */
2409 may_optimize_p = (!gimple_has_side_effects (stmt)
2410 && (is_gimple_assign (stmt)
2411 || (is_gimple_call (stmt)
2412 && gimple_call_lhs (stmt) != NULL_TREE)
2413 || gimple_code (stmt) == GIMPLE_COND
2414 || gimple_code (stmt) == GIMPLE_SWITCH));
2416 if (may_optimize_p)
2418 if (gimple_code (stmt) == GIMPLE_CALL)
2420 /* Resolve __builtin_constant_p. If it hasn't been
2421 folded to integer_one_node by now, it's fairly
2422 certain that the value simply isn't constant. */
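/* E.g. (illustrative) a call t_5 = __builtin_constant_p (n_7) that
   survives to this point is replaced with t_5 = 0 below.  */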
2423 tree callee = gimple_call_fndecl (stmt);
2424 if (callee
2425 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2426 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2428 propagate_tree_value_into_stmt (&si, integer_zero_node);
2429 stmt = gsi_stmt (si);
2433 update_stmt_if_modified (stmt);
2434 eliminate_redundant_computations (&si);
2435 stmt = gsi_stmt (si);
2437 /* Perform simple redundant store elimination. */
2438 if (gimple_assign_single_p (stmt)
2439 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2441 tree lhs = gimple_assign_lhs (stmt);
2442 tree rhs = gimple_assign_rhs1 (stmt);
2443 tree cached_lhs;
2444 gassign *new_stmt;
2445 if (TREE_CODE (rhs) == SSA_NAME)
2447 tree tem = SSA_NAME_VALUE (rhs);
2448 if (tem)
2449 rhs = tem;
2451 /* Build a new statement with the RHS and LHS exchanged. */
2452 if (TREE_CODE (rhs) == SSA_NAME)
2454 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2455 new_stmt = gimple_build_assign (rhs, lhs);
2456 SSA_NAME_DEF_STMT (rhs) = defstmt;
2458 else
2459 new_stmt = gimple_build_assign (rhs, lhs);
2460 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2461 cached_lhs = lookup_avail_expr (new_stmt, false);
2462 if (cached_lhs
2463 && rhs == cached_lhs)
2465 basic_block bb = gimple_bb (stmt);
2466 unlink_stmt_vdef (stmt);
2467 if (gsi_remove (&si, true))
2469 bitmap_set_bit (need_eh_cleanup, bb->index);
2470 if (dump_file && (dump_flags & TDF_DETAILS))
2471 fprintf (dump_file, " Flagged to clear EH edges.\n");
2473 release_defs (stmt);
2474 return;
2479 /* Record any additional equivalences created by this statement. */
2480 if (is_gimple_assign (stmt))
2481 record_equivalences_from_stmt (stmt, may_optimize_p);
2483 /* If STMT is a COND_EXPR and it was modified, then we may know
2484 where it goes. If that is the case, then mark the CFG as altered.
2486 This will cause us to later call remove_unreachable_blocks and
2487 cleanup_tree_cfg when it is safe to do so. It is not safe to
2488 clean things up here since removal of edges and such can trigger
2489 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2490 the manager.
2492 That's all fine and good, except that once SSA_NAMEs are released
2493 to the manager, we must not call create_ssa_name until all references
2494 to released SSA_NAMEs have been eliminated.
2496 All references to the deleted SSA_NAMEs cannot be eliminated until
2497 we remove unreachable blocks.
2499 We cannot remove unreachable blocks until after we have completed
2500 any queued jump threading.
2502 We cannot complete any queued jump threads until we have taken
2503 appropriate variables out of SSA form. Taking variables out of
2504 SSA form can call create_ssa_name and thus we lose.
2506 Ultimately I suspect we're going to need to change the interface
2507 into the SSA_NAME manager. */
2508 if (gimple_modified_p (stmt) || modified_p)
2510 tree val = NULL;
2512 update_stmt_if_modified (stmt);
2514 if (gimple_code (stmt) == GIMPLE_COND)
2515 val = fold_binary_loc (gimple_location (stmt),
2516 gimple_cond_code (stmt), boolean_type_node,
2517 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2518 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2519 val = gimple_switch_index (swtch_stmt);
2521 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2522 cfg_altered = true;
2524 /* If we simplified a statement in such a way that it can be shown
2525 not to trap, update the EH information and the CFG to match. */
2526 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2528 bitmap_set_bit (need_eh_cleanup, bb->index);
2529 if (dump_file && (dump_flags & TDF_DETAILS))
2530 fprintf (dump_file, " Flagged to clear EH edges.\n");
2533 if (!was_noreturn
2534 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2535 need_noreturn_fixup.safe_push (stmt);
2539 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2540 the desired memory state. */
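/* Illustrative walk (hypothetical virtual operands): a hashed load was
   recorded with VUSE .MEM_5, while the current load sees .MEM_9.
   walk_non_aliased_vuses starts at .MEM_9 and steps over intervening
   stores that provably do not alias the reference; if it reaches
   .MEM_5, vuse_eq returns its non-NULL DATA argument and the cached
   LHS can be reused.  */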
2542 static void *
2543 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2545 tree vuse2 = (tree) data;
2546 if (vuse1 == vuse2)
2547 return data;
2549 /* This bounds the stmt walks we perform on reference lookups
2550 to O(1) instead of O(N) where N is the number of dominating
2551 stores leading to a candidate. We re-use the SCCVN param
2552 for this as it is basically the same complexity. */
2553 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2554 return (void *)-1;
2556 return NULL;
2559 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2560 If found, return its LHS. Otherwise insert STMT in the table and
2561 return NULL_TREE.
2563 Also, when an expression is first inserted in the table, it is
2564 also added to AVAIL_EXPRS_STACK, so that it can be removed when
2565 we finish processing this block and its children. */
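/* Usage sketch (both forms appear in this pass): lookup_avail_expr
   (stmt, true) queries the table and, on a miss, records STMT as
   available; lookup_avail_expr (new_stmt, false) only queries, which
   is what the redundant-store check uses for its artificial statement
   so the table is not polluted.  */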
2567 static tree
2568 lookup_avail_expr (gimple stmt, bool insert)
2570 expr_hash_elt **slot;
2571 tree lhs;
2572 tree temp;
2573 struct expr_hash_elt element;
2575 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2576 if (gimple_code (stmt) == GIMPLE_PHI)
2577 lhs = gimple_phi_result (stmt);
2578 else
2579 lhs = gimple_get_lhs (stmt);
2581 initialize_hash_element (stmt, lhs, &element);
2583 if (dump_file && (dump_flags & TDF_DETAILS))
2585 fprintf (dump_file, "LKUP ");
2586 print_expr_hash_elt (dump_file, &element);
2589 /* Don't bother remembering constant assignments and copy operations.
2590 Constants and copy operations are handled by the constant/copy propagator
2591 in optimize_stmt. */
2592 if (element.expr.kind == EXPR_SINGLE
2593 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2594 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2595 return NULL_TREE;
2597 /* Finally try to find the expression in the main expression hash table. */
2598 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2599 if (slot == NULL)
2601 free_expr_hash_elt_contents (&element);
2602 return NULL_TREE;
2604 else if (*slot == NULL)
2606 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2607 *element2 = element;
2608 element2->stamp = element2;
2609 *slot = element2;
2611 if (dump_file && (dump_flags & TDF_DETAILS))
2613 fprintf (dump_file, "2>>> ");
2614 print_expr_hash_elt (dump_file, element2);
2617 avail_exprs_stack.safe_push
2618 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2619 return NULL_TREE;
2622 /* If we found a redundant memory operation, do an alias walk to
2623 check if we can re-use it. */
2624 if (gimple_vuse (stmt) != (*slot)->vop)
2626 tree vuse1 = (*slot)->vop;
2627 tree vuse2 = gimple_vuse (stmt);
2628 /* If we have a load of a register and a candidate in the
2629 hash with vuse1, then try to reach its stmt by walking
2630 up the virtual use-def chain using walk_non_aliased_vuses.
2631 But don't do this when removing expressions from the hash. */
2632 ao_ref ref;
2633 if (!(vuse1 && vuse2
2634 && gimple_assign_single_p (stmt)
2635 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2636 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2637 && walk_non_aliased_vuses (&ref, vuse2,
2638 vuse_eq, NULL, NULL, vuse1) != NULL))
2640 if (insert)
2642 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2643 *element2 = element;
2644 element2->stamp = element2;
2646 /* Insert the expr into the hash by replacing the current
2647 entry and recording the value to restore in the
2648 avail_exprs_stack. */
2649 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2650 *slot = element2;
2651 if (dump_file && (dump_flags & TDF_DETAILS))
2653 fprintf (dump_file, "2>>> ");
2654 print_expr_hash_elt (dump_file, *slot);
2657 return NULL_TREE;
2661 free_expr_hash_elt_contents (&element);
2663 /* Extract the LHS of the assignment so that it can be used as the current
2664 definition of another variable. */
2665 lhs = (*slot)->lhs;
2667 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2668 use the value from the const_and_copies table. */
2669 if (TREE_CODE (lhs) == SSA_NAME)
2671 temp = SSA_NAME_VALUE (lhs);
2672 if (temp)
2673 lhs = temp;
2676 if (dump_file && (dump_flags & TDF_DETAILS))
2678 fprintf (dump_file, "FIND: ");
2679 print_generic_expr (dump_file, lhs, 0);
2680 fprintf (dump_file, "\n");
2683 return lhs;
2686 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2687 for expressions using the code of the expression and the SSA numbers of
2688 its operands. */
2690 static hashval_t
2691 avail_expr_hash (const void *p)
2693 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2694 inchash::hash hstate;
2696 inchash::add_hashable_expr (expr, hstate);
2698 return hstate.end ();
2701 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2702 up degenerate PHIs created by or exposed by jump threading. */
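/* A degenerate PHI is one whose arguments all yield the same value,
   e.g. (illustrative)

     x_2 = PHI <y_1 (2), y_1 (3)>

   which is equivalent to the plain copy x_2 = y_1.  */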
2704 /* Given a statement STMT, which is either a PHI node or an assignment,
2705 remove it from the IL. */
2707 static void
2708 remove_stmt_or_phi (gimple stmt)
2710 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2712 if (gimple_code (stmt) == GIMPLE_PHI)
2713 remove_phi_node (&gsi, true);
2714 else
2716 gsi_remove (&gsi, true);
2717 release_defs (stmt);
2721 /* Given a statement STMT, which is either a PHI node or an assignment,
2722 return the "rhs" of the node; in the case of a non-degenerate
2723 PHI, NULL is returned. */
2725 static tree
2726 get_rhs_or_phi_arg (gimple stmt)
2728 if (gimple_code (stmt) == GIMPLE_PHI)
2729 return degenerate_phi_result (as_a <gphi *> (stmt));
2730 else if (gimple_assign_single_p (stmt))
2731 return gimple_assign_rhs1 (stmt);
2732 else
2733 gcc_unreachable ();
2737 /* Given a statement STMT, which is either a PHI node or an assignment,
2738 return the "lhs" of the node. */
2740 static tree
2741 get_lhs_or_phi_result (gimple stmt)
2743 if (gimple_code (stmt) == GIMPLE_PHI)
2744 return gimple_phi_result (stmt);
2745 else if (is_gimple_assign (stmt))
2746 return gimple_assign_lhs (stmt);
2747 else
2748 gcc_unreachable ();
2751 /* Propagate RHS into all uses of LHS (when possible).
2753 RHS and LHS are derived from STMT, which is passed in solely so
2754 that we can remove it if propagation is successful.
2756 When propagating into a PHI node or into a statement which turns
2757 into a trivial copy or constant initialization, set the
2758 appropriate bit in INTERESTING_NAMES so that we will visit those
2759 nodes as well in an effort to pick up secondary optimization
2760 opportunities. */
2762 static void
2763 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2765 /* First verify that propagation is valid. */
2766 if (may_propagate_copy (lhs, rhs))
2768 use_operand_p use_p;
2769 imm_use_iterator iter;
2770 gimple use_stmt;
2771 bool all = true;
2773 /* Dump details. */
2774 if (dump_file && (dump_flags & TDF_DETAILS))
2776 fprintf (dump_file, " Replacing '");
2777 print_generic_expr (dump_file, lhs, dump_flags);
2778 fprintf (dump_file, "' with %s '",
2779 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2780 print_generic_expr (dump_file, rhs, dump_flags);
2781 fprintf (dump_file, "'\n");
2784 /* Walk over every use of LHS and try to replace the use with RHS.
2785 At this point the only reason why such a propagation would not
2786 be successful would be if the use occurs in an ASM_EXPR. */
2787 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2789 /* Leave debug stmts alone. If we succeed in propagating
2790 all non-debug uses, we'll drop the DEF, and propagation
2791 into debug stmts will occur then. */
2792 if (gimple_debug_bind_p (use_stmt))
2793 continue;
2795 /* It's not always safe to propagate into an ASM_EXPR. */
2796 if (gimple_code (use_stmt) == GIMPLE_ASM
2797 && ! may_propagate_copy_into_asm (lhs))
2799 all = false;
2800 continue;
2803 /* It's not ok to propagate into the definition stmt of RHS.
2804 <bb 9>:
2805 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2806 g_67.1_6 = prephitmp.12_36;
2807 goto <bb 9>;
2808 While this is strictly all dead code, we do not want to
2809 deal with this here. */
2810 if (TREE_CODE (rhs) == SSA_NAME
2811 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2813 all = false;
2814 continue;
2817 /* Dump details. */
2818 if (dump_file && (dump_flags & TDF_DETAILS))
2820 fprintf (dump_file, " Original statement:");
2821 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2824 /* Propagate the RHS into this use of the LHS. */
2825 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2826 propagate_value (use_p, rhs);
2828 /* Special cases to avoid useless calls into the folding
2829 routines, operand scanning, etc.
2831 Propagation into a PHI may cause the PHI to become
2832 a degenerate, so mark the PHI as interesting. No other
2833 actions are necessary. */
2834 if (gimple_code (use_stmt) == GIMPLE_PHI)
2836 tree result;
2838 /* Dump details. */
2839 if (dump_file && (dump_flags & TDF_DETAILS))
2841 fprintf (dump_file, " Updated statement:");
2842 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2845 result = get_lhs_or_phi_result (use_stmt);
2846 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2847 continue;
2850 /* From this point onward we are propagating into a
2851 real statement. Folding may (or may not) be possible,
2852 we may expose new operands, expose dead EH edges,
2853 etc. */
2854 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2855 cannot fold a call that simplifies to a constant,
2856 because the GIMPLE_CALL must be replaced by a
2857 GIMPLE_ASSIGN, and there is no way to effect such a
2858 transformation in-place. We might want to consider
2859 using the more general fold_stmt here. */
2861 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2862 fold_stmt_inplace (&gsi);
2865 /* Sometimes propagation can expose new operands to the
2866 renamer. */
2867 update_stmt (use_stmt);
2869 /* Dump details. */
2870 if (dump_file && (dump_flags & TDF_DETAILS))
2872 fprintf (dump_file, " Updated statement:");
2873 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2876 /* If we replaced a variable index with a constant, then
2877 we would need to update the invariant flag for ADDR_EXPRs. */
2878 if (gimple_assign_single_p (use_stmt)
2879 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2880 recompute_tree_invariant_for_addr_expr
2881 (gimple_assign_rhs1 (use_stmt));
2883 /* If we cleaned up EH information from the statement,
2884 mark its containing block as needing EH cleanups. */
2885 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2887 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2888 if (dump_file && (dump_flags & TDF_DETAILS))
2889 fprintf (dump_file, " Flagged to clear EH edges.\n");
2892 /* Propagation may expose new trivial copy/constant propagation
2893 opportunities. */
2894 if (gimple_assign_single_p (use_stmt)
2895 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2896 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2897 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2899 tree result = get_lhs_or_phi_result (use_stmt);
2900 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2903 /* Propagation into these nodes may make certain edges in
2904 the CFG unexecutable. We want to identify those edges, as PHI
2905 nodes at the destination of unexecutable edges may become
2906 degenerate. */
2907 else if (gimple_code (use_stmt) == GIMPLE_COND
2908 || gimple_code (use_stmt) == GIMPLE_SWITCH
2909 || gimple_code (use_stmt) == GIMPLE_GOTO)
2911 tree val;
2913 if (gimple_code (use_stmt) == GIMPLE_COND)
2914 val = fold_binary_loc (gimple_location (use_stmt),
2915 gimple_cond_code (use_stmt),
2916 boolean_type_node,
2917 gimple_cond_lhs (use_stmt),
2918 gimple_cond_rhs (use_stmt));
2919 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2920 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2921 else
2922 val = gimple_goto_dest (use_stmt);
2924 if (val && is_gimple_min_invariant (val))
2926 basic_block bb = gimple_bb (use_stmt);
2927 edge te = find_taken_edge (bb, val);
2928 if (!te)
2929 continue;
2931 edge_iterator ei;
2932 edge e;
2933 gimple_stmt_iterator gsi;
2934 gphi_iterator psi;
2936 /* Remove all outgoing edges except TE. */
2937 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2939 if (e != te)
2941 /* Mark all the PHI nodes at the destination of
2942 the unexecutable edge as interesting. */
2943 for (psi = gsi_start_phis (e->dest);
2944 !gsi_end_p (psi);
2945 gsi_next (&psi))
2947 gphi *phi = psi.phi ();
2949 tree result = gimple_phi_result (phi);
2950 int version = SSA_NAME_VERSION (result);
2952 bitmap_set_bit (interesting_names, version);
2955 te->probability += e->probability;
2957 te->count += e->count;
2958 remove_edge (e);
2959 cfg_altered = true;
2961 else
2962 ei_next (&ei);
2965 gsi = gsi_last_bb (gimple_bb (use_stmt));
2966 gsi_remove (&gsi, true);
2968 /* And fixup the flags on the single remaining edge. */
2969 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2970 te->flags &= ~EDGE_ABNORMAL;
2971 te->flags |= EDGE_FALLTHRU;
2972 if (te->probability > REG_BR_PROB_BASE)
2973 te->probability = REG_BR_PROB_BASE;
2978 /* Ensure there is nothing else to do. */
2979 gcc_assert (!all || has_zero_uses (lhs));
2981 /* If we were able to propagate away all uses of LHS, then
2982 we can remove STMT. */
2983 if (all)
2984 remove_stmt_or_phi (stmt);
2988 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2989 a statement that is a trivial copy or constant initialization.
2991 Attempt to eliminate STMT by propagating its RHS into all uses of
2992 its LHS. This may in turn set new bits in INTERESTING_NAMES
2993 for nodes we want to revisit later.
2995 All exit paths should clear INTERESTING_NAMES for the result
2996 of STMT. */
2998 static void
2999 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
3001 tree lhs = get_lhs_or_phi_result (stmt);
3002 tree rhs;
3003 int version = SSA_NAME_VERSION (lhs);
3005 /* If the LHS of this statement or PHI has no uses, then we can
3006 just eliminate it. This can occur if, for example, the PHI
3007 was created by block duplication due to threading and its only
3008 use was in the conditional at the end of the block which was
3009 deleted. */
3010 if (has_zero_uses (lhs))
3012 bitmap_clear_bit (interesting_names, version);
3013 remove_stmt_or_phi (stmt);
3014 return;
3017 /* Get the RHS of the assignment or PHI node if the PHI is a
3018 degenerate. */
3019 rhs = get_rhs_or_phi_arg (stmt);
3020 if (!rhs)
3022 bitmap_clear_bit (interesting_names, version);
3023 return;
3026 if (!virtual_operand_p (lhs))
3027 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3028 else
3030 gimple use_stmt;
3031 imm_use_iterator iter;
3032 use_operand_p use_p;
3033 /* For virtual operands we have to propagate into all uses as
3034 otherwise we will create overlapping live ranges. */
3035 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3036 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3037 SET_USE (use_p, rhs);
3038 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3039 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3040 remove_stmt_or_phi (stmt);
3043 /* Note that STMT may well have been deleted by now, so do
3044 not access it; instead use the saved version number to clear
3045 the LHS's entry in the worklist. */
3046 bitmap_clear_bit (interesting_names, version);
3049 /* The first phase in degenerate PHI elimination.
3051 Eliminate the degenerate PHIs in BB, then recurse on the
3052 dominator children of BB. */
3054 static void
3055 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3057 gphi_iterator gsi;
3058 basic_block son;
3060 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3062 gphi *phi = gsi.phi ();
3064 eliminate_const_or_copy (phi, interesting_names);
3067 /* Recurse into the dominator children of BB. */
3068 for (son = first_dom_son (CDI_DOMINATORS, bb);
3069 son;
3070 son = next_dom_son (CDI_DOMINATORS, son))
3071 eliminate_degenerate_phis_1 (son, interesting_names);
3075 /* A very simple pass to eliminate degenerate PHI nodes from the
3076 IL. This is meant to be fast enough to be run several times
3077 in the optimization pipeline.
3079 Certain optimizations, particularly those which duplicate blocks
3080 or remove edges from the CFG can create or expose PHIs which are
3081 trivial copies or constant initializations.
3083 While we could pick up these optimizations in DOM or with the
3084 combination of copy-prop and CCP, those solutions are far too
3085 heavy-weight for our needs.
3087 This implementation has two phases so that we can efficiently
3088 eliminate the first order degenerate PHIs and second order
3089 degenerate PHIs.
3091 The first phase performs a dominator walk to identify and eliminate
3092 the vast majority of the degenerate PHIs. When a degenerate PHI
3093 is identified and eliminated, any affected statements or PHIs
3094 are put on a worklist.
3096 The second phase eliminates degenerate PHIs and trivial copies
3097 or constant initializations using the worklist. This is how we
3098 pick up the secondary optimization opportunities with minimal
3099 cost. */
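/* Illustrative second-order case (hypothetical SSA names): eliminating

     x_2 = PHI <y_1 (2), y_1 (3)>

   replaces all uses of x_2 with y_1, which may turn

     z_3 = PHI <x_2 (4), y_1 (5)>

   into z_3 = PHI <y_1 (4), y_1 (5)>; that PHI is now degenerate too
   and is picked up by the worklist phase.  */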
3101 namespace {
3103 const pass_data pass_data_phi_only_cprop =
3105 GIMPLE_PASS, /* type */
3106 "phicprop", /* name */
3107 OPTGROUP_NONE, /* optinfo_flags */
3108 TV_TREE_PHI_CPROP, /* tv_id */
3109 ( PROP_cfg | PROP_ssa ), /* properties_required */
3110 0, /* properties_provided */
3111 0, /* properties_destroyed */
3112 0, /* todo_flags_start */
3113 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3116 class pass_phi_only_cprop : public gimple_opt_pass
3118 public:
3119 pass_phi_only_cprop (gcc::context *ctxt)
3120 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3123 /* opt_pass methods: */
3124 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3125 virtual bool gate (function *) { return flag_tree_dom != 0; }
3126 virtual unsigned int execute (function *);
3128 }; // class pass_phi_only_cprop
3130 unsigned int
3131 pass_phi_only_cprop::execute (function *fun)
3133 bitmap interesting_names;
3134 bitmap interesting_names1;
3136 /* Bitmap of blocks which need EH information updated. We cannot
3137 update it on-the-fly, as doing so invalidates the dominator tree. */
3138 need_eh_cleanup = BITMAP_ALLOC (NULL);
3140 /* INTERESTING_NAMES is effectively our worklist, indexed by
3141 SSA_NAME_VERSION.
3143 A set bit indicates that the statement or PHI node which
3144 defines the SSA_NAME should be (re)examined to determine if
3145 it has become a degenerate PHI or trivial const/copy propagation
3146 opportunity.
3148 Experiments have shown we generally get better compile-time
3149 behavior with bitmaps than with sbitmaps. */
3150 interesting_names = BITMAP_ALLOC (NULL);
3151 interesting_names1 = BITMAP_ALLOC (NULL);
3153 calculate_dominance_info (CDI_DOMINATORS);
3154 cfg_altered = false;
3156 /* First phase. Eliminate degenerate PHIs via a dominator
3157 walk of the CFG.
3159 Experiments have indicated that we generally get better
3160 compile-time behavior by visiting blocks in the first
3161 phase in dominator order. Presumably this is because walking
3162 in dominator order leaves fewer PHIs for later examination
3163 by the worklist phase. */
3164 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3165 interesting_names);
3167 /* Second phase. Eliminate second order degenerate PHIs as well
3168 as trivial copies or constant initializations identified by
3169 the first phase or this phase. Basically we keep iterating
3170 until our set of INTERESTING_NAMEs is empty. */
3171 while (!bitmap_empty_p (interesting_names))
3173 unsigned int i;
3174 bitmap_iterator bi;
3176 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3177 changed during the loop. Copy it to another bitmap and
3178 use that. */
3179 bitmap_copy (interesting_names1, interesting_names);
3181 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3183 tree name = ssa_name (i);
3185 /* Ignore SSA_NAMEs that have been released because
3186 their defining statement was deleted (unreachable). */
3187 if (name)
3188 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3189 interesting_names);
3193 if (cfg_altered)
3195 free_dominance_info (CDI_DOMINATORS);
3196 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3197 loops_state_set (LOOPS_NEED_FIXUP);
3200 /* Propagation of const and copies may make some EH edges dead. Purge
3201 such edges from the CFG as needed. */
3202 if (!bitmap_empty_p (need_eh_cleanup))
3204 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3205 BITMAP_FREE (need_eh_cleanup);
3208 BITMAP_FREE (interesting_names);
3209 BITMAP_FREE (interesting_names1);
3210 return 0;
3213 } // anon namespace
3215 gimple_opt_pass *
3216 make_pass_phi_only_cprop (gcc::context *ctxt)
3218 return new pass_phi_only_cprop (ctxt);