PR c++/65727
gcc/tree-ssa-dom.c
/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-table.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "real.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */
enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};
struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op; tree opnd; } unary;
    struct { enum tree_code op; tree opnd0, opnd1; } binary;
    struct { enum tree_code op; tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
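/* For example, the GIMPLE assignment "a_1 = b_2 + c_3" is recorded as an
   EXPR_BINARY with type TREE_TYPE (a_1), ops.binary.op == PLUS_EXPR and
   operands b_2 and c_3; the LHS a_1 lives in the enclosing hash table
   entry, not here.  */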
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
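/* E.g., on the true edge out of "if (a_1 < b_2)" we record a
   cond_equivalence whose COND is the EXPR_BINARY a_1 < b_2 and whose
   VALUE is boolean_true_node.  */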
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */
struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
/* Stack of available expressions in AVAIL_EXPRS.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
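/* The second element of each pair records the hash table entry that the
   first element displaced when it was inserted (NULL if it displaced
   nothing); remove_local_expressions_from_table puts it back when the
   stack is unwound.  */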
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
				   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher
{
  typedef expr_hash_elt *value_type;
  typedef expr_hash_elt *compare_type;
  typedef int store_values_directly;
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};
inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly, as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;
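/* For example, given

       a_1 = b_2 + c_3;
       ...
       d_4 = b_2 + c_3;

   the table lookup for the second statement finds the entry recorded for
   the first, and the second computation is replaced by d_4 = a_1.  */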
/* Stack of dest,src pairs that need to be restored during finalization.

   A NULL entry is used to mark the end of pairs which need to be
   restored during finalization of this block.  */
static vec<tree> const_and_copies_stack;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;

/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;
/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
			     const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_const_or_copy (tree, tree);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static void restore_vars_to_original_value (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
			 struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
	{
	case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
	case GIMPLE_UNARY_RHS:
	  expr->kind = EXPR_UNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  if (CONVERT_EXPR_CODE_P (subcode))
	    subcode = NOP_EXPR;
	  expr->ops.unary.op = subcode;
	  expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
	  break;
	case GIMPLE_BINARY_RHS:
	  expr->kind = EXPR_BINARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.binary.op = subcode;
	  expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
	  break;
	case GIMPLE_TERNARY_RHS:
	  expr->kind = EXPR_TERNARY;
	  expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
	  expr->ops.ternary.op = subcode;
	  expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
	  expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
	  expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
	expr->ops.call.pure = true;
      else
	expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
	expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
	expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
				   tree lhs,
				   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c.  */
static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
		       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If one type is NULL and the other is not, they cannot be equal.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the types don't have the same signedness, precision, and mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
	  || TREE_CODE (type1) == ERROR_MARK
	  || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
	  || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
			      expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
	return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
	   || expr0->ops.unary.op == NON_LVALUE_EXPR)
	  && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
	return false;

      return operand_equal_p (expr0->ops.unary.opnd,
			      expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
	return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
			   expr1->ops.binary.opnd0, 0)
	  && operand_equal_p (expr0->ops.binary.opnd1,
			      expr1->ops.binary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
	      && operand_equal_p (expr0->ops.binary.opnd0,
				  expr1->ops.binary.opnd1, 0)
	      && operand_equal_p (expr0->ops.binary.opnd1,
				  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
	  || !operand_equal_p (expr0->ops.ternary.opnd2,
			       expr1->ops.ternary.opnd2, 0))
	return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
			   expr1->ops.ternary.opnd0, 0)
	  && operand_equal_p (expr0->ops.ternary.opnd1,
			      expr1->ops.ternary.opnd1, 0))
	return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
	      && operand_equal_p (expr0->ops.ternary.opnd0,
				  expr1->ops.ternary.opnd1, 0)
	      && operand_equal_p (expr0->ops.ternary.opnd1,
				  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
	size_t i;

	/* If the calls are to different functions, then they
	   clearly cannot be equal.  */
	if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
					expr1->ops.call.fn_from))
	  return false;

	if (! expr0->ops.call.pure)
	  return false;

	if (expr0->ops.call.nargs != expr1->ops.call.nargs)
	  return false;

	for (i = 0; i < expr0->ops.call.nargs; i++)
	  if (! operand_equal_p (expr0->ops.call.args[i],
				 expr1->ops.call.args[i], 0))
	    return false;

	if (stmt_could_throw_p (expr0->ops.call.fn_from))
	  {
	    int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
	    int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
	    if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
	      return false;
	  }

	return true;
      }

    case EXPR_PHI:
      {
	size_t i;

	if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
	  return false;

	for (i = 0; i < expr0->ops.phi.nargs; i++)
	  if (! operand_equal_p (expr0->ops.phi.args[i],
				 expr1->ops.phi.args[i], 0))
	    return false;

	return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
	 Don't hash the type, that can lead to having nodes which
	 compare equal according to operand_equal_p, but which
	 have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
	  || expr->ops.unary.op == NON_LVALUE_EXPR)
	hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
	inchash::add_expr_commutative (expr->ops.binary.opnd0,
				       expr->ops.binary.opnd1, hstate);
      else
	{
	  inchash::add_expr (expr->ops.binary.opnd0, hstate);
	  inchash::add_expr (expr->ops.binary.opnd1, hstate);
	}
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
	inchash::add_expr_commutative (expr->ops.ternary.opnd0,
				       expr->ops.ternary.opnd1, hstate);
      else
	{
	  inchash::add_expr (expr->ops.ternary.opnd0, hstate);
	  inchash::add_expr (expr->ops.ternary.opnd1, hstate);
	}
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
	size_t i;
	enum tree_code code = CALL_EXPR;
	gcall *fn_from;

	hstate.add_object (code);
	fn_from = expr->ops.call.fn_from;
	if (gimple_call_internal_p (fn_from))
	  hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
	else
	  inchash::add_expr (gimple_call_fn (fn_from), hstate);
	for (i = 0; i < expr->ops.call.nargs; i++)
	  inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
	size_t i;

	for (i = 0; i < expr->ops.phi.nargs; i++)
	  inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
	size_t i;
	size_t nargs = element->expr.ops.call.nargs;
	gcall *fn_from;

	fn_from = element->expr.ops.call.fn_from;
	if (gimple_call_internal_p (fn_from))
	  fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
		 stream);
	else
	  print_generic_expr (stream, gimple_call_fn (fn_from), 0);
	fprintf (stream, " (");
	for (i = 0; i < nargs; i++)
	  {
	    print_generic_expr (stream, element->expr.ops.call.args[i], 0);
	    if (i + 1 < nargs)
	      fprintf (stream, ", ");
	  }
	fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
	size_t i;
	size_t nargs = element->expr.ops.phi.nargs;

	fprintf (stream, "PHI <");
	for (i = 0; i < nargs; i++)
	  {
	    print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
	    if (i + 1 < nargs)
	      fprintf (stream, ", ");
	  }
	fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);

  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  struct edge_info *edge_info = (struct edge_info *) e->aux;

	  if (edge_info)
	    {
	      edge_info->cond_equivalences.release ();
	      free (edge_info);
	      e->aux = NULL;
	    }
	}
    }
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it onto *P.  */

static void
build_and_record_new_cond (enum tree_code code,
			   tree op0, tree op1,
			   vec<cond_equivalence> *p)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = boolean_true_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */
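/* Concretely, for an integer comparison a_1 < b_2 on the true edge this
   records ((a_1 <= b_2) == true), ((a_1 != b_2) == true),
   ((a_1 < b_2) == true) and ((a_1 >= b_2) == false).  */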
static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	  build_and_record_new_cond (LTGT_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
				  ? LE_EXPR : GE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
	{
	  build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				     &edge_info->cond_equivalences);
	}
      build_and_record_new_cond (LE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
				  ? UNLE_EXPR : UNGE_EXPR),
				 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
				 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now record the original true and false conditions themselves.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
	{
	  gswitch *switch_stmt = as_a <gswitch *> (stmt);
	  tree index = gimple_switch_index (switch_stmt);

	  if (TREE_CODE (index) == SSA_NAME)
	    {
	      int i;
	      int n_labels = gimple_switch_num_labels (switch_stmt);
	      tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
	      edge e;
	      edge_iterator ei;

	      for (i = 0; i < n_labels; i++)
		{
		  tree label = gimple_switch_label (switch_stmt, i);
		  basic_block target_bb = label_to_block (CASE_LABEL (label));
		  if (CASE_HIGH (label)
		      || !CASE_LOW (label)
		      || info[target_bb->index])
		    info[target_bb->index] = error_mark_node;
		  else
		    info[target_bb->index] = label;
		}

	      FOR_EACH_EDGE (e, ei, bb->succs)
		{
		  basic_block target_bb = e->dest;
		  tree label = info[target_bb->index];

		  if (label != NULL && label != error_mark_node)
		    {
		      tree x = fold_convert_loc (loc, TREE_TYPE (index),
						 CASE_LOW (label));
		      edge_info = allocate_edge_info (e);
		      edge_info->lhs = index;
		      edge_info->rhs = x;
		    }
		}
	      free (info);
	    }
	}

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
	{
	  edge true_edge;
	  edge false_edge;

	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);
	  enum tree_code code = gimple_cond_code (stmt);

	  extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

	  /* Special case comparing booleans against a constant as we
	     know the value of OP0 on both arms of the branch.  i.e., we
	     can record an equivalence for OP0 rather than COND.  */
	  if ((code == EQ_EXPR || code == NE_EXPR)
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
	      && is_gimple_min_invariant (op1))
	    {
	      if (code == EQ_EXPR)
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_false_node
				    : boolean_true_node);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_true_node
				    : boolean_false_node);
		}
	      else
		{
		  edge_info = allocate_edge_info (true_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_true_node
				    : boolean_false_node);

		  edge_info = allocate_edge_info (false_edge);
		  edge_info->lhs = op0;
		  edge_info->rhs = (integer_zerop (op1)
				    ? boolean_false_node
				    : boolean_true_node);
		}
	    }
	  else if (is_gimple_min_invariant (op0)
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op0)
		    && real_zerop (op0));
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op1;
		  edge_info->rhs = op0;
		}
	    }
	  else if (TREE_CODE (op0) == SSA_NAME
		   && (TREE_CODE (op1) == SSA_NAME
		       || is_gimple_min_invariant (op1)))
	    {
	      tree cond = build2 (code, boolean_type_node, op0, op1);
	      tree inverted = invert_truthvalue_loc (loc, cond);
	      bool can_infer_simple_equiv
		= !(HONOR_SIGNED_ZEROS (op1)
		    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
	      struct edge_info *edge_info;

	      edge_info = allocate_edge_info (true_edge);
	      record_conditions (edge_info, cond, inverted);

	      if (can_infer_simple_equiv && code == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}

	      edge_info = allocate_edge_info (false_edge);
	      record_conditions (edge_info, inverted, cond);

	      if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
		{
		  edge_info->lhs = op0;
		  edge_info->rhs = op1;
		}
	    }

	  /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
	}
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};
/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */

namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies_stack.create (20);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	  update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  const_and_copies_stack.release ();

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */
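/* For example, "if (5 > a_1)" is rewritten as "if (a_1 < 5)", so that
   equivalent conditions hash and compare the same way regardless of how
   they were originally written.  */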
static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
	 and change the code.  */
      if (code == LT_EXPR
	  || code == GT_EXPR
	  || code == LE_EXPR
	  || code == GE_EXPR)
	{
	  code = swap_tree_comparison (code);

	  gimple_cond_set_code (condstmt, code);
	  gimple_cond_set_lhs (condstmt, op1);
	  gimple_cond_set_rhs (condstmt, op0);

	  update_stmt (condstmt);
	}
    }
}
/* Pop entries off AVAIL_EXPRS_STACK and remove the expressions made
   available in this block from the global hash table, stopping when the
   block's NULL marker entry is reached.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
	= avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
	break;

      /* This must precede the actual removal from the hash table,
	 as ELEMENT and the table entry may share a call argument
	 vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< ");
	  print_expr_hash_elt (dump_file, victim.first);
	}

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
	{
	  free_expr_hash_elt (*slot);
	  *slot = victim.second;
	}
      else
	avail_exprs->clear_slot (slot);
    }
}
/* Use the source/dest pairs in CONST_AND_COPIES_STACK to restore
   CONST_AND_COPIES to its original state, stopping when we hit a
   NULL marker.  */

static void
restore_vars_to_original_value (void)
{
  while (const_and_copies_stack.length () > 0)
    {
      tree prev_value, dest;

      dest = const_and_copies_stack.pop ();

      if (dest == NULL)
	break;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "<<<< COPY ");
	  print_generic_expr (dump_file, dest, 0);
	  fprintf (dump_file, " = ");
	  print_generic_expr (dump_file, SSA_NAME_VALUE (dest), 0);
	  fprintf (dump_file, "\n");
	}

      prev_value = const_and_copies_stack.pop ();
      set_ssa_name_value (dest, prev_value);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
				  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */
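/* For example, if traversing E records the equivalence x_1 == 0 and
   E->dest ends in "if (x_1 == 0)", the condition is known to be true
   when arriving via E, so the jump can be threaded straight to the
   true arm.  */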
void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
	gimple_build_cond (NE_EXPR,
			   integer_zero_node, integer_zero_node,
			   NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies_stack.safe_push (NULL_TREE);

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
			&const_and_copies_stack,
			simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */
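/* For example, given

       x_3 = PHI <y_7 (e1), y_7 (e2), x_3 (e3)>

   every alternative other than the result itself is y_7, so x_3 can be
   treated as equivalent to y_7.  */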
static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
	{
	  tree t = gimple_phi_arg_def (phi, i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
	  && may_propagate_copy (lhs, rhs))
	set_ssa_name_value (lhs, rhs);
    }
}
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
	 the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
	continue;

      /* If we have already seen a non-loop edge, then we must have
	 multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
	return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
	 it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;
	  cond_equivalence *eq;

	  if (lhs)
	    record_equality (lhs, rhs);

	  /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
	     set via a widening type conversion, then we may be able to record
	     additional equivalences.  */
	  if (lhs
	      && TREE_CODE (lhs) == SSA_NAME
	      && is_gimple_constant (rhs)
	      && TREE_CODE (rhs) == INTEGER_CST)
	    {
	      gimple defstmt = SSA_NAME_DEF_STMT (lhs);

	      if (defstmt
		  && is_gimple_assign (defstmt)
		  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
		{
		  tree old_rhs = gimple_assign_rhs1 (defstmt);

		  /* If the conversion widens the original value and
		     the constant is in the range of the type of OLD_RHS,
		     then convert the constant and record the equivalence.

		     Note that int_fits_type_p does not check the precision
		     if the upper and lower bounds are OK.  */
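		  /* E.g., if char c_5 was widened as lhs_6 = (int) c_5 and
		     this edge gives lhs_6 == 7, then c_5 == 7 as well,
		     since 7 fits in the range of char.  */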
		  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
		      && (TYPE_PRECISION (TREE_TYPE (lhs))
			  > TYPE_PRECISION (TREE_TYPE (old_rhs)))
		      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
		    {
		      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
		      record_equality (old_rhs, newval);
		    }
		}
	    }

	  for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
	    record_cond (eq);
	}
    }
}
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
	   opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
	   opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}
/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
	   (long) htab.size (),
	   (long) htab.elements (),
	   htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "1>>> ");
	  print_expr_hash_elt (dump_file, element);
	}

      avail_exprs_stack.safe_push
	(std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}
/* A helper function for record_const_or_copy and record_equality.
   Do the work of recording the value and undo info.  */

static void
record_const_or_copy_1 (tree x, tree y, tree prev_x)
{
  set_ssa_name_value (x, y);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "0>>> COPY ");
      print_generic_expr (dump_file, x, 0);
      fprintf (dump_file, " = ");
      print_generic_expr (dump_file, y, 0);
      fprintf (dump_file, "\n");
    }

  const_and_copies_stack.reserve (2);
  const_and_copies_stack.quick_push (prev_x);
  const_and_copies_stack.quick_push (x);
}
/* Record that X is equal to Y in const_and_copies.  Record undo
   information in the block-local vector.  */

static void
record_const_or_copy (tree x, tree y)
{
  tree prev_x = SSA_NAME_VALUE (x);

  gcc_assert (TREE_CODE (x) == SSA_NAME);

  if (TREE_CODE (y) == SSA_NAME)
    {
      tree tmp = SSA_NAME_VALUE (y);
      if (tmp)
	y = tmp;
    }

  record_const_or_copy_1 (x, y, prev_x);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
	   /* ??? When threading over backedges the following is important
	      for correctness.  See PR61757.  */
	   || (loop_depth_of_name (x) <= loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
	  || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  record_const_or_copy_1 (x, y, prev_x);
}
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */
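/* For example, if x_1 is known to be 5 and a successor block has
   y_2 = PHI <x_1 (E), ...>, the x_1 argument on edge E is replaced
   by 5.  */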
static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we cannot propagate it into non-dominated blocks, we can
	 propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies_stack.safe_push (NULL_TREE);

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
	{
	  tree lhs = edge_info->lhs;
	  tree rhs = edge_info->rhs;

	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    record_const_or_copy (lhs, rhs);
	}

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
	  gphi *phi = gsi.phi ();

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
	  orig_val = get_use_from_ptr (orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && (TREE_CODE (new_val) == SSA_NAME
		  || is_gimple_min_invariant (new_val))
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      restore_vars_to_original_value ();
    }
}
1989 void
1990 dom_opt_dom_walker::before_dom_children (basic_block bb)
1992 gimple_stmt_iterator gsi;
1994 if (dump_file && (dump_flags & TDF_DETAILS))
1995 fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);
1997 /* Push a marker on the stacks of local information so that we know how
1998 far to unwind when we finalize this block. */
1999 avail_exprs_stack.safe_push
2000 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
2001 const_and_copies_stack.safe_push (NULL_TREE);
2003 record_equivalences_from_incoming_edge (bb);
2005 /* PHI nodes can create equivalences too. */
2006 record_equivalences_from_phis (bb);
2008 /* Create equivalences from redundant PHIs. PHIs are only truly
2009 redundant when they exist in the same block, so push another
2010 marker and unwind right afterwards. */
2011 avail_exprs_stack.safe_push
2012 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
2013 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2014 eliminate_redundant_computations (&gsi);
2015 remove_local_expressions_from_table ();
2017 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2018 optimize_stmt (bb, gsi);
2020 /* Now prepare to process dominated blocks. */
2021 record_edge_info (bb);
2022 cprop_into_successor_phis (bb);
 2025 /* We have finished processing the dominator children of BB; perform
 2026    any finalization actions in preparation for leaving this node in
 2027    the dominator tree.  */
2029 void
2030 dom_opt_dom_walker::after_dom_children (basic_block bb)
2032 gimple last;
2034 /* If we have an outgoing edge to a block with multiple incoming and
2035 outgoing edges, then we may be able to thread the edge, i.e., we
2036 may be able to statically determine which of the outgoing edges
2037 will be traversed when the incoming edge from BB is traversed. */
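/* A hypothetical illustration: if the equivalence x_1 = 0 is known
   to hold on BB's single outgoing edge and the successor ends with

     if (x_1 == 0) goto <bb A>; else goto <bb B>;

   then the conditional's outcome is statically known and the edge
   can be threaded directly to block A.  */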
2038 if (single_succ_p (bb)
2039 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
2040 && potentially_threadable_block (single_succ (bb)))
2042 thread_across_edge (single_succ_edge (bb));
2044 else if ((last = last_stmt (bb))
2045 && gimple_code (last) == GIMPLE_COND
2046 && EDGE_COUNT (bb->succs) == 2
2047 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
2048 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
2050 edge true_edge, false_edge;
2052 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
2054 /* Only try to thread the edge if it reaches a target block with
2055 more than one predecessor and more than one successor. */
2056 if (potentially_threadable_block (true_edge->dest))
2057 thread_across_edge (true_edge);
2059 /* Similarly for the ELSE arm. */
2060 if (potentially_threadable_block (false_edge->dest))
2061 thread_across_edge (false_edge);
 2065   /* Remove expressions and const/copy equivalences local to BB from the tables.  */
2066 remove_local_expressions_from_table ();
2067 restore_vars_to_original_value ();
2070 /* Search for redundant computations in STMT. If any are found, then
2071 replace them with the variable holding the result of the computation.
2073 If safe, record this expression into the available expression hash
2074 table. */
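/* A hypothetical illustration:

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   The second computation is found in the available expression
   table, so it is replaced by the copy d_4 = a_1.  */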
2076 static void
2077 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2079 tree expr_type;
2080 tree cached_lhs;
2081 tree def;
2082 bool insert = true;
2083 bool assigns_var_p = false;
2085 gimple stmt = gsi_stmt (*gsi);
2087 if (gimple_code (stmt) == GIMPLE_PHI)
2088 def = gimple_phi_result (stmt);
2089 else
2090 def = gimple_get_lhs (stmt);
 2092   /* Certain expressions on the RHS can be optimized away, but cannot
 2093      themselves be entered into the hash tables.  */
2094 if (! def
2095 || TREE_CODE (def) != SSA_NAME
2096 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2097 || gimple_vdef (stmt)
2098 /* Do not record equivalences for increments of ivs. This would create
2099 overlapping live ranges for a very questionable gain. */
2100 || simple_iv_increment_p (stmt))
2101 insert = false;
2103 /* Check if the expression has been computed before. */
2104 cached_lhs = lookup_avail_expr (stmt, insert);
2106 opt_stats.num_exprs_considered++;
2108 /* Get the type of the expression we are trying to optimize. */
2109 if (is_gimple_assign (stmt))
2111 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2112 assigns_var_p = true;
2114 else if (gimple_code (stmt) == GIMPLE_COND)
2115 expr_type = boolean_type_node;
2116 else if (is_gimple_call (stmt))
2118 gcc_assert (gimple_call_lhs (stmt));
2119 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2120 assigns_var_p = true;
2122 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2123 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2124 else if (gimple_code (stmt) == GIMPLE_PHI)
2125 /* We can't propagate into a phi, so the logic below doesn't apply.
2126 Instead record an equivalence between the cached LHS and the
2127 PHI result of this statement, provided they are in the same block.
2128 This should be sufficient to kill the redundant phi. */
2130 if (def && cached_lhs)
2131 record_const_or_copy (def, cached_lhs);
2132 return;
2134 else
2135 gcc_unreachable ();
2137 if (!cached_lhs)
2138 return;
2140 /* It is safe to ignore types here since we have already done
2141 type checking in the hashing and equality routines. In fact
2142 type checking here merely gets in the way of constant
2143 propagation. Also, make sure that it is safe to propagate
2144 CACHED_LHS into the expression in STMT. */
2145 if ((TREE_CODE (cached_lhs) != SSA_NAME
2146 && (assigns_var_p
2147 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2148 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2150 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2151 || is_gimple_min_invariant (cached_lhs));
2153 if (dump_file && (dump_flags & TDF_DETAILS))
2155 fprintf (dump_file, " Replaced redundant expr '");
2156 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2157 fprintf (dump_file, "' with '");
2158 print_generic_expr (dump_file, cached_lhs, dump_flags);
2159 fprintf (dump_file, "'\n");
2162 opt_stats.num_re++;
2164 if (assigns_var_p
2165 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2166 cached_lhs = fold_convert (expr_type, cached_lhs);
2168 propagate_tree_value_into_stmt (gsi, cached_lhs);
2170 /* Since it is always necessary to mark the result as modified,
2171 perhaps we should move this into propagate_tree_value_into_stmt
2172 itself. */
2173 gimple_set_modified (gsi_stmt (*gsi), true);
2177 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2178 the available expressions table or the const_and_copies table.
2179 Detect and record those equivalences. */
2180 /* We handle only very simple copy equivalences here. The heavy
 2181    lifting is done by eliminate_redundant_computations.  */
2183 static void
2184 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2186 tree lhs;
2187 enum tree_code lhs_code;
2189 gcc_assert (is_gimple_assign (stmt));
2191 lhs = gimple_assign_lhs (stmt);
2192 lhs_code = TREE_CODE (lhs);
2194 if (lhs_code == SSA_NAME
2195 && gimple_assign_single_p (stmt))
2197 tree rhs = gimple_assign_rhs1 (stmt);
2199 /* If the RHS of the assignment is a constant or another variable that
2200 may be propagated, register it in the CONST_AND_COPIES table. We
2201 do not need to record unwind data for this, since this is a true
2202 assignment and not an equivalence inferred from a comparison. All
2203 uses of this ssa name are dominated by this assignment, so unwinding
2204 just costs time and space. */
2205 if (may_optimize_p
2206 && (TREE_CODE (rhs) == SSA_NAME
2207 || is_gimple_min_invariant (rhs)))
2209 if (dump_file && (dump_flags & TDF_DETAILS))
2211 fprintf (dump_file, "==== ASGN ");
2212 print_generic_expr (dump_file, lhs, 0);
2213 fprintf (dump_file, " = ");
2214 print_generic_expr (dump_file, rhs, 0);
2215 fprintf (dump_file, "\n");
2218 set_ssa_name_value (lhs, rhs);
2222 /* Make sure we can propagate &x + CST. */
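/* A hypothetical illustration:

     p_1 = &a + 4;

   is recorded (via SSA_NAME_VALUE) as, roughly,
   p_1 = &MEM[(void *)&a + 4], a form that can be propagated
   into later uses of p_1.  */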
2223 if (lhs_code == SSA_NAME
2224 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2225 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2226 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2228 tree op0 = gimple_assign_rhs1 (stmt);
2229 tree op1 = gimple_assign_rhs2 (stmt);
2230 tree new_rhs
2231 = build_fold_addr_expr (fold_build2 (MEM_REF,
2232 TREE_TYPE (TREE_TYPE (op0)),
2233 unshare_expr (op0),
2234 fold_convert (ptr_type_node,
2235 op1)));
2236 if (dump_file && (dump_flags & TDF_DETAILS))
2238 fprintf (dump_file, "==== ASGN ");
2239 print_generic_expr (dump_file, lhs, 0);
2240 fprintf (dump_file, " = ");
2241 print_generic_expr (dump_file, new_rhs, 0);
2242 fprintf (dump_file, "\n");
2245 set_ssa_name_value (lhs, new_rhs);
2248 /* A memory store, even an aliased store, creates a useful
2249 equivalence. By exchanging the LHS and RHS, creating suitable
2250 vops and recording the result in the available expression table,
2251 we may be able to expose more redundant loads. */
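/* A hypothetical illustration:

     *p_1 = x_2;    <-- store, recorded with LHS and RHS exchanged
     ...
     y_3 = *p_1;    <-- later load of the same location

   Looking up the load finds the recorded x_2 = *p_1, so the load
   can be replaced by the copy y_3 = x_2.  */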
2252 if (!gimple_has_volatile_ops (stmt)
2253 && gimple_references_memory_p (stmt)
2254 && gimple_assign_single_p (stmt)
2255 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2256 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2257 && !is_gimple_reg (lhs))
2259 tree rhs = gimple_assign_rhs1 (stmt);
2260 gassign *new_stmt;
2262 /* Build a new statement with the RHS and LHS exchanged. */
2263 if (TREE_CODE (rhs) == SSA_NAME)
2265 /* NOTE tuples. The call to gimple_build_assign below replaced
2266 a call to build_gimple_modify_stmt, which did not set the
2267 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2268 may cause an SSA validation failure, as the LHS may be a
2269 default-initialized name and should have no definition. I'm
2270 a bit dubious of this, as the artificial statement that we
2271 generate here may in fact be ill-formed, but it is simply
2272 used as an internal device in this pass, and never becomes
2273 part of the CFG. */
2274 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2275 new_stmt = gimple_build_assign (rhs, lhs);
2276 SSA_NAME_DEF_STMT (rhs) = defstmt;
2278 else
2279 new_stmt = gimple_build_assign (rhs, lhs);
2281 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2283 /* Finally enter the statement into the available expression
2284 table. */
2285 lookup_avail_expr (new_stmt, true);
2289 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2290 CONST_AND_COPIES. */
2292 static void
2293 cprop_operand (gimple stmt, use_operand_p op_p)
2295 tree val;
2296 tree op = USE_FROM_PTR (op_p);
2298 /* If the operand has a known constant value or it is known to be a
2299 copy of some other variable, use the value or copy stored in
2300 CONST_AND_COPIES. */
2301 val = SSA_NAME_VALUE (op);
2302 if (val && val != op)
2304 /* Do not replace hard register operands in asm statements. */
2305 if (gimple_code (stmt) == GIMPLE_ASM
2306 && !may_propagate_copy_into_asm (op))
2307 return;
2309 /* Certain operands are not allowed to be copy propagated due
2310 to their interaction with exception handling and some GCC
2311 extensions. */
2312 if (!may_propagate_copy (op, val))
2313 return;
 2315       /* Do not propagate copies into BIVs.
 2316          See PR23821 and PR62217 for how this can disturb IV and
 2317          number-of-iterations analysis.  */
2318 if (TREE_CODE (val) != INTEGER_CST)
2320 gimple def = SSA_NAME_DEF_STMT (op);
2321 if (gimple_code (def) == GIMPLE_PHI
2322 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2323 return;
2326 /* Dump details. */
2327 if (dump_file && (dump_flags & TDF_DETAILS))
2329 fprintf (dump_file, " Replaced '");
2330 print_generic_expr (dump_file, op, dump_flags);
2331 fprintf (dump_file, "' with %s '",
2332 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2333 print_generic_expr (dump_file, val, dump_flags);
2334 fprintf (dump_file, "'\n");
2337 if (TREE_CODE (val) != SSA_NAME)
2338 opt_stats.num_const_prop++;
2339 else
2340 opt_stats.num_copy_prop++;
2342 propagate_value (op_p, val);
2344 /* And note that we modified this statement. This is now
2345 safe, even if we changed virtual operands since we will
2346 rescan the statement and rewrite its operands again. */
2347 gimple_set_modified (stmt, true);
2351 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2352 known value for that SSA_NAME (or NULL if no value is known).
2354 Propagate values from CONST_AND_COPIES into the uses, vuses and
2355 vdef_ops of STMT. */
2357 static void
2358 cprop_into_stmt (gimple stmt)
2360 use_operand_p op_p;
2361 ssa_op_iter iter;
2363 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2364 cprop_operand (stmt, op_p);
2367 /* Optimize the statement pointed to by iterator SI.
2369 We try to perform some simplistic global redundancy elimination and
2370 constant propagation:
2372 1- To detect global redundancy, we keep track of expressions that have
2373 been computed in this block and its dominators. If we find that the
2374 same expression is computed more than once, we eliminate repeated
2375 computations by using the target of the first one.
2377 2- Constant values and copy assignments. This is used to do very
2378 simplistic constant and copy propagation. When a constant or copy
2379 assignment is found, we map the value on the RHS of the assignment to
2380 the variable in the LHS in the CONST_AND_COPIES table. */
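/* A hypothetical illustration of case 2:

     x_1 = 4;
     y_2 = x_1 + 3;

   CONST_AND_COPIES maps x_1 to 4, so the use in the second
   statement becomes y_2 = 4 + 3, which folding reduces to
   y_2 = 7.  */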
2382 static void
2383 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2385 gimple stmt, old_stmt;
2386 bool may_optimize_p;
2387 bool modified_p = false;
2388 bool was_noreturn;
2390 old_stmt = stmt = gsi_stmt (si);
2391 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2393 if (dump_file && (dump_flags & TDF_DETAILS))
2395 fprintf (dump_file, "Optimizing statement ");
2396 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2399 if (gimple_code (stmt) == GIMPLE_COND)
2400 canonicalize_comparison (as_a <gcond *> (stmt));
2402 update_stmt_if_modified (stmt);
2403 opt_stats.num_stmts++;
2405 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2406 cprop_into_stmt (stmt);
2408 /* If the statement has been modified with constant replacements,
2409 fold its RHS before checking for redundant computations. */
2410 if (gimple_modified_p (stmt))
2412 tree rhs = NULL;
2414 /* Try to fold the statement making sure that STMT is kept
2415 up to date. */
2416 if (fold_stmt (&si))
2418 stmt = gsi_stmt (si);
2419 gimple_set_modified (stmt, true);
2421 if (dump_file && (dump_flags & TDF_DETAILS))
2423 fprintf (dump_file, " Folded to: ");
2424 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2428 /* We only need to consider cases that can yield a gimple operand. */
2429 if (gimple_assign_single_p (stmt))
2430 rhs = gimple_assign_rhs1 (stmt);
2431 else if (gimple_code (stmt) == GIMPLE_GOTO)
2432 rhs = gimple_goto_dest (stmt);
2433 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2434 /* This should never be an ADDR_EXPR. */
2435 rhs = gimple_switch_index (swtch_stmt);
2437 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2438 recompute_tree_invariant_for_addr_expr (rhs);
2440 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2441 even if fold_stmt updated the stmt already and thus cleared
 2442          the gimple_modified_p flag on it.  */
2443 modified_p = true;
 2446   /* Check for redundant computations.  Do this optimization only for
 2447      side-effect-free assignments, calls with an LHS, conditionals and switches.  */
2448 may_optimize_p = (!gimple_has_side_effects (stmt)
2449 && (is_gimple_assign (stmt)
2450 || (is_gimple_call (stmt)
2451 && gimple_call_lhs (stmt) != NULL_TREE)
2452 || gimple_code (stmt) == GIMPLE_COND
2453 || gimple_code (stmt) == GIMPLE_SWITCH));
2455 if (may_optimize_p)
2457 if (gimple_code (stmt) == GIMPLE_CALL)
2459 /* Resolve __builtin_constant_p. If it hasn't been
2460 folded to integer_one_node by now, it's fairly
2461 certain that the value simply isn't constant. */
2462 tree callee = gimple_call_fndecl (stmt);
2463 if (callee
2464 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2465 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2467 propagate_tree_value_into_stmt (&si, integer_zero_node);
2468 stmt = gsi_stmt (si);
2472 update_stmt_if_modified (stmt);
2473 eliminate_redundant_computations (&si);
2474 stmt = gsi_stmt (si);
2476 /* Perform simple redundant store elimination. */
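/* A hypothetical illustration:

     x_1 = *p_2;
     ...
     *p_2 = x_1;    <-- stores back the value just loaded

   Looking up the exchanged form x_1 = *p_2 finds the earlier
   load, so the store changes nothing and can be removed.  */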
2477 if (gimple_assign_single_p (stmt)
2478 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2480 tree lhs = gimple_assign_lhs (stmt);
2481 tree rhs = gimple_assign_rhs1 (stmt);
2482 tree cached_lhs;
2483 gassign *new_stmt;
2484 if (TREE_CODE (rhs) == SSA_NAME)
2486 tree tem = SSA_NAME_VALUE (rhs);
2487 if (tem)
2488 rhs = tem;
2490 /* Build a new statement with the RHS and LHS exchanged. */
2491 if (TREE_CODE (rhs) == SSA_NAME)
2493 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2494 new_stmt = gimple_build_assign (rhs, lhs);
2495 SSA_NAME_DEF_STMT (rhs) = defstmt;
2497 else
2498 new_stmt = gimple_build_assign (rhs, lhs);
2499 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2500 cached_lhs = lookup_avail_expr (new_stmt, false);
2501 if (cached_lhs
2502 && rhs == cached_lhs)
2504 basic_block bb = gimple_bb (stmt);
2505 unlink_stmt_vdef (stmt);
2506 if (gsi_remove (&si, true))
2508 bitmap_set_bit (need_eh_cleanup, bb->index);
2509 if (dump_file && (dump_flags & TDF_DETAILS))
2510 fprintf (dump_file, " Flagged to clear EH edges.\n");
2512 release_defs (stmt);
2513 return;
2518 /* Record any additional equivalences created by this statement. */
2519 if (is_gimple_assign (stmt))
2520 record_equivalences_from_stmt (stmt, may_optimize_p);
2522 /* If STMT is a COND_EXPR and it was modified, then we may know
2523 where it goes. If that is the case, then mark the CFG as altered.
2525 This will cause us to later call remove_unreachable_blocks and
2526 cleanup_tree_cfg when it is safe to do so. It is not safe to
2527 clean things up here since removal of edges and such can trigger
2528 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2529 the manager.
2531 That's all fine and good, except that once SSA_NAMEs are released
2532 to the manager, we must not call create_ssa_name until all references
2533 to released SSA_NAMEs have been eliminated.
 2535      All references to the deleted SSA_NAMEs cannot be eliminated until
 2536      we remove unreachable blocks.
 2538      We cannot remove unreachable blocks until after we have completed
 2539      any queued jump threading.
 2541      We cannot complete any queued jump threads until we have taken
2542 appropriate variables out of SSA form. Taking variables out of
2543 SSA form can call create_ssa_name and thus we lose.
2545 Ultimately I suspect we're going to need to change the interface
2546 into the SSA_NAME manager. */
2547 if (gimple_modified_p (stmt) || modified_p)
2549 tree val = NULL;
2551 update_stmt_if_modified (stmt);
2553 if (gimple_code (stmt) == GIMPLE_COND)
2554 val = fold_binary_loc (gimple_location (stmt),
2555 gimple_cond_code (stmt), boolean_type_node,
2556 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2557 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2558 val = gimple_switch_index (swtch_stmt);
2560 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2561 cfg_altered = true;
 2563       /* If we simplified a statement in such a way that it is shown
 2564          not to trap, update the EH information and the CFG to match.  */
2565 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2567 bitmap_set_bit (need_eh_cleanup, bb->index);
2568 if (dump_file && (dump_flags & TDF_DETAILS))
2569 fprintf (dump_file, " Flagged to clear EH edges.\n");
2572 if (!was_noreturn
2573 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2574 need_noreturn_fixup.safe_push (stmt);
2578 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2579 the desired memory state. */
2581 static void *
2582 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2584 tree vuse2 = (tree) data;
2585 if (vuse1 == vuse2)
2586 return data;
2588 /* This bounds the stmt walks we perform on reference lookups
2589 to O(1) instead of O(N) where N is the number of dominating
2590 stores leading to a candidate. We re-use the SCCVN param
2591 for this as it is basically the same complexity. */
2592 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2593 return (void *)-1;
2595 return NULL;
2598 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2599 If found, return its LHS. Otherwise insert STMT in the table and
2600 return NULL_TREE.
 2602    Also, when an expression is first inserted in the table, it is
 2603    added to AVAIL_EXPRS_STACK, so that it can be removed when
2604 we finish processing this block and its children. */
2606 static tree
2607 lookup_avail_expr (gimple stmt, bool insert)
2609 expr_hash_elt **slot;
2610 tree lhs;
2611 tree temp;
2612 struct expr_hash_elt element;
2614 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2615 if (gimple_code (stmt) == GIMPLE_PHI)
2616 lhs = gimple_phi_result (stmt);
2617 else
2618 lhs = gimple_get_lhs (stmt);
2620 initialize_hash_element (stmt, lhs, &element);
2622 if (dump_file && (dump_flags & TDF_DETAILS))
2624 fprintf (dump_file, "LKUP ");
2625 print_expr_hash_elt (dump_file, &element);
2628 /* Don't bother remembering constant assignments and copy operations.
2629 Constants and copy operations are handled by the constant/copy propagator
2630 in optimize_stmt. */
2631 if (element.expr.kind == EXPR_SINGLE
2632 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2633 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2634 return NULL_TREE;
2636 /* Finally try to find the expression in the main expression hash table. */
2637 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2638 if (slot == NULL)
2640 free_expr_hash_elt_contents (&element);
2641 return NULL_TREE;
2643 else if (*slot == NULL)
2645 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2646 *element2 = element;
2647 element2->stamp = element2;
2648 *slot = element2;
2650 if (dump_file && (dump_flags & TDF_DETAILS))
2652 fprintf (dump_file, "2>>> ");
2653 print_expr_hash_elt (dump_file, element2);
2656 avail_exprs_stack.safe_push
2657 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2658 return NULL_TREE;
 2661   /* If we found a redundant memory operation, do an alias walk to
 2662      check if we can re-use it.  */
2663 if (gimple_vuse (stmt) != (*slot)->vop)
2665 tree vuse1 = (*slot)->vop;
2666 tree vuse2 = gimple_vuse (stmt);
2667 /* If we have a load of a register and a candidate in the
2668 hash with vuse1 then try to reach its stmt by walking
2669 up the virtual use-def chain using walk_non_aliased_vuses.
2670 But don't do this when removing expressions from the hash. */
2671 ao_ref ref;
2672 if (!(vuse1 && vuse2
2673 && gimple_assign_single_p (stmt)
2674 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2675 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2676 && walk_non_aliased_vuses (&ref, vuse2,
2677 vuse_eq, NULL, NULL, vuse1) != NULL))
2679 if (insert)
2681 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2682 *element2 = element;
2683 element2->stamp = element2;
2685 /* Insert the expr into the hash by replacing the current
2686 entry and recording the value to restore in the
2687 avail_exprs_stack. */
2688 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2689 *slot = element2;
2690 if (dump_file && (dump_flags & TDF_DETAILS))
2692 fprintf (dump_file, "2>>> ");
2693 print_expr_hash_elt (dump_file, *slot);
2696 return NULL_TREE;
2700 free_expr_hash_elt_contents (&element);
2702 /* Extract the LHS of the assignment so that it can be used as the current
2703 definition of another variable. */
2704 lhs = (*slot)->lhs;
2706 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2707 use the value from the const_and_copies table. */
2708 if (TREE_CODE (lhs) == SSA_NAME)
2710 temp = SSA_NAME_VALUE (lhs);
2711 if (temp)
2712 lhs = temp;
2715 if (dump_file && (dump_flags & TDF_DETAILS))
2717 fprintf (dump_file, "FIND: ");
2718 print_generic_expr (dump_file, lhs, 0);
2719 fprintf (dump_file, "\n");
2722 return lhs;
2725 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2726 for expressions using the code of the expression and the SSA numbers of
2727 its operands. */
2729 static hashval_t
2730 avail_expr_hash (const void *p)
2732 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2733 inchash::hash hstate;
2735 inchash::add_hashable_expr (expr, hstate);
2737 return hstate.end ();
2740 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2741 up degenerate PHIs created by or exposed by jump threading. */
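/* A hypothetical illustration: a degenerate PHI is one whose
   arguments are all the same, e.g.

     x_2 = PHI <x_1(3), x_1(4)>

   which is equivalent to the copy x_2 = x_1 and can be removed
   once x_1 has been propagated into all uses of x_2.  */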
2743 /* Given a statement STMT, which is either a PHI node or an assignment,
2744 remove it from the IL. */
2746 static void
2747 remove_stmt_or_phi (gimple stmt)
2749 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2751 if (gimple_code (stmt) == GIMPLE_PHI)
2752 remove_phi_node (&gsi, true);
2753 else
2755 gsi_remove (&gsi, true);
2756 release_defs (stmt);
2760 /* Given a statement STMT, which is either a PHI node or an assignment,
 2761    return the "rhs" of the node; in the case of a non-degenerate
 2762    PHI, NULL is returned.  */
2764 static tree
2765 get_rhs_or_phi_arg (gimple stmt)
2767 if (gimple_code (stmt) == GIMPLE_PHI)
2768 return degenerate_phi_result (as_a <gphi *> (stmt));
2769 else if (gimple_assign_single_p (stmt))
2770 return gimple_assign_rhs1 (stmt);
2771 else
2772 gcc_unreachable ();
2776 /* Given a statement STMT, which is either a PHI node or an assignment,
2777 return the "lhs" of the node. */
2779 static tree
2780 get_lhs_or_phi_result (gimple stmt)
2782 if (gimple_code (stmt) == GIMPLE_PHI)
2783 return gimple_phi_result (stmt);
2784 else if (is_gimple_assign (stmt))
2785 return gimple_assign_lhs (stmt);
2786 else
2787 gcc_unreachable ();
2790 /* Propagate RHS into all uses of LHS (when possible).
2792 RHS and LHS are derived from STMT, which is passed in solely so
2793 that we can remove it if propagation is successful.
2795 When propagating into a PHI node or into a statement which turns
2796 into a trivial copy or constant initialization, set the
2797 appropriate bit in INTERESTING_NAMEs so that we will visit those
2798 nodes as well in an effort to pick up secondary optimization
2799 opportunities. */
2801 static void
2802 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2804 /* First verify that propagation is valid. */
2805 if (may_propagate_copy (lhs, rhs))
2807 use_operand_p use_p;
2808 imm_use_iterator iter;
2809 gimple use_stmt;
2810 bool all = true;
2812 /* Dump details. */
2813 if (dump_file && (dump_flags & TDF_DETAILS))
2815 fprintf (dump_file, " Replacing '");
2816 print_generic_expr (dump_file, lhs, dump_flags);
2817 fprintf (dump_file, "' with %s '",
2818 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2819 print_generic_expr (dump_file, rhs, dump_flags);
2820 fprintf (dump_file, "'\n");
2823 /* Walk over every use of LHS and try to replace the use with RHS.
2824 At this point the only reason why such a propagation would not
2825 be successful would be if the use occurs in an ASM_EXPR. */
2826 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2828 /* Leave debug stmts alone. If we succeed in propagating
2829 all non-debug uses, we'll drop the DEF, and propagation
2830 into debug stmts will occur then. */
2831 if (gimple_debug_bind_p (use_stmt))
2832 continue;
2834 /* It's not always safe to propagate into an ASM_EXPR. */
2835 if (gimple_code (use_stmt) == GIMPLE_ASM
2836 && ! may_propagate_copy_into_asm (lhs))
2838 all = false;
2839 continue;
2842 /* It's not ok to propagate into the definition stmt of RHS.
2843 <bb 9>:
2844 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2845 g_67.1_6 = prephitmp.12_36;
2846 goto <bb 9>;
 2847              While this is strictly all dead code, we do not want to
 2848              deal with it here.  */
2849 if (TREE_CODE (rhs) == SSA_NAME
2850 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2852 all = false;
2853 continue;
2856 /* Dump details. */
2857 if (dump_file && (dump_flags & TDF_DETAILS))
2859 fprintf (dump_file, " Original statement:");
2860 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2863 /* Propagate the RHS into this use of the LHS. */
2864 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2865 propagate_value (use_p, rhs);
2867 /* Special cases to avoid useless calls into the folding
2868 routines, operand scanning, etc.
 2870              Propagation into a PHI may cause the PHI to become
 2871              degenerate, so mark the PHI as interesting.  No other
2872 actions are necessary. */
2873 if (gimple_code (use_stmt) == GIMPLE_PHI)
2875 tree result;
2877 /* Dump details. */
2878 if (dump_file && (dump_flags & TDF_DETAILS))
2880 fprintf (dump_file, " Updated statement:");
2881 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2884 result = get_lhs_or_phi_result (use_stmt);
2885 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2886 continue;
2889 /* From this point onward we are propagating into a
2890 real statement. Folding may (or may not) be possible,
2891 we may expose new operands, expose dead EH edges,
2892 etc. */
2893 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2894 cannot fold a call that simplifies to a constant,
2895 because the GIMPLE_CALL must be replaced by a
2896 GIMPLE_ASSIGN, and there is no way to effect such a
2897 transformation in-place. We might want to consider
2898 using the more general fold_stmt here. */
2900 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2901 fold_stmt_inplace (&gsi);
2904 /* Sometimes propagation can expose new operands to the
2905 renamer. */
2906 update_stmt (use_stmt);
2908 /* Dump details. */
2909 if (dump_file && (dump_flags & TDF_DETAILS))
2911 fprintf (dump_file, " Updated statement:");
2912 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2915 /* If we replaced a variable index with a constant, then
2916 we would need to update the invariant flag for ADDR_EXPRs. */
2917 if (gimple_assign_single_p (use_stmt)
2918 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2919 recompute_tree_invariant_for_addr_expr
2920 (gimple_assign_rhs1 (use_stmt));
2922 /* If we cleaned up EH information from the statement,
2923 mark its containing block as needing EH cleanups. */
2924 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2926 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2927 if (dump_file && (dump_flags & TDF_DETAILS))
2928 fprintf (dump_file, " Flagged to clear EH edges.\n");
2931 /* Propagation may expose new trivial copy/constant propagation
2932 opportunities. */
2933 if (gimple_assign_single_p (use_stmt)
2934 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2935 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2936 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2938 tree result = get_lhs_or_phi_result (use_stmt);
2939 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
 2942           /* Propagation into these nodes may make certain edges in
 2943              the CFG unexecutable.  We want to identify them, as PHI
 2944              nodes at the destination of those unexecutable edges may
 2945              become degenerate.  */
2946 else if (gimple_code (use_stmt) == GIMPLE_COND
2947 || gimple_code (use_stmt) == GIMPLE_SWITCH
2948 || gimple_code (use_stmt) == GIMPLE_GOTO)
2950 tree val;
2952 if (gimple_code (use_stmt) == GIMPLE_COND)
2953 val = fold_binary_loc (gimple_location (use_stmt),
2954 gimple_cond_code (use_stmt),
2955 boolean_type_node,
2956 gimple_cond_lhs (use_stmt),
2957 gimple_cond_rhs (use_stmt));
2958 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2959 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2960 else
2961 val = gimple_goto_dest (use_stmt);
2963 if (val && is_gimple_min_invariant (val))
2965 basic_block bb = gimple_bb (use_stmt);
2966 edge te = find_taken_edge (bb, val);
2967 edge_iterator ei;
2968 edge e;
2969 gimple_stmt_iterator gsi;
2970 gphi_iterator psi;
2972 /* Remove all outgoing edges except TE. */
2973 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2975 if (e != te)
2977 /* Mark all the PHI nodes at the destination of
2978 the unexecutable edge as interesting. */
2979 for (psi = gsi_start_phis (e->dest);
2980 !gsi_end_p (psi);
2981 gsi_next (&psi))
2983 gphi *phi = psi.phi ();
2985 tree result = gimple_phi_result (phi);
2986 int version = SSA_NAME_VERSION (result);
2988 bitmap_set_bit (interesting_names, version);
2991 te->probability += e->probability;
2993 te->count += e->count;
2994 remove_edge (e);
2995 cfg_altered = true;
2997 else
2998 ei_next (&ei);
3001 gsi = gsi_last_bb (gimple_bb (use_stmt));
3002 gsi_remove (&gsi, true);
 3004               /* And fix up the flags on the single remaining edge.  */
3005 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
3006 te->flags &= ~EDGE_ABNORMAL;
3007 te->flags |= EDGE_FALLTHRU;
3008 if (te->probability > REG_BR_PROB_BASE)
3009 te->probability = REG_BR_PROB_BASE;
3014 /* Ensure there is nothing else to do. */
3015 gcc_assert (!all || has_zero_uses (lhs));
3017 /* If we were able to propagate away all uses of LHS, then
3018 we can remove STMT. */
3019 if (all)
3020 remove_stmt_or_phi (stmt);
3024 /* STMT is either a PHI node (potentially a degenerate PHI node) or
3025 a statement that is a trivial copy or constant initialization.
 3027    Attempt to eliminate STMT by propagating its RHS into all uses of
3028 its LHS. This may in turn set new bits in INTERESTING_NAMES
3029 for nodes we want to revisit later.
3031 All exit paths should clear INTERESTING_NAMES for the result
3032 of STMT. */
3034 static void
3035 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
3037 tree lhs = get_lhs_or_phi_result (stmt);
3038 tree rhs;
3039 int version = SSA_NAME_VERSION (lhs);
3041 /* If the LHS of this statement or PHI has no uses, then we can
3042 just eliminate it. This can occur if, for example, the PHI
3043 was created by block duplication due to threading and its only
3044 use was in the conditional at the end of the block which was
3045 deleted. */
3046 if (has_zero_uses (lhs))
3048 bitmap_clear_bit (interesting_names, version);
3049 remove_stmt_or_phi (stmt);
3050 return;
 3053   /* Get the RHS of the assignment, or of the PHI node if the PHI
 3054      is degenerate.  */
3055 rhs = get_rhs_or_phi_arg (stmt);
3056 if (!rhs)
3058 bitmap_clear_bit (interesting_names, version);
3059 return;
3062 if (!virtual_operand_p (lhs))
3063 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3064 else
3066 gimple use_stmt;
3067 imm_use_iterator iter;
3068 use_operand_p use_p;
3069 /* For virtual operands we have to propagate into all uses as
 3070          otherwise we will create overlapping live ranges.  */
3071 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3072 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3073 SET_USE (use_p, rhs);
3074 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3075 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3076 remove_stmt_or_phi (stmt);
 3079   /* Note that STMT may well have been deleted by now, so do
 3080      not access it; instead use the saved version number to clear
 3081      STMT's entry in the worklist.  */
3082 bitmap_clear_bit (interesting_names, version);
3085 /* The first phase in degenerate PHI elimination.
3087 Eliminate the degenerate PHIs in BB, then recurse on the
3088 dominator children of BB. */
3090 static void
3091 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3093 gphi_iterator gsi;
3094 basic_block son;
3096 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3098 gphi *phi = gsi.phi ();
3100 eliminate_const_or_copy (phi, interesting_names);
3103 /* Recurse into the dominator children of BB. */
3104 for (son = first_dom_son (CDI_DOMINATORS, bb);
3105 son;
3106 son = next_dom_son (CDI_DOMINATORS, son))
3107 eliminate_degenerate_phis_1 (son, interesting_names);
3111 /* A very simple pass to eliminate degenerate PHI nodes from the
 3112    IL.  This is meant to be fast enough to be run several
3113 times in the optimization pipeline.
3115 Certain optimizations, particularly those which duplicate blocks
3116 or remove edges from the CFG can create or expose PHIs which are
3117 trivial copies or constant initializations.
3119 While we could pick up these optimizations in DOM or with the
3120 combination of copy-prop and CCP, those solutions are far too
3121 heavy-weight for our needs.
3123 This implementation has two phases so that we can efficiently
3124 eliminate the first order degenerate PHIs and second order
3125 degenerate PHIs.
3127 The first phase performs a dominator walk to identify and eliminate
3128 the vast majority of the degenerate PHIs. When a degenerate PHI
3129 is identified and eliminated any affected statements or PHIs
3130 are put on a worklist.
3132 The second phase eliminates degenerate PHIs and trivial copies
3133 or constant initializations using the worklist. This is how we
3134 pick up the secondary optimization opportunities with minimal
3135 cost. */
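/* A hypothetical illustration of a second order degenerate PHI:

     x_2 = PHI <x_1(2), x_1(3)>   <-- first order, equivalent to x_2 = x_1
     y_4 = PHI <x_2(5), x_1(6)>   <-- degenerate once x_2 is replaced by x_1

   Eliminating the first PHI during the dominator walk exposes the
   second, which the worklist phase then picks up.  */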
3137 namespace {
3139 const pass_data pass_data_phi_only_cprop =
3141 GIMPLE_PASS, /* type */
3142 "phicprop", /* name */
3143 OPTGROUP_NONE, /* optinfo_flags */
3144 TV_TREE_PHI_CPROP, /* tv_id */
3145 ( PROP_cfg | PROP_ssa ), /* properties_required */
3146 0, /* properties_provided */
3147 0, /* properties_destroyed */
3148 0, /* todo_flags_start */
3149 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3152 class pass_phi_only_cprop : public gimple_opt_pass
3154 public:
3155 pass_phi_only_cprop (gcc::context *ctxt)
3156 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3159 /* opt_pass methods: */
3160 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3161 virtual bool gate (function *) { return flag_tree_dom != 0; }
3162 virtual unsigned int execute (function *);
3164 }; // class pass_phi_only_cprop
3166 unsigned int
3167 pass_phi_only_cprop::execute (function *fun)
3169 bitmap interesting_names;
3170 bitmap interesting_names1;
 3172   /* Bitmap of blocks which need EH information updated.  We cannot
3173 update it on-the-fly as doing so invalidates the dominator tree. */
3174 need_eh_cleanup = BITMAP_ALLOC (NULL);
3176 /* INTERESTING_NAMES is effectively our worklist, indexed by
3177 SSA_NAME_VERSION.
3179 A set bit indicates that the statement or PHI node which
3180 defines the SSA_NAME should be (re)examined to determine if
3181 it has become a degenerate PHI or trivial const/copy propagation
3182 opportunity.
 3184      Experiments have shown we generally get better compilation
3185 time behavior with bitmaps rather than sbitmaps. */
3186 interesting_names = BITMAP_ALLOC (NULL);
3187 interesting_names1 = BITMAP_ALLOC (NULL);
3189 calculate_dominance_info (CDI_DOMINATORS);
3190 cfg_altered = false;
3192 /* First phase. Eliminate degenerate PHIs via a dominator
3193 walk of the CFG.
3195 Experiments have indicated that we generally get better
3196 compile-time behavior by visiting blocks in the first
3197 phase in dominator order. Presumably this is because walking
3198 in dominator order leaves fewer PHIs for later examination
3199 by the worklist phase. */
3200 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3201 interesting_names);
3203 /* Second phase. Eliminate second order degenerate PHIs as well
3204 as trivial copies or constant initializations identified by
3205 the first phase or this phase. Basically we keep iterating
3206 until our set of INTERESTING_NAMEs is empty. */
3207 while (!bitmap_empty_p (interesting_names))
3209 unsigned int i;
3210 bitmap_iterator bi;
3212 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3213 changed during the loop. Copy it to another bitmap and
3214 use that. */
3215 bitmap_copy (interesting_names1, interesting_names);
3217 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3219 tree name = ssa_name (i);
3221 /* Ignore SSA_NAMEs that have been released because
3222 their defining statement was deleted (unreachable). */
3223 if (name)
3224 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3225 interesting_names);
3229 if (cfg_altered)
3231 free_dominance_info (CDI_DOMINATORS);
 3232       /* If we changed the CFG, schedule loops for fixup by cfgcleanup.  */
3233 loops_state_set (LOOPS_NEED_FIXUP);
 3236   /* Propagation of constants and copies may make some EH edges dead.  Purge
3237 such edges from the CFG as needed. */
3238 if (!bitmap_empty_p (need_eh_cleanup))
3240 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3241 BITMAP_FREE (need_eh_cleanup);
3244 BITMAP_FREE (interesting_names);
3245 BITMAP_FREE (interesting_names1);
3246 return 0;
3249 } // anon namespace
3251 gimple_opt_pass *
3252 make_pass_phi_only_cprop (gcc::context *ctxt)
3254 return new pass_phi_only_cprop (ctxt);