/* SSA Dominator optimizations for trees
   Copyright (C) 2001-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "alias.h"
#include "symtab.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "domwalk.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "tree-ssa-threadupdate.h"
#include "langhooks.h"
#include "params.h"
#include "tree-ssa-scopedtables.h"
#include "tree-ssa-threadedge.h"
#include "tree-ssa-dom.h"
#include "gimplify.h"
#include "tree-cfgcleanup.h"
/* This file implements optimizations on the dominator tree.  */

/* Representation of a "naked" right-hand-side expression, to be used
   in recording available expressions in the expression hash table.  */

enum expr_kind
{
  EXPR_SINGLE,
  EXPR_UNARY,
  EXPR_BINARY,
  EXPR_TERNARY,
  EXPR_CALL,
  EXPR_PHI
};

struct hashable_expr
{
  tree type;
  enum expr_kind kind;
  union {
    struct { tree rhs; } single;
    struct { enum tree_code op;  tree opnd; } unary;
    struct { enum tree_code op;  tree opnd0, opnd1; } binary;
    struct { enum tree_code op;  tree opnd0, opnd1, opnd2; } ternary;
    struct { gcall *fn_from; bool pure; size_t nargs; tree *args; } call;
    struct { size_t nargs; tree *args; } phi;
  } ops;
};
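
/* For example, the assignment a_1 = b_2 + c_3 is recorded as an
   EXPR_BINARY with op PLUS_EXPR and operands b_2 and c_3, while the
   condition of "if (x_4 == 0)" becomes an EXPR_BINARY with op EQ_EXPR;
   see initialize_hash_element below for the exact mapping.  */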
/* Structure for recording known values of a conditional expression
   at the exits from its block.  */

typedef struct cond_equivalence_s
{
  struct hashable_expr cond;
  tree value;
} cond_equivalence;
/* Structure for recording edge equivalences as well as any pending
   edge redirections during the dominator optimizer.

   Computing and storing the edge equivalences instead of creating
   them on-demand can save significant amounts of time, particularly
   for pathological cases involving switch statements.

   These structures live for a single iteration of the dominator
   optimizer in the edge's AUX field.  At the end of an iteration we
   free each of these structures and update the AUX field to point
   to any requested redirection target (the code for updating the
   CFG and SSA graph for edge redirection expects redirection edge
   targets to be in the AUX field for each edge).  */

struct edge_info
{
  /* If this edge creates a simple equivalence, the LHS and RHS of
     the equivalence will be stored here.  */
  tree lhs;
  tree rhs;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;
};
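
/* For instance, on the true edge out of "if (x_1 == 42)" we can store
   the simple equivalence LHS = x_1, RHS = 42, and additionally record
   in COND_EQUIVALENCES that x_1 != 42 is false, x_1 <= 42 is true,
   and so on (see record_conditions below).  */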
/* Stack of available expressions in AVAIL_EXPRs.  Each block pushes any
   expressions it enters into the hash table along with a marker entry
   (null).  When we finish processing the block, we pop off entries and
   remove the expressions from the global hash table until we hit the
   marker.  */
typedef struct expr_hash_elt * expr_hash_elt_t;

static vec<std::pair<expr_hash_elt_t, expr_hash_elt_t> > avail_exprs_stack;
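
/* Each stack entry is a pair: the first element is the expression that
   was made available, the second is the (possibly null) hash table
   entry it displaced.  Unwinding pops entries until the (NULL, NULL)
   marker pushed at block entry, restoring any displaced entry as it
   goes; see remove_local_expressions_from_table.  */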
/* Structure for entries in the expression hash table.  */

struct expr_hash_elt
{
  /* The value (lhs) of this expression.  */
  tree lhs;

  /* The expression (rhs) we want to record.  */
  struct hashable_expr expr;

  /* The virtual operand associated with the nearest dominating stmt
     loading from or storing to expr.  */
  tree vop;

  /* The hash value for RHS.  */
  hashval_t hash;

  /* A unique stamp, typically the address of the hash
     element itself, used in removing entries from the table.  */
  struct expr_hash_elt *stamp;
};
/* Hashtable helpers.  */

static bool hashable_expr_equal_p (const struct hashable_expr *,
                                   const struct hashable_expr *);
static void free_expr_hash_elt (void *);

struct expr_elt_hasher : pointer_hash <expr_hash_elt>
{
  static inline hashval_t hash (const value_type &);
  static inline bool equal (const value_type &, const compare_type &);
  static inline void remove (value_type &);
};

inline hashval_t
expr_elt_hasher::hash (const value_type &p)
{
  return p->hash;
}

inline bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
  const struct hashable_expr *expr1 = &p1->expr;
  const struct expr_hash_elt *stamp1 = p1->stamp;
  const struct hashable_expr *expr2 = &p2->expr;
  const struct expr_hash_elt *stamp2 = p2->stamp;

  /* This case should apply only when removing entries from the table.  */
  if (stamp1 == stamp2)
    return true;

  if (p1->hash != p2->hash)
    return false;

  /* In case of a collision, both RHS have to be identical and have the
     same VUSE operands.  */
  if (hashable_expr_equal_p (expr1, expr2)
      && types_compatible_p (expr1->type, expr2->type))
    return true;

  return false;
}

/* Delete an expr_hash_elt and reclaim its storage.  */

inline void
expr_elt_hasher::remove (value_type &element)
{
  free_expr_hash_elt (element);
}
/* Hash table with expressions made available during the renaming process.
   When an assignment of the form X_i = EXPR is found, the statement is
   stored in this table.  If the same expression EXPR is later found on the
   RHS of another statement, it is replaced with X_i (thus performing
   global redundancy elimination).  Similarly as we pass through conditionals
   we record the conditional itself as having either a true or false value
   in this table.  */
static hash_table<expr_elt_hasher> *avail_exprs;

/* Unwindable const/copy equivalences.  */
static const_and_copies *const_and_copies;

/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
static vec<gimple> need_noreturn_fixup;
/* Statistics for dominator optimizations.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  */
static void optimize_stmt (basic_block, gimple_stmt_iterator);
static tree lookup_avail_expr (gimple, bool);
static hashval_t avail_expr_hash (const void *);
static void htab_statistics (FILE *,
                             const hash_table<expr_elt_hasher> &);
static void record_cond (cond_equivalence *);
static void record_equality (tree, tree);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block);
static void eliminate_redundant_computations (gimple_stmt_iterator *);
static void record_equivalences_from_stmt (gimple, int);
static void remove_local_expressions_from_table (void);
static edge single_incoming_edge_ignoring_loop_edges (basic_block);
/* Given a statement STMT, initialize the hash table element pointed to
   by ELEMENT.  */

static void
initialize_hash_element (gimple stmt, tree lhs,
                         struct expr_hash_elt *element)
{
  enum gimple_code code = gimple_code (stmt);
  struct hashable_expr *expr = &element->expr;

  if (code == GIMPLE_ASSIGN)
    {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);

      switch (get_gimple_rhs_class (subcode))
        {
        case GIMPLE_SINGLE_RHS:
          expr->kind = EXPR_SINGLE;
          expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_UNARY_RHS:
          expr->kind = EXPR_UNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          if (CONVERT_EXPR_CODE_P (subcode))
            subcode = NOP_EXPR;
          expr->ops.unary.op = subcode;
          expr->ops.unary.opnd = gimple_assign_rhs1 (stmt);
          break;
        case GIMPLE_BINARY_RHS:
          expr->kind = EXPR_BINARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.binary.op = subcode;
          expr->ops.binary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.binary.opnd1 = gimple_assign_rhs2 (stmt);
          break;
        case GIMPLE_TERNARY_RHS:
          expr->kind = EXPR_TERNARY;
          expr->type = TREE_TYPE (gimple_assign_lhs (stmt));
          expr->ops.ternary.op = subcode;
          expr->ops.ternary.opnd0 = gimple_assign_rhs1 (stmt);
          expr->ops.ternary.opnd1 = gimple_assign_rhs2 (stmt);
          expr->ops.ternary.opnd2 = gimple_assign_rhs3 (stmt);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else if (code == GIMPLE_COND)
    {
      expr->type = boolean_type_node;
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = gimple_cond_code (stmt);
      expr->ops.binary.opnd0 = gimple_cond_lhs (stmt);
      expr->ops.binary.opnd1 = gimple_cond_rhs (stmt);
    }
  else if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
    {
      size_t nargs = gimple_call_num_args (call_stmt);
      size_t i;

      gcc_assert (gimple_call_lhs (call_stmt));

      expr->type = TREE_TYPE (gimple_call_lhs (call_stmt));
      expr->kind = EXPR_CALL;
      expr->ops.call.fn_from = call_stmt;

      if (gimple_call_flags (call_stmt) & (ECF_CONST | ECF_PURE))
        expr->ops.call.pure = true;
      else
        expr->ops.call.pure = false;

      expr->ops.call.nargs = nargs;
      expr->ops.call.args = XCNEWVEC (tree, nargs);
      for (i = 0; i < nargs; i++)
        expr->ops.call.args[i] = gimple_call_arg (call_stmt, i);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    {
      expr->type = TREE_TYPE (gimple_switch_index (swtch_stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_switch_index (swtch_stmt);
    }
  else if (code == GIMPLE_GOTO)
    {
      expr->type = TREE_TYPE (gimple_goto_dest (stmt));
      expr->kind = EXPR_SINGLE;
      expr->ops.single.rhs = gimple_goto_dest (stmt);
    }
  else if (code == GIMPLE_PHI)
    {
      size_t nargs = gimple_phi_num_args (stmt);
      size_t i;

      expr->type = TREE_TYPE (gimple_phi_result (stmt));
      expr->kind = EXPR_PHI;
      expr->ops.phi.nargs = nargs;
      expr->ops.phi.args = XCNEWVEC (tree, nargs);

      for (i = 0; i < nargs; i++)
        expr->ops.phi.args[i] = gimple_phi_arg_def (stmt, i);
    }
  else
    gcc_unreachable ();

  element->lhs = lhs;
  element->vop = gimple_vuse (stmt);
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Given a conditional expression COND as a tree, initialize
   a hashable_expr expression EXPR.  The conditional must be a
   comparison or logical negation.  A constant or a variable is
   not permitted.  */

static void
initialize_expr_from_cond (tree cond, struct hashable_expr *expr)
{
  expr->type = boolean_type_node;

  if (COMPARISON_CLASS_P (cond))
    {
      expr->kind = EXPR_BINARY;
      expr->ops.binary.op = TREE_CODE (cond);
      expr->ops.binary.opnd0 = TREE_OPERAND (cond, 0);
      expr->ops.binary.opnd1 = TREE_OPERAND (cond, 1);
    }
  else if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
    {
      expr->kind = EXPR_UNARY;
      expr->ops.unary.op = TRUTH_NOT_EXPR;
      expr->ops.unary.opnd = TREE_OPERAND (cond, 0);
    }
  else
    gcc_unreachable ();
}
/* Given a hashable_expr expression EXPR and an LHS,
   initialize the hash table element pointed to by ELEMENT.  */

static void
initialize_hash_element_from_expr (struct hashable_expr *expr,
                                   tree lhs,
                                   struct expr_hash_elt *element)
{
  element->expr = *expr;
  element->lhs = lhs;
  element->vop = NULL_TREE;
  element->hash = avail_expr_hash (element);
  element->stamp = element;
}
/* Compare two hashable_expr structures for equivalence.  They are
   considered equivalent when the expressions they denote must
   necessarily be equal.  The logic is intended to follow that of
   operand_equal_p in fold-const.c  */

static bool
hashable_expr_equal_p (const struct hashable_expr *expr0,
                       const struct hashable_expr *expr1)
{
  tree type0 = expr0->type;
  tree type1 = expr1->type;

  /* If either type is NULL, there is nothing to check.  */
  if ((type0 == NULL_TREE) ^ (type1 == NULL_TREE))
    return false;

  /* If the two types differ in signedness, precision, or mode,
     then we can't consider them equal.  */
  if (type0 != type1
      && (TREE_CODE (type0) == ERROR_MARK
          || TREE_CODE (type1) == ERROR_MARK
          || TYPE_UNSIGNED (type0) != TYPE_UNSIGNED (type1)
          || TYPE_PRECISION (type0) != TYPE_PRECISION (type1)
          || TYPE_MODE (type0) != TYPE_MODE (type1)))
    return false;

  if (expr0->kind != expr1->kind)
    return false;

  switch (expr0->kind)
    {
    case EXPR_SINGLE:
      return operand_equal_p (expr0->ops.single.rhs,
                              expr1->ops.single.rhs, 0);

    case EXPR_UNARY:
      if (expr0->ops.unary.op != expr1->ops.unary.op)
        return false;

      if ((CONVERT_EXPR_CODE_P (expr0->ops.unary.op)
           || expr0->ops.unary.op == NON_LVALUE_EXPR)
          && TYPE_UNSIGNED (expr0->type) != TYPE_UNSIGNED (expr1->type))
        return false;

      return operand_equal_p (expr0->ops.unary.opnd,
                              expr1->ops.unary.opnd, 0);

    case EXPR_BINARY:
      if (expr0->ops.binary.op != expr1->ops.binary.op)
        return false;

      if (operand_equal_p (expr0->ops.binary.opnd0,
                           expr1->ops.binary.opnd0, 0)
          && operand_equal_p (expr0->ops.binary.opnd1,
                              expr1->ops.binary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_tree_code (expr0->ops.binary.op)
              && operand_equal_p (expr0->ops.binary.opnd0,
                                  expr1->ops.binary.opnd1, 0)
              && operand_equal_p (expr0->ops.binary.opnd1,
                                  expr1->ops.binary.opnd0, 0));

    case EXPR_TERNARY:
      if (expr0->ops.ternary.op != expr1->ops.ternary.op
          || !operand_equal_p (expr0->ops.ternary.opnd2,
                               expr1->ops.ternary.opnd2, 0))
        return false;

      if (operand_equal_p (expr0->ops.ternary.opnd0,
                           expr1->ops.ternary.opnd0, 0)
          && operand_equal_p (expr0->ops.ternary.opnd1,
                              expr1->ops.ternary.opnd1, 0))
        return true;

      /* For commutative ops, allow the other order.  */
      return (commutative_ternary_tree_code (expr0->ops.ternary.op)
              && operand_equal_p (expr0->ops.ternary.opnd0,
                                  expr1->ops.ternary.opnd1, 0)
              && operand_equal_p (expr0->ops.ternary.opnd1,
                                  expr1->ops.ternary.opnd0, 0));

    case EXPR_CALL:
      {
        size_t i;

        /* If the calls are to different functions, then they
           clearly cannot be equal.  */
        if (!gimple_call_same_target_p (expr0->ops.call.fn_from,
                                        expr1->ops.call.fn_from))
          return false;

        if (! expr0->ops.call.pure)
          return false;

        if (expr0->ops.call.nargs != expr1->ops.call.nargs)
          return false;

        for (i = 0; i < expr0->ops.call.nargs; i++)
          if (! operand_equal_p (expr0->ops.call.args[i],
                                 expr1->ops.call.args[i], 0))
            return false;

        if (stmt_could_throw_p (expr0->ops.call.fn_from))
          {
            int lp0 = lookup_stmt_eh_lp (expr0->ops.call.fn_from);
            int lp1 = lookup_stmt_eh_lp (expr1->ops.call.fn_from);
            if ((lp0 > 0 || lp1 > 0) && lp0 != lp1)
              return false;
          }

        return true;
      }

    case EXPR_PHI:
      {
        size_t i;

        if (expr0->ops.phi.nargs != expr1->ops.phi.nargs)
          return false;

        for (i = 0; i < expr0->ops.phi.nargs; i++)
          if (! operand_equal_p (expr0->ops.phi.args[i],
                                 expr1->ops.phi.args[i], 0))
            return false;

        return true;
      }

    default:
      gcc_unreachable ();
    }
}
/* Generate a hash value for a pair of expressions.  This can be used
   iteratively by passing a previous result in HSTATE.

   The same hash value is always returned for a given pair of expressions,
   regardless of the order in which they are presented.  This is useful in
   hashing the operands of commutative functions.  */

namespace inchash
{

static void
add_expr_commutative (const_tree t1, const_tree t2, hash &hstate)
{
  hash one, two;

  inchash::add_expr (t1, one);
  inchash::add_expr (t2, two);
  hstate.add_commutative (one, two);
}
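
/* Because the two operand hashes are combined commutatively, b_2 + a_1
   contributes the same hash state as a_1 + b_2, keeping the hash
   consistent with the commutative-operand handling in
   hashable_expr_equal_p above.  */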
/* Compute a hash value for a hashable_expr value EXPR and a
   previously accumulated hash value VAL.  If two hashable_expr
   values compare equal with hashable_expr_equal_p, they must
   hash to the same value, given an identical value of VAL.
   The logic is intended to follow inchash::add_expr in tree.c.  */

static void
add_hashable_expr (const struct hashable_expr *expr, hash &hstate)
{
  switch (expr->kind)
    {
    case EXPR_SINGLE:
      inchash::add_expr (expr->ops.single.rhs, hstate);
      break;

    case EXPR_UNARY:
      hstate.add_object (expr->ops.unary.op);

      /* Make sure to include signedness in the hash computation.
         Don't hash the type, that can lead to having nodes which
         compare equal according to operand_equal_p, but which
         have different hash codes.  */
      if (CONVERT_EXPR_CODE_P (expr->ops.unary.op)
          || expr->ops.unary.op == NON_LVALUE_EXPR)
        hstate.add_int (TYPE_UNSIGNED (expr->type));

      inchash::add_expr (expr->ops.unary.opnd, hstate);
      break;

    case EXPR_BINARY:
      hstate.add_object (expr->ops.binary.op);
      if (commutative_tree_code (expr->ops.binary.op))
        inchash::add_expr_commutative (expr->ops.binary.opnd0,
                                       expr->ops.binary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.binary.opnd0, hstate);
          inchash::add_expr (expr->ops.binary.opnd1, hstate);
        }
      break;

    case EXPR_TERNARY:
      hstate.add_object (expr->ops.ternary.op);
      if (commutative_ternary_tree_code (expr->ops.ternary.op))
        inchash::add_expr_commutative (expr->ops.ternary.opnd0,
                                       expr->ops.ternary.opnd1, hstate);
      else
        {
          inchash::add_expr (expr->ops.ternary.opnd0, hstate);
          inchash::add_expr (expr->ops.ternary.opnd1, hstate);
        }
      inchash::add_expr (expr->ops.ternary.opnd2, hstate);
      break;

    case EXPR_CALL:
      {
        size_t i;
        enum tree_code code = CALL_EXPR;
        gcall *fn_from;

        hstate.add_object (code);
        fn_from = expr->ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          hstate.merge_hash ((hashval_t) gimple_call_internal_fn (fn_from));
        else
          inchash::add_expr (gimple_call_fn (fn_from), hstate);
        for (i = 0; i < expr->ops.call.nargs; i++)
          inchash::add_expr (expr->ops.call.args[i], hstate);
      }
      break;

    case EXPR_PHI:
      {
        size_t i;

        for (i = 0; i < expr->ops.phi.nargs; i++)
          inchash::add_expr (expr->ops.phi.args[i], hstate);
      }
      break;

    default:
      gcc_unreachable ();
    }
}

}
/* Print a diagnostic dump of an expression hash table entry.  */

static void
print_expr_hash_elt (FILE * stream, const struct expr_hash_elt *element)
{
  fprintf (stream, "STMT ");

  if (element->lhs)
    {
      print_generic_expr (stream, element->lhs, 0);
      fprintf (stream, " = ");
    }

  switch (element->expr.kind)
    {
    case EXPR_SINGLE:
      print_generic_expr (stream, element->expr.ops.single.rhs, 0);
      break;

    case EXPR_UNARY:
      fprintf (stream, "%s ", get_tree_code_name (element->expr.ops.unary.op));
      print_generic_expr (stream, element->expr.ops.unary.opnd, 0);
      break;

    case EXPR_BINARY:
      print_generic_expr (stream, element->expr.ops.binary.opnd0, 0);
      fprintf (stream, " %s ", get_tree_code_name (element->expr.ops.binary.op));
      print_generic_expr (stream, element->expr.ops.binary.opnd1, 0);
      break;

    case EXPR_TERNARY:
      fprintf (stream, " %s <", get_tree_code_name (element->expr.ops.ternary.op));
      print_generic_expr (stream, element->expr.ops.ternary.opnd0, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd1, 0);
      fputs (", ", stream);
      print_generic_expr (stream, element->expr.ops.ternary.opnd2, 0);
      fputs (">", stream);
      break;

    case EXPR_CALL:
      {
        size_t i;
        size_t nargs = element->expr.ops.call.nargs;
        gcall *fn_from;

        fn_from = element->expr.ops.call.fn_from;
        if (gimple_call_internal_p (fn_from))
          fputs (internal_fn_name (gimple_call_internal_fn (fn_from)),
                 stream);
        else
          print_generic_expr (stream, gimple_call_fn (fn_from), 0);
        fprintf (stream, " (");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.call.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ")");
      }
      break;

    case EXPR_PHI:
      {
        size_t i;
        size_t nargs = element->expr.ops.phi.nargs;

        fprintf (stream, "PHI <");
        for (i = 0; i < nargs; i++)
          {
            print_generic_expr (stream, element->expr.ops.phi.args[i], 0);
            if (i + 1 < nargs)
              fprintf (stream, ", ");
          }
        fprintf (stream, ">");
      }
      break;
    }

  if (element->vop)
    {
      fprintf (stream, " with ");
      print_generic_expr (stream, element->vop, 0);
    }

  fprintf (stream, "\n");
}
/* Delete variable sized pieces of the expr_hash_elt ELEMENT.  */

static void
free_expr_hash_elt_contents (struct expr_hash_elt *element)
{
  if (element->expr.kind == EXPR_CALL)
    free (element->expr.ops.call.args);
  else if (element->expr.kind == EXPR_PHI)
    free (element->expr.ops.phi.args);
}

/* Delete an expr_hash_elt and reclaim its storage.  */

static void
free_expr_hash_elt (void *elt)
{
  struct expr_hash_elt *element = ((struct expr_hash_elt *)elt);
  free_expr_hash_elt_contents (element);
  free (element);
}
/* Allocate an EDGE_INFO for edge E and attach it to E.
   Return the new EDGE_INFO structure.  */

static struct edge_info *
allocate_edge_info (edge e)
{
  struct edge_info *edge_info;

  edge_info = XCNEW (struct edge_info);

  e->aux = edge_info;
  return edge_info;
}
/* Free all EDGE_INFO structures associated with edges in the CFG.
   If a particular edge can be threaded, copy the redirection
   target from the EDGE_INFO structure into the edge's AUX field
   as required by code to update the CFG and SSA graph for
   jump threading.  */

static void
free_all_edge_infos (void)
{
  basic_block bb;
  edge_iterator ei;
  edge e;

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          struct edge_info *edge_info = (struct edge_info *) e->aux;

          if (edge_info)
            {
              edge_info->cond_equivalences.release ();
              free (edge_info);
              e->aux = NULL;
            }
        }
    }
}
/* Build a cond_equivalence record indicating that the comparison
   CODE holds between operands OP0 and OP1 and push it to **P.  */

static void
build_and_record_new_cond (enum tree_code code,
                           tree op0, tree op1,
                           vec<cond_equivalence> *p,
                           bool val = true)
{
  cond_equivalence c;
  struct hashable_expr *cond = &c.cond;

  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);

  cond->type = boolean_type_node;
  cond->kind = EXPR_BINARY;
  cond->ops.binary.op = code;
  cond->ops.binary.opnd0 = op0;
  cond->ops.binary.opnd1 = op1;

  c.value = val ? boolean_true_node : boolean_false_node;
  p->safe_push (c);
}
/* Record that COND is true and INVERTED is false into the edge information
   structure.  Also record that any conditions dominated by COND are true
   as well.

   For example, if a < b is true, then a <= b must also be true.  */

static void
record_conditions (struct edge_info *edge_info, tree cond, tree inverted)
{
  tree op0, op1;
  cond_equivalence c;

  if (!COMPARISON_CLASS_P (cond))
    return;

  op0 = TREE_OPERAND (cond, 0);
  op1 = TREE_OPERAND (cond, 1);

  switch (TREE_CODE (cond))
    {
    case LT_EXPR:
    case GT_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
          build_and_record_new_cond (LTGT_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }

      build_and_record_new_cond ((TREE_CODE (cond) == LT_EXPR
                                  ? LE_EXPR : GE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (EQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences, false);
      break;

    case GE_EXPR:
    case LE_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      break;

    case EQ_EXPR:
      if (FLOAT_TYPE_P (TREE_TYPE (op0)))
        {
          build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                     &edge_info->cond_equivalences);
        }
      build_and_record_new_cond (LE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (GE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNORDERED_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNEQ_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNLT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGT_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNLT_EXPR:
    case UNGT_EXPR:
      build_and_record_new_cond ((TREE_CODE (cond) == UNLT_EXPR
                                  ? UNLE_EXPR : UNGE_EXPR),
                                 op0, op1, &edge_info->cond_equivalences);
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case UNEQ_EXPR:
      build_and_record_new_cond (UNLE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (UNGE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    case LTGT_EXPR:
      build_and_record_new_cond (NE_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      build_and_record_new_cond (ORDERED_EXPR, op0, op1,
                                 &edge_info->cond_equivalences);
      break;

    default:
      break;
    }

  /* Now store the original condition as true and its inversion
     as false.  */
  initialize_expr_from_cond (cond, &c.cond);
  c.value = boolean_true_node;
  edge_info->cond_equivalences.safe_push (c);

  /* It is possible for INVERTED to be the negation of a comparison,
     and not a valid RHS or GIMPLE_COND condition.  This happens because
     invert_truthvalue may return such an expression when asked to invert
     a floating-point comparison.  These comparisons are not assumed to
     obey the trichotomy law.  */
  initialize_expr_from_cond (inverted, &c.cond);
  c.value = boolean_false_node;
  edge_info->cond_equivalences.safe_push (c);
}
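
/* As a concrete example: on the true edge of "if (a_1 < b_2)" for an
   integer comparison, the vector ends up recording a_1 <= b_2 == true,
   a_1 != b_2 == true, a_1 == b_2 == false, plus the original condition
   a_1 < b_2 == true and its inversion a_1 >= b_2 == false.  */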
/* We have finished optimizing BB, record any information implied by
   taking a specific outgoing edge from BB.  */

static void
record_edge_info (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  struct edge_info *edge_info;

  if (! gsi_end_p (gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      location_t loc = gimple_location (stmt);

      if (gimple_code (stmt) == GIMPLE_SWITCH)
        {
          gswitch *switch_stmt = as_a <gswitch *> (stmt);
          tree index = gimple_switch_index (switch_stmt);

          if (TREE_CODE (index) == SSA_NAME)
            {
              int i;
              int n_labels = gimple_switch_num_labels (switch_stmt);
              tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun));
              edge e;
              edge_iterator ei;

              for (i = 0; i < n_labels; i++)
                {
                  tree label = gimple_switch_label (switch_stmt, i);
                  basic_block target_bb = label_to_block (CASE_LABEL (label));
                  if (CASE_HIGH (label)
                      || !CASE_LOW (label)
                      || info[target_bb->index])
                    info[target_bb->index] = error_mark_node;
                  else
                    info[target_bb->index] = label;
                }

              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  basic_block target_bb = e->dest;
                  tree label = info[target_bb->index];

                  if (label != NULL && label != error_mark_node)
                    {
                      tree x = fold_convert_loc (loc, TREE_TYPE (index),
                                                 CASE_LOW (label));
                      edge_info = allocate_edge_info (e);
                      edge_info->lhs = index;
                      edge_info->rhs = x;
                    }
                }
              free (info);
            }
        }

      /* A COND_EXPR may create equivalences too.  */
      if (gimple_code (stmt) == GIMPLE_COND)
        {
          edge true_edge;
          edge false_edge;

          tree op0 = gimple_cond_lhs (stmt);
          tree op1 = gimple_cond_rhs (stmt);
          enum tree_code code = gimple_cond_code (stmt);

          extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

          /* Special case comparing booleans against a constant as we
             know the value of OP0 on both arms of the branch.  i.e., we
             can record an equivalence for OP0 rather than COND.  */
          if ((code == EQ_EXPR || code == NE_EXPR)
              && TREE_CODE (op0) == SSA_NAME
              && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
              && is_gimple_min_invariant (op1))
            {
              if (code == EQ_EXPR)
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);
                }
              else
                {
                  edge_info = allocate_edge_info (true_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_true_node
                                    : boolean_false_node);

                  edge_info = allocate_edge_info (false_edge);
                  edge_info->lhs = op0;
                  edge_info->rhs = (integer_zerop (op1)
                                    ? boolean_false_node
                                    : boolean_true_node);
                }
            }
          else if (is_gimple_min_invariant (op0)
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op0)
                    && real_zerop (op0));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op1;
                  edge_info->rhs = op0;
                }
            }
          else if (TREE_CODE (op0) == SSA_NAME
                   && (TREE_CODE (op1) == SSA_NAME
                       || is_gimple_min_invariant (op1)))
            {
              tree cond = build2 (code, boolean_type_node, op0, op1);
              tree inverted = invert_truthvalue_loc (loc, cond);
              bool can_infer_simple_equiv
                = !(HONOR_SIGNED_ZEROS (op1)
                    && (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
              struct edge_info *edge_info;

              edge_info = allocate_edge_info (true_edge);
              record_conditions (edge_info, cond, inverted);

              if (can_infer_simple_equiv && code == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }

              edge_info = allocate_edge_info (false_edge);
              record_conditions (edge_info, inverted, cond);

              if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR)
                {
                  edge_info->lhs = op0;
                  edge_info->rhs = op1;
                }
            }
        }

      /* ??? TRUTH_NOT_EXPR can create an equivalence too.  */
    }
}
class dom_opt_dom_walker : public dom_walker
{
public:
  dom_opt_dom_walker (cdi_direction direction)
    : dom_walker (direction), m_dummy_cond (NULL) {}

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block);

private:
  void thread_across_edge (edge);

  gcond *m_dummy_cond;
};

/* Jump threading, redundancy elimination and const/copy propagation.

   This pass may expose new symbols that need to be renamed into SSA.  For
   every new symbol exposed, its corresponding bit will be set in
   VARS_TO_RENAME.  */
namespace {

const pass_data pass_data_dominator =
{
  GIMPLE_PASS, /* type */
  "dom", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

class pass_dominator : public gimple_opt_pass
{
public:
  pass_dominator (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_dominator, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_dominator (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_dom != 0; }
  virtual unsigned int execute (function *);

}; // class pass_dominator
unsigned int
pass_dominator::execute (function *fun)
{
  memset (&opt_stats, 0, sizeof (opt_stats));

  /* Create our hash tables.  */
  avail_exprs = new hash_table<expr_elt_hasher> (1024);
  avail_exprs_stack.create (20);
  const_and_copies = new class const_and_copies (dump_file, dump_flags);
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  Once jump threading is improved, LOOPS_HAVE_PREHEADERS
     won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES);

  /* Initialize the value-handle array.  */
  threadedge_initialize_values ();

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* Recursively walk the dominator tree optimizing statements.  */
  dom_opt_dom_walker (CDI_DOMINATORS).walk (fun->cfg->x_entry_block_ptr);

  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          update_stmt_if_modified (gsi_stmt (gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= thread_through_all_blocks (first_pass_instance);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
         needing EH cleanup; the new successor of these blocks, which
         has inherited from the original block, needs the cleanup.
         Don't clear bits in the bitmap, as that can break the bitmap
         iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
        {
          basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
          if (bb == NULL)
            continue;
          while (single_succ_p (bb)
                 && (single_succ_edge (bb)->flags & EDGE_EH) == 0)
            bb = single_succ (bb);
          if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
            continue;
          if ((unsigned) bb->index != i)
            bitmap_set_bit (need_eh_cleanup, bb->index);
        }

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertently remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
        {
          fprintf (dump_file, "Fixing up noreturn call ");
          print_gimple_stmt (dump_file, stmt, 0, 0);
          fprintf (dump_file, "\n");
        }
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated",
                            opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated",
                            opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated",
                            opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (dump_file);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  avail_exprs_stack.release ();
  delete const_and_copies;

  /* Free the value-handle array.  */
  threadedge_finalize_values ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
/* Given a conditional statement CONDSTMT, convert the
   condition to a canonical form.  */

static void
canonicalize_comparison (gcond *condstmt)
{
  tree op0;
  tree op1;
  enum tree_code code;

  gcc_assert (gimple_code (condstmt) == GIMPLE_COND);

  op0 = gimple_cond_lhs (condstmt);
  op1 = gimple_cond_rhs (condstmt);

  code = gimple_cond_code (condstmt);

  /* If it would be profitable to swap the operands, then do so to
     canonicalize the statement, enabling better optimization.

     By placing canonicalization of such expressions here we
     transparently keep statements in canonical form, even
     when the statement is modified.  */
  if (tree_swap_operands_p (op0, op1, false))
    {
      /* For relationals we need to swap the operands
         and change the code.  */
      if (code == LT_EXPR
          || code == GT_EXPR
          || code == LE_EXPR
          || code == GE_EXPR)
        {
          code = swap_tree_comparison (code);

          gimple_cond_set_code (condstmt, code);
          gimple_cond_set_lhs (condstmt, op1);
          gimple_cond_set_rhs (condstmt, op0);

          update_stmt (condstmt);
        }
    }
}
/* Pop entries off AVAIL_EXPRS_STACK and remove them from the global
   expression hash table until we hit the marker pushed when we began
   processing the current block, restoring any hash table entry that
   an unwound entry had displaced.  */

static void
remove_local_expressions_from_table (void)
{
  /* Remove all the expressions made available in this block.  */
  while (avail_exprs_stack.length () > 0)
    {
      std::pair<expr_hash_elt_t, expr_hash_elt_t> victim
        = avail_exprs_stack.pop ();
      expr_hash_elt **slot;

      if (victim.first == NULL)
        break;

      /* This must precede the actual removal from the hash table,
         as ELEMENT and the table entry may share a call argument
         vector which will be freed during removal.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "<<<< ");
          print_expr_hash_elt (dump_file, victim.first);
        }

      slot = avail_exprs->find_slot (victim.first, NO_INSERT);
      gcc_assert (slot && *slot == victim.first);
      if (victim.second != NULL)
        {
          free_expr_hash_elt (*slot);
          *slot = victim.second;
        }
      else
        avail_exprs->clear_slot (slot);
    }
}
/* A trivial wrapper so that we can present the generic jump
   threading code with a simple API for simplifying statements.  */
static tree
simplify_stmt_for_jump_threading (gimple stmt,
                                  gimple within_stmt ATTRIBUTE_UNUSED)
{
  return lookup_avail_expr (stmt, false);
}
/* Record into the equivalence tables any equivalences implied by
   traversing edge E (which are cached in E->aux).

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e)
{
  int i;
  struct edge_info *edge_info = (struct edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      tree lhs = edge_info->lhs;
      tree rhs = edge_info->rhs;

      /* If we have a simple NAME = VALUE equivalence, record it.  */
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        const_and_copies->record_const_or_copy (lhs, rhs);

      /* If we have 0 = COND or 1 = COND equivalences, record them
         into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
        record_cond (eq);
    }
}
/* Wrapper for common code to attempt to thread an edge.  For example,
   it handles lazily building the dummy condition and the bookkeeping
   when jump threading is successful.  */

void
dom_opt_dom_walker::thread_across_edge (edge e)
{
  if (! m_dummy_cond)
    m_dummy_cond =
      gimple_build_cond (NE_EXPR,
                         integer_zero_node, integer_zero_node,
                         NULL, NULL);

  /* Push a marker on both stacks so we can unwind the tables back to their
     current state.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  /* Traversing E may result in equivalences we can utilize.  */
  record_temporary_equivalences (e);

  /* With all the edge equivalences in the tables, go ahead and attempt
     to thread through E->dest.  */
  ::thread_across_edge (m_dummy_cond, e, false,
                        const_and_copies,
                        simplify_stmt_for_jump_threading);

  /* And restore the various tables to their state before
     we threaded this edge.

     XXX The code in tree-ssa-threadedge.c will restore the state of
     the const_and_copies table.  We just have to restore the expression
     table.  */
  remove_local_expressions_from_table ();
}
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();

      tree lhs = gimple_phi_result (phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (phi); i++)
        {
          tree t = gimple_phi_arg_def (phi, i);

          /* Ignore alternatives which are the same as our LHS.  Since
             LHS is a PHI_RESULT, it is known to be an SSA_NAME, so we
             can simply compare pointers.  */
          if (lhs == t)
            continue;

          /* Valueize t.  */
          if (TREE_CODE (t) == SSA_NAME)
            {
              tree tmp = SSA_NAME_VALUE (t);
              t = tmp ? tmp : t;
            }

          /* If we have not processed an alternative yet, then set
             RHS to this alternative.  */
          if (rhs == NULL)
            rhs = t;
          /* If we have processed an alternative (stored in RHS), then
             see if it is equal to this one.  If it isn't, then stop
             the search.  */
          else if (! operand_equal_for_phi_arg_p (rhs, t))
            break;
        }

      /* If we had no interesting alternatives, then all the RHS alternatives
         must have been the same as LHS.  */
      if (!rhs)
        rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
         breaking out of the loop, then we have a PHI which may create
         a useful equivalence.  We do not need to record unwind data for
         this, since this is a true assignment and not an equivalence
         inferred from a comparison.  All uses of this ssa name are dominated
         by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (phi)
          && may_propagate_copy (lhs, rhs))
        set_ssa_name_value (lhs, rhs);
    }
}
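
/* For example, given x_3 = PHI <7, x_3, 7> we can record that x_3 has
   the value 7: the alternative equal to the result is ignored, and the
   remaining alternatives all agree.  */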
/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.  */
static edge
single_incoming_edge_ignoring_loop_edges (basic_block bb)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}
/* Record any equivalences created by the incoming edge to BB.  If BB
   has more than one incoming edge, then no equivalence is created.  */

static void
record_equivalences_from_incoming_edge (basic_block bb)
{
  edge e;
  basic_block parent;
  struct edge_info *edge_info;

  /* If our parent block ended with a control statement, then we may be
     able to record some equivalences based on which outgoing edge from
     the parent was followed.  */
  parent = get_immediate_dominator (CDI_DOMINATORS, bb);

  e = single_incoming_edge_ignoring_loop_edges (bb);

  /* If we had a single incoming edge from our parent block, then enter
     any data associated with the edge into our tables.  */
  if (e && e->src == parent)
    {
      unsigned int i;

      edge_info = (struct edge_info *) e->aux;

      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;
          cond_equivalence *eq;

          if (lhs)
            record_equality (lhs, rhs);

          /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was
             set via a widening type conversion, then we may be able to record
             additional equivalences.  */
          if (lhs
              && TREE_CODE (lhs) == SSA_NAME
              && is_gimple_constant (rhs)
              && TREE_CODE (rhs) == INTEGER_CST)
            {
              gimple defstmt = SSA_NAME_DEF_STMT (lhs);

              if (defstmt
                  && is_gimple_assign (defstmt)
                  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (defstmt)))
                {
                  tree old_rhs = gimple_assign_rhs1 (defstmt);

                  /* If the conversion widens the original value and
                     the constant is in the range of the type of OLD_RHS,
                     then convert the constant and record the equivalence.

                     Note that int_fits_type_p does not check the precision
                     if the upper and lower bounds are OK.  */
                  if (INTEGRAL_TYPE_P (TREE_TYPE (old_rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          > TYPE_PRECISION (TREE_TYPE (old_rhs)))
                      && int_fits_type_p (rhs, TREE_TYPE (old_rhs)))
                    {
                      tree newval = fold_convert (TREE_TYPE (old_rhs), rhs);
                      record_equality (old_rhs, newval);
                    }
                }
            }

          for (i = 0; edge_info->cond_equivalences.iterate (i, &eq); ++i)
            record_cond (eq);
        }
    }
}
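
/* To illustrate the widening case: given

     c_2 = (int) b_1;   (where b_1 has type char)
     if (c_2 == 5) ...

   on the true edge we record both c_2 == 5 and, because 5 fits in
   char, b_1 == (char) 5.  */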
/* Dump SSA statistics on FILE.  */

void
dump_dominator_optimization_stats (FILE *file)
{
  fprintf (file, "Total number of statements:                   %6ld\n\n",
           opt_stats.num_stmts);
  fprintf (file, "Exprs considered for dominator optimizations: %6ld\n",
           opt_stats.num_exprs_considered);

  fprintf (file, "\nHash table statistics:\n");

  fprintf (file, "    avail_exprs: ");
  htab_statistics (file, *avail_exprs);
}


/* Dump SSA statistics on stderr.  */

DEBUG_FUNCTION void
debug_dominator_optimization_stats (void)
{
  dump_dominator_optimization_stats (stderr);
}


/* Dump statistics for the hash table HTAB.  */

static void
htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab)
{
  fprintf (file, "size %ld, %ld elements, %f collision/search ratio\n",
           (long) htab.size (),
           (long) htab.elements (),
           htab.collisions ());
}
/* Enter condition equivalence into the expression hash table.
   This indicates that a conditional expression has a known
   boolean value.  */

static void
record_cond (cond_equivalence *p)
{
  struct expr_hash_elt *element = XCNEW (struct expr_hash_elt);
  expr_hash_elt **slot;

  initialize_hash_element_from_expr (&p->cond, p->value, element);

  slot = avail_exprs->find_slot_with_hash (element, element->hash, INSERT);
  if (*slot == NULL)
    {
      *slot = element;

      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          fprintf (dump_file, "1>>> ");
          print_expr_hash_elt (dump_file, element);
        }

      avail_exprs_stack.safe_push
        (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element, NULL));
    }
  else
    free_expr_hash_elt (element);
}
/* Return the loop depth of the basic block of the defining statement of X.
   This number should not be treated as absolutely correct because the loop
   information may not be completely up-to-date when dom runs.  However, it
   will be relatively correct, and as more passes are taught to keep loop info
   up to date, the result will become more and more accurate.  */

static int
loop_depth_of_name (tree x)
{
  gimple defstmt;
  basic_block defbb;

  /* If it's not an SSA_NAME, we have no clue where the definition is.  */
  if (TREE_CODE (x) != SSA_NAME)
    return 0;

  /* Otherwise return the loop depth of the defining statement's bb.
     Note that there may not actually be a bb for this statement, if the
     ssa_name is live on entry.  */
  defstmt = SSA_NAME_DEF_STMT (x);
  defbb = gimple_bb (defstmt);
  if (!defbb)
    return 0;

  return bb_loop_depth (defbb);
}
/* Similarly, but assume that X and Y are the two operands of an EQ_EXPR.
   This constrains the cases in which we may treat this as assignment.  */

static void
record_equality (tree x, tree y)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y, false))
    std::swap (x, y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
         X.  This will preserve its single use properly and if this
         conditional is eliminated, the computation of X can be
         eliminated as well.  */
      if (has_single_use (y) && ! has_single_use (x))
        std::swap (x, y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x)
           /* ???  When threading over backedges the following is important
              for correctness.  See PR61757.  */
           || (loop_depth_of_name (x) < loop_depth_of_name (y)))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
          || REAL_VALUES_EQUAL (dconst0, TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}
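
/* For example, on the true edge of "if (x_1 == 7)" this records the
   equivalence x_1 = 7 in the const/copy tables, so dominated uses of
   x_1 can be replaced with the constant.  */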
/* Returns true when STMT is a simple iv increment.  It detects the
   following situation:

   i_1 = phi (..., i_2)
   i_2 = i_1 +/- ...  */

bool
simple_iv_increment_p (gimple stmt)
{
  enum tree_code code;
  tree lhs, preinc;
  gimple phi;
  size_t i;

  if (gimple_code (stmt) != GIMPLE_ASSIGN)
    return false;

  lhs = gimple_assign_lhs (stmt);
  if (TREE_CODE (lhs) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != PLUS_EXPR
      && code != MINUS_EXPR
      && code != POINTER_PLUS_EXPR)
    return false;

  preinc = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (preinc) != SSA_NAME)
    return false;

  phi = SSA_NAME_DEF_STMT (preinc);
  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (gimple_phi_arg_def (phi, i) == lhs)
      return true;

  return false;
}
/* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
   known value for that SSA_NAME (or NULL if no value is known).

   Propagate values from CONST_AND_COPIES into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
         into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
        continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (gsi))
        continue;

      /* We may have an equivalence associated with this edge.  While
         we cannot propagate it into non-dominated blocks, we can
         propagate it into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
         table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

         Don't bother with [01] = COND equivalences, they're not useful
         here.  */
      struct edge_info *edge_info = (struct edge_info *) e->aux;
      if (edge_info)
        {
          tree lhs = edge_info->lhs;
          tree rhs = edge_info->rhs;

          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            const_and_copies->record_const_or_copy (lhs, rhs);
        }

      indx = e->dest_idx;
      for ( ; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          tree new_val;
          use_operand_p orig_p;
          tree orig_val;
          gphi *phi = gsi.phi ();

          /* The alternative may be associated with a constant, so verify
             it is an SSA_NAME before doing anything with it.  */
          orig_p = gimple_phi_arg_imm_use_ptr (phi, indx);
          orig_val = get_use_from_ptr (orig_p);
          if (TREE_CODE (orig_val) != SSA_NAME)
            continue;

          /* If we have *ORIG_P in our constant/copy table, then replace
             ORIG_P with its value in our constant/copy table.  */
          new_val = SSA_NAME_VALUE (orig_val);
          if (new_val
              && new_val != orig_val
              && (TREE_CODE (new_val) == SSA_NAME
                  || is_gimple_min_invariant (new_val))
              && may_propagate_copy (orig_val, new_val))
            propagate_value (orig_p, new_val);
        }

      const_and_copies->pop_to_marker ();
    }
}
void
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\n\nOptimizing block #%d\n\n", bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  const_and_copies->push_marker ();

  record_equivalences_from_incoming_edge (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  avail_exprs_stack.safe_push
    (std::pair<expr_hash_elt_t, expr_hash_elt_t> (NULL, NULL));
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    eliminate_redundant_computations (&gsi);
  remove_local_expressions_from_table ();

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    optimize_stmt (bb, gsi);

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb);
}
1960 /* We have finished processing the dominator children of BB, perform
1961 any finalization actions in preparation for leaving this node in
1962 the dominator tree. */
1964 void
1965 dom_opt_dom_walker::after_dom_children (basic_block bb)
1967 gimple last;
1969 /* If we have an outgoing edge to a block with multiple incoming and
1970 outgoing edges, then we may be able to thread the edge, i.e., we
1971 may be able to statically determine which of the outgoing edges
1972 will be traversed when the incoming edge from BB is traversed. */
1973 if (single_succ_p (bb)
1974 && (single_succ_edge (bb)->flags & EDGE_ABNORMAL) == 0
1975 && potentially_threadable_block (single_succ (bb)))
1977 thread_across_edge (single_succ_edge (bb));
1979 else if ((last = last_stmt (bb))
1980 && gimple_code (last) == GIMPLE_COND
1981 && EDGE_COUNT (bb->succs) == 2
1982 && (EDGE_SUCC (bb, 0)->flags & EDGE_ABNORMAL) == 0
1983 && (EDGE_SUCC (bb, 1)->flags & EDGE_ABNORMAL) == 0)
1985 edge true_edge, false_edge;
1987 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
1989 /* Only try to thread the edge if it reaches a target block with
1990 more than one predecessor and more than one successor. */
1991 if (potentially_threadable_block (true_edge->dest))
1992 thread_across_edge (true_edge);
1994 /* Similarly for the ELSE arm. */
1995 if (potentially_threadable_block (false_edge->dest))
1996 thread_across_edge (false_edge);
2000 /* These remove expressions local to BB from the tables. */
2001 remove_local_expressions_from_table ();
2002 const_and_copies->pop_to_marker ();
2005 /* Search for redundant computations in STMT. If any are found, then
2006 replace them with the variable holding the result of the computation.
2008 If safe, record this expression into the available expression hash
2009 table. */
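/* A small example of the redundancy elimination (hypothetical
   names):

     a_1 = b_2 + c_3;
     ...
     d_4 = b_2 + c_3;

   The second computation is found in the hash table, so it is
   replaced by

     d_4 = a_1;

   and only the first instance remains available.  */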
2011 static void
2012 eliminate_redundant_computations (gimple_stmt_iterator* gsi)
2014 tree expr_type;
2015 tree cached_lhs;
2016 tree def;
2017 bool insert = true;
2018 bool assigns_var_p = false;
2020 gimple stmt = gsi_stmt (*gsi);
2022 if (gimple_code (stmt) == GIMPLE_PHI)
2023 def = gimple_phi_result (stmt);
2024 else
2025 def = gimple_get_lhs (stmt);
2027	  /* Certain expressions on the RHS can be optimized away, but cannot
2028	 themselves be entered into the hash tables.  */
2029 if (! def
2030 || TREE_CODE (def) != SSA_NAME
2031 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
2032 || gimple_vdef (stmt)
2033 /* Do not record equivalences for increments of ivs. This would create
2034 overlapping live ranges for a very questionable gain. */
2035 || simple_iv_increment_p (stmt))
2036 insert = false;
2038 /* Check if the expression has been computed before. */
2039 cached_lhs = lookup_avail_expr (stmt, insert);
2041 opt_stats.num_exprs_considered++;
2043 /* Get the type of the expression we are trying to optimize. */
2044 if (is_gimple_assign (stmt))
2046 expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
2047 assigns_var_p = true;
2049 else if (gimple_code (stmt) == GIMPLE_COND)
2050 expr_type = boolean_type_node;
2051 else if (is_gimple_call (stmt))
2053 gcc_assert (gimple_call_lhs (stmt));
2054 expr_type = TREE_TYPE (gimple_call_lhs (stmt));
2055 assigns_var_p = true;
2057 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2058 expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
2059 else if (gimple_code (stmt) == GIMPLE_PHI)
2060 /* We can't propagate into a phi, so the logic below doesn't apply.
2061 Instead record an equivalence between the cached LHS and the
2062 PHI result of this statement, provided they are in the same block.
2063 This should be sufficient to kill the redundant phi. */
2065 if (def && cached_lhs)
2066 const_and_copies->record_const_or_copy (def, cached_lhs);
2067 return;
2069 else
2070 gcc_unreachable ();
2072 if (!cached_lhs)
2073 return;
2075 /* It is safe to ignore types here since we have already done
2076 type checking in the hashing and equality routines. In fact
2077 type checking here merely gets in the way of constant
2078 propagation. Also, make sure that it is safe to propagate
2079 CACHED_LHS into the expression in STMT. */
2080 if ((TREE_CODE (cached_lhs) != SSA_NAME
2081 && (assigns_var_p
2082 || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
2083 || may_propagate_copy_into_stmt (stmt, cached_lhs))
2085 gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
2086 || is_gimple_min_invariant (cached_lhs));
2088 if (dump_file && (dump_flags & TDF_DETAILS))
2090 fprintf (dump_file, " Replaced redundant expr '");
2091 print_gimple_expr (dump_file, stmt, 0, dump_flags);
2092 fprintf (dump_file, "' with '");
2093 print_generic_expr (dump_file, cached_lhs, dump_flags);
2094 fprintf (dump_file, "'\n");
2097 opt_stats.num_re++;
2099 if (assigns_var_p
2100 && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
2101 cached_lhs = fold_convert (expr_type, cached_lhs);
2103 propagate_tree_value_into_stmt (gsi, cached_lhs);
2105 /* Since it is always necessary to mark the result as modified,
2106 perhaps we should move this into propagate_tree_value_into_stmt
2107 itself. */
2108 gimple_set_modified (gsi_stmt (*gsi), true);
2112 /* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
2113 the available expressions table or the const_and_copies table.
2114 Detect and record those equivalences. */
2115 /* We handle only very simple copy equivalences here. The heavy
2116	   lifting is done by eliminate_redundant_computations.  */
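/* E.g. (hypothetical names), x_1 = y_2 records y_2 as the current
   value of x_1 in CONST_AND_COPIES, and x_1 = 42 records the
   constant 42; cprop_operand may then rewrite later uses of x_1.  */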
2118 static void
2119 record_equivalences_from_stmt (gimple stmt, int may_optimize_p)
2121 tree lhs;
2122 enum tree_code lhs_code;
2124 gcc_assert (is_gimple_assign (stmt));
2126 lhs = gimple_assign_lhs (stmt);
2127 lhs_code = TREE_CODE (lhs);
2129 if (lhs_code == SSA_NAME
2130 && gimple_assign_single_p (stmt))
2132 tree rhs = gimple_assign_rhs1 (stmt);
2134 /* If the RHS of the assignment is a constant or another variable that
2135 may be propagated, register it in the CONST_AND_COPIES table. We
2136 do not need to record unwind data for this, since this is a true
2137 assignment and not an equivalence inferred from a comparison. All
2138 uses of this ssa name are dominated by this assignment, so unwinding
2139 just costs time and space. */
2140 if (may_optimize_p
2141 && (TREE_CODE (rhs) == SSA_NAME
2142 || is_gimple_min_invariant (rhs)))
2144 /* Valueize rhs. */
2145 if (TREE_CODE (rhs) == SSA_NAME)
2147 tree tmp = SSA_NAME_VALUE (rhs);
2148 rhs = tmp ? tmp : rhs;
2151 if (dump_file && (dump_flags & TDF_DETAILS))
2153 fprintf (dump_file, "==== ASGN ");
2154 print_generic_expr (dump_file, lhs, 0);
2155 fprintf (dump_file, " = ");
2156 print_generic_expr (dump_file, rhs, 0);
2157 fprintf (dump_file, "\n");
2160 set_ssa_name_value (lhs, rhs);
2164 /* Make sure we can propagate &x + CST. */
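/* For instance (hypothetical names), for

     ptr_1 = &a + 4;

   we record an equivalent rhs of roughly the form &MEM[&a + 4],
   which folds more readily when propagated into dereferences.  */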
2165 if (lhs_code == SSA_NAME
2166 && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
2167 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
2168 && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
2170 tree op0 = gimple_assign_rhs1 (stmt);
2171 tree op1 = gimple_assign_rhs2 (stmt);
2172 tree new_rhs
2173 = build_fold_addr_expr (fold_build2 (MEM_REF,
2174 TREE_TYPE (TREE_TYPE (op0)),
2175 unshare_expr (op0),
2176 fold_convert (ptr_type_node,
2177 op1)));
2178 if (dump_file && (dump_flags & TDF_DETAILS))
2180 fprintf (dump_file, "==== ASGN ");
2181 print_generic_expr (dump_file, lhs, 0);
2182 fprintf (dump_file, " = ");
2183 print_generic_expr (dump_file, new_rhs, 0);
2184 fprintf (dump_file, "\n");
2187 set_ssa_name_value (lhs, new_rhs);
2190 /* A memory store, even an aliased store, creates a useful
2191 equivalence. By exchanging the LHS and RHS, creating suitable
2192 vops and recording the result in the available expression table,
2193 we may be able to expose more redundant loads. */
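/* Sketch (hypothetical names): after the store

     *p_1 = x_2;

   the artificial load  x_2 = *p_1  is entered into the table, so a
   dominated load

     y_3 = *p_1;

   can later be recognized as redundant and replaced by y_3 = x_2.  */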
2194 if (!gimple_has_volatile_ops (stmt)
2195 && gimple_references_memory_p (stmt)
2196 && gimple_assign_single_p (stmt)
2197 && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
2198 || is_gimple_min_invariant (gimple_assign_rhs1 (stmt)))
2199 && !is_gimple_reg (lhs))
2201 tree rhs = gimple_assign_rhs1 (stmt);
2202 gassign *new_stmt;
2204 /* Build a new statement with the RHS and LHS exchanged. */
2205 if (TREE_CODE (rhs) == SSA_NAME)
2207 /* NOTE tuples. The call to gimple_build_assign below replaced
2208 a call to build_gimple_modify_stmt, which did not set the
2209 SSA_NAME_DEF_STMT on the LHS of the assignment. Doing so
2210 may cause an SSA validation failure, as the LHS may be a
2211 default-initialized name and should have no definition. I'm
2212 a bit dubious of this, as the artificial statement that we
2213 generate here may in fact be ill-formed, but it is simply
2214 used as an internal device in this pass, and never becomes
2215 part of the CFG. */
2216 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2217 new_stmt = gimple_build_assign (rhs, lhs);
2218 SSA_NAME_DEF_STMT (rhs) = defstmt;
2220 else
2221 new_stmt = gimple_build_assign (rhs, lhs);
2223 gimple_set_vuse (new_stmt, gimple_vdef (stmt));
2225 /* Finally enter the statement into the available expression
2226 table. */
2227 lookup_avail_expr (new_stmt, true);
2231 /* Replace *OP_P in STMT with any known equivalent value for *OP_P from
2232 CONST_AND_COPIES. */
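/* E.g. (hypothetical names), if CONST_AND_COPIES maps a_1 to the
   constant 7, a use of a_1 in

     b_2 = a_1 * c_3;

   is rewritten to  b_2 = 7 * c_3, subject to the safety checks
   below.  */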
2234 static void
2235 cprop_operand (gimple stmt, use_operand_p op_p)
2237 tree val;
2238 tree op = USE_FROM_PTR (op_p);
2240 /* If the operand has a known constant value or it is known to be a
2241 copy of some other variable, use the value or copy stored in
2242 CONST_AND_COPIES. */
2243 val = SSA_NAME_VALUE (op);
2244 if (val && val != op)
2246 /* Do not replace hard register operands in asm statements. */
2247 if (gimple_code (stmt) == GIMPLE_ASM
2248 && !may_propagate_copy_into_asm (op))
2249 return;
2251 /* Certain operands are not allowed to be copy propagated due
2252 to their interaction with exception handling and some GCC
2253 extensions. */
2254 if (!may_propagate_copy (op, val))
2255 return;
2257 /* Do not propagate copies into BIVs.
2258 See PR23821 and PR62217 for how this can disturb IV and
2259 number of iteration analysis. */
2260 if (TREE_CODE (val) != INTEGER_CST)
2262 gimple def = SSA_NAME_DEF_STMT (op);
2263 if (gimple_code (def) == GIMPLE_PHI
2264 && gimple_bb (def)->loop_father->header == gimple_bb (def))
2265 return;
2268 /* Dump details. */
2269 if (dump_file && (dump_flags & TDF_DETAILS))
2271 fprintf (dump_file, " Replaced '");
2272 print_generic_expr (dump_file, op, dump_flags);
2273 fprintf (dump_file, "' with %s '",
2274 (TREE_CODE (val) != SSA_NAME ? "constant" : "variable"));
2275 print_generic_expr (dump_file, val, dump_flags);
2276 fprintf (dump_file, "'\n");
2279 if (TREE_CODE (val) != SSA_NAME)
2280 opt_stats.num_const_prop++;
2281 else
2282 opt_stats.num_copy_prop++;
2284 propagate_value (op_p, val);
2286 /* And note that we modified this statement. This is now
2287 safe, even if we changed virtual operands since we will
2288 rescan the statement and rewrite its operands again. */
2289 gimple_set_modified (stmt, true);
2293 /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current
2294 known value for that SSA_NAME (or NULL if no value is known).
2296 Propagate values from CONST_AND_COPIES into the uses, vuses and
2297 vdef_ops of STMT. */
2299 static void
2300 cprop_into_stmt (gimple stmt)
2302 use_operand_p op_p;
2303 ssa_op_iter iter;
2305 FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE)
2306 cprop_operand (stmt, op_p);
2309 /* Optimize the statement pointed to by iterator SI.
2311 We try to perform some simplistic global redundancy elimination and
2312 constant propagation:
2314 1- To detect global redundancy, we keep track of expressions that have
2315 been computed in this block and its dominators. If we find that the
2316 same expression is computed more than once, we eliminate repeated
2317 computations by using the target of the first one.
2319 2- Constant values and copy assignments. This is used to do very
2320 simplistic constant and copy propagation. When a constant or copy
2321 assignment is found, we map the value on the RHS of the assignment to
2322 the variable in the LHS in the CONST_AND_COPIES table. */
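/* A compressed illustration of both effects (hypothetical names):

     a_1 = 4;             records a_1 = 4 in CONST_AND_COPIES
     b_2 = a_1 + x_3;     cprop rewrites this to b_2 = 4 + x_3
     c_4 = a_1 + x_3;     redundant after cprop; becomes c_4 = b_2

   Equivalences recorded here remain visible while processing the
   dominated blocks.  */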
2324 static void
2325 optimize_stmt (basic_block bb, gimple_stmt_iterator si)
2327 gimple stmt, old_stmt;
2328 bool may_optimize_p;
2329 bool modified_p = false;
2330 bool was_noreturn;
2332 old_stmt = stmt = gsi_stmt (si);
2333 was_noreturn = is_gimple_call (stmt) && gimple_call_noreturn_p (stmt);
2335 if (dump_file && (dump_flags & TDF_DETAILS))
2337 fprintf (dump_file, "Optimizing statement ");
2338 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2341 if (gimple_code (stmt) == GIMPLE_COND)
2342 canonicalize_comparison (as_a <gcond *> (stmt));
2344 update_stmt_if_modified (stmt);
2345 opt_stats.num_stmts++;
2347 /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
2348 cprop_into_stmt (stmt);
2350 /* If the statement has been modified with constant replacements,
2351 fold its RHS before checking for redundant computations. */
2352 if (gimple_modified_p (stmt))
2354 tree rhs = NULL;
2356 /* Try to fold the statement making sure that STMT is kept
2357 up to date. */
2358 if (fold_stmt (&si))
2360 stmt = gsi_stmt (si);
2361 gimple_set_modified (stmt, true);
2363 if (dump_file && (dump_flags & TDF_DETAILS))
2365 fprintf (dump_file, " Folded to: ");
2366 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2370 /* We only need to consider cases that can yield a gimple operand. */
2371 if (gimple_assign_single_p (stmt))
2372 rhs = gimple_assign_rhs1 (stmt);
2373 else if (gimple_code (stmt) == GIMPLE_GOTO)
2374 rhs = gimple_goto_dest (stmt);
2375 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2376 /* This should never be an ADDR_EXPR. */
2377 rhs = gimple_switch_index (swtch_stmt);
2379 if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
2380 recompute_tree_invariant_for_addr_expr (rhs);
2382 /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
2383 even if fold_stmt updated the stmt already and thus cleared
2384 gimple_modified_p flag on it. */
2385 modified_p = true;
2388	  /* Check for redundant computations.  Do this optimization only
2389	 for side-effect-free assignments, calls with an LHS, conditionals and switches.  */
2390 may_optimize_p = (!gimple_has_side_effects (stmt)
2391 && (is_gimple_assign (stmt)
2392 || (is_gimple_call (stmt)
2393 && gimple_call_lhs (stmt) != NULL_TREE)
2394 || gimple_code (stmt) == GIMPLE_COND
2395 || gimple_code (stmt) == GIMPLE_SWITCH));
2397 if (may_optimize_p)
2399 if (gimple_code (stmt) == GIMPLE_CALL)
2401 /* Resolve __builtin_constant_p. If it hasn't been
2402 folded to integer_one_node by now, it's fairly
2403 certain that the value simply isn't constant. */
2404 tree callee = gimple_call_fndecl (stmt);
2405 if (callee
2406 && DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL
2407 && DECL_FUNCTION_CODE (callee) == BUILT_IN_CONSTANT_P)
2409 propagate_tree_value_into_stmt (&si, integer_zero_node);
2410 stmt = gsi_stmt (si);
2414 update_stmt_if_modified (stmt);
2415 eliminate_redundant_computations (&si);
2416 stmt = gsi_stmt (si);
2418 /* Perform simple redundant store elimination. */
2419 if (gimple_assign_single_p (stmt)
2420 && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2422 tree lhs = gimple_assign_lhs (stmt);
2423 tree rhs = gimple_assign_rhs1 (stmt);
2424 tree cached_lhs;
2425 gassign *new_stmt;
2426 if (TREE_CODE (rhs) == SSA_NAME)
2428 tree tem = SSA_NAME_VALUE (rhs);
2429 if (tem)
2430 rhs = tem;
2432 /* Build a new statement with the RHS and LHS exchanged. */
2433 if (TREE_CODE (rhs) == SSA_NAME)
2435 gimple defstmt = SSA_NAME_DEF_STMT (rhs);
2436 new_stmt = gimple_build_assign (rhs, lhs);
2437 SSA_NAME_DEF_STMT (rhs) = defstmt;
2439 else
2440 new_stmt = gimple_build_assign (rhs, lhs);
2441 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
2442 cached_lhs = lookup_avail_expr (new_stmt, false);
2443 if (cached_lhs
2444 && rhs == cached_lhs)
2446 basic_block bb = gimple_bb (stmt);
2447 unlink_stmt_vdef (stmt);
2448 if (gsi_remove (&si, true))
2450 bitmap_set_bit (need_eh_cleanup, bb->index);
2451 if (dump_file && (dump_flags & TDF_DETAILS))
2452 fprintf (dump_file, " Flagged to clear EH edges.\n");
2454 release_defs (stmt);
2455 return;
2460 /* Record any additional equivalences created by this statement. */
2461 if (is_gimple_assign (stmt))
2462 record_equivalences_from_stmt (stmt, may_optimize_p);
2464 /* If STMT is a COND_EXPR and it was modified, then we may know
2465 where it goes. If that is the case, then mark the CFG as altered.
2467 This will cause us to later call remove_unreachable_blocks and
2468 cleanup_tree_cfg when it is safe to do so. It is not safe to
2469 clean things up here since removal of edges and such can trigger
2470 the removal of PHI nodes, which in turn can release SSA_NAMEs to
2471 the manager.
2473 That's all fine and good, except that once SSA_NAMEs are released
2474 to the manager, we must not call create_ssa_name until all references
2475 to released SSA_NAMEs have been eliminated.
2477	 All references to the deleted SSA_NAMEs cannot be eliminated until
2478	 we remove unreachable blocks.
2480	 We cannot remove unreachable blocks until after we have completed
2481	 any queued jump threading.
2483	 We cannot complete any queued jump threads until we have taken
2484 appropriate variables out of SSA form. Taking variables out of
2485 SSA form can call create_ssa_name and thus we lose.
2487 Ultimately I suspect we're going to need to change the interface
2488 into the SSA_NAME manager. */
2489 if (gimple_modified_p (stmt) || modified_p)
2491 tree val = NULL;
2493 update_stmt_if_modified (stmt);
2495 if (gimple_code (stmt) == GIMPLE_COND)
2496 val = fold_binary_loc (gimple_location (stmt),
2497 gimple_cond_code (stmt), boolean_type_node,
2498 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
2499 else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
2500 val = gimple_switch_index (swtch_stmt);
2502 if (val && TREE_CODE (val) == INTEGER_CST && find_taken_edge (bb, val))
2503 cfg_altered = true;
2505	      /* If we simplified a statement in such a way that it can be shown
2506	 not to trap, update the EH information and the CFG to match.  */
2507 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
2509 bitmap_set_bit (need_eh_cleanup, bb->index);
2510 if (dump_file && (dump_flags & TDF_DETAILS))
2511 fprintf (dump_file, " Flagged to clear EH edges.\n");
2514 if (!was_noreturn
2515 && is_gimple_call (stmt) && gimple_call_noreturn_p (stmt))
2516 need_noreturn_fixup.safe_push (stmt);
2520 /* Helper for walk_non_aliased_vuses. Determine if we arrived at
2521 the desired memory state. */
2523 static void *
2524 vuse_eq (ao_ref *, tree vuse1, unsigned int cnt, void *data)
2526 tree vuse2 = (tree) data;
2527 if (vuse1 == vuse2)
2528 return data;
2530 /* This bounds the stmt walks we perform on reference lookups
2531 to O(1) instead of O(N) where N is the number of dominating
2532 stores leading to a candidate. We re-use the SCCVN param
2533 for this as it is basically the same complexity. */
2534 if (cnt > (unsigned) PARAM_VALUE (PARAM_SCCVN_MAX_ALIAS_QUERIES_PER_ACCESS))
2535 return (void *)-1;
2537 return NULL;
2540 /* Search for an existing instance of STMT in the AVAIL_EXPRS table.
2541 If found, return its LHS. Otherwise insert STMT in the table and
2542 return NULL_TREE.
2544	   Also, when an expression is first inserted in the table, it is
2545	   added to AVAIL_EXPRS_STACK, so that it can be removed when
2546 we finish processing this block and its children. */
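/* E.g. (hypothetical names): looking up  t_5 = x_1 + y_2  with
   INSERT set either finds an earlier  s_4 = x_1 + y_2  and returns
   s_4, or records t_5 as the available instance of the expression
   and returns NULL_TREE.  */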
2548 static tree
2549 lookup_avail_expr (gimple stmt, bool insert)
2551 expr_hash_elt **slot;
2552 tree lhs;
2553 tree temp;
2554 struct expr_hash_elt element;
2556 /* Get LHS of phi, assignment, or call; else NULL_TREE. */
2557 if (gimple_code (stmt) == GIMPLE_PHI)
2558 lhs = gimple_phi_result (stmt);
2559 else
2560 lhs = gimple_get_lhs (stmt);
2562 initialize_hash_element (stmt, lhs, &element);
2564 if (dump_file && (dump_flags & TDF_DETAILS))
2566 fprintf (dump_file, "LKUP ");
2567 print_expr_hash_elt (dump_file, &element);
2570 /* Don't bother remembering constant assignments and copy operations.
2571 Constants and copy operations are handled by the constant/copy propagator
2572 in optimize_stmt. */
2573 if (element.expr.kind == EXPR_SINGLE
2574 && (TREE_CODE (element.expr.ops.single.rhs) == SSA_NAME
2575 || is_gimple_min_invariant (element.expr.ops.single.rhs)))
2576 return NULL_TREE;
2578 /* Finally try to find the expression in the main expression hash table. */
2579 slot = avail_exprs->find_slot (&element, (insert ? INSERT : NO_INSERT));
2580 if (slot == NULL)
2582 free_expr_hash_elt_contents (&element);
2583 return NULL_TREE;
2585 else if (*slot == NULL)
2587 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2588 *element2 = element;
2589 element2->stamp = element2;
2590 *slot = element2;
2592 if (dump_file && (dump_flags & TDF_DETAILS))
2594 fprintf (dump_file, "2>>> ");
2595 print_expr_hash_elt (dump_file, element2);
2598 avail_exprs_stack.safe_push
2599 (std::pair<expr_hash_elt_t, expr_hash_elt_t> (element2, NULL));
2600 return NULL_TREE;
2603	  /* If we found a redundant memory operation, do an alias walk to
2604	 check if we can re-use it.  */
2605 if (gimple_vuse (stmt) != (*slot)->vop)
2607 tree vuse1 = (*slot)->vop;
2608 tree vuse2 = gimple_vuse (stmt);
2609 /* If we have a load of a register and a candidate in the
2610 hash with vuse1 then try to reach its stmt by walking
2611 up the virtual use-def chain using walk_non_aliased_vuses.
2612 But don't do this when removing expressions from the hash. */
2613 ao_ref ref;
2614 if (!(vuse1 && vuse2
2615 && gimple_assign_single_p (stmt)
2616 && TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME
2617 && (ao_ref_init (&ref, gimple_assign_rhs1 (stmt)), true)
2618 && walk_non_aliased_vuses (&ref, vuse2,
2619 vuse_eq, NULL, NULL, vuse1) != NULL))
2621 if (insert)
2623 struct expr_hash_elt *element2 = XNEW (struct expr_hash_elt);
2624 *element2 = element;
2625 element2->stamp = element2;
2627 /* Insert the expr into the hash by replacing the current
2628 entry and recording the value to restore in the
2629 avail_exprs_stack. */
2630 avail_exprs_stack.safe_push (std::make_pair (element2, *slot));
2631 *slot = element2;
2632 if (dump_file && (dump_flags & TDF_DETAILS))
2634 fprintf (dump_file, "2>>> ");
2635 print_expr_hash_elt (dump_file, *slot);
2638 return NULL_TREE;
2642 free_expr_hash_elt_contents (&element);
2644 /* Extract the LHS of the assignment so that it can be used as the current
2645 definition of another variable. */
2646 lhs = (*slot)->lhs;
2648 /* See if the LHS appears in the CONST_AND_COPIES table. If it does, then
2649 use the value from the const_and_copies table. */
2650 if (TREE_CODE (lhs) == SSA_NAME)
2652 temp = SSA_NAME_VALUE (lhs);
2653 if (temp)
2654 lhs = temp;
2657 if (dump_file && (dump_flags & TDF_DETAILS))
2659 fprintf (dump_file, "FIND: ");
2660 print_generic_expr (dump_file, lhs, 0);
2661 fprintf (dump_file, "\n");
2664 return lhs;
2667 /* Hashing and equality functions for AVAIL_EXPRS. We compute a value number
2668 for expressions using the code of the expression and the SSA numbers of
2669 its operands. */
2671 static hashval_t
2672 avail_expr_hash (const void *p)
2674 const struct hashable_expr *expr = &((const struct expr_hash_elt *)p)->expr;
2675 inchash::hash hstate;
2677 inchash::add_hashable_expr (expr, hstate);
2679 return hstate.end ();
2682 /* PHI-ONLY copy and constant propagation. This pass is meant to clean
2683 up degenerate PHIs created by or exposed by jump threading. */
2685 /* Given a statement STMT, which is either a PHI node or an assignment,
2686 remove it from the IL. */
2688 static void
2689 remove_stmt_or_phi (gimple stmt)
2691 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2693 if (gimple_code (stmt) == GIMPLE_PHI)
2694 remove_phi_node (&gsi, true);
2695 else
2697 gsi_remove (&gsi, true);
2698 release_defs (stmt);
2702	/* Given a statement STMT, which is either a PHI node or an assignment,
2703	   return the "rhs" of the node.  In the case of a non-degenerate
2704	   PHI, NULL is returned.  */
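/* E.g. (hypothetical names), for the degenerate PHI

     x_1 = PHI <y_2(3), y_2(4)>

   this returns y_2, and for the copy  x_1 = y_2  it returns y_2 as
   well.  */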
2706 static tree
2707 get_rhs_or_phi_arg (gimple stmt)
2709 if (gimple_code (stmt) == GIMPLE_PHI)
2710 return degenerate_phi_result (as_a <gphi *> (stmt));
2711 else if (gimple_assign_single_p (stmt))
2712 return gimple_assign_rhs1 (stmt);
2713 else
2714 gcc_unreachable ();
2718 /* Given a statement STMT, which is either a PHI node or an assignment,
2719 return the "lhs" of the node. */
2721 static tree
2722 get_lhs_or_phi_result (gimple stmt)
2724 if (gimple_code (stmt) == GIMPLE_PHI)
2725 return gimple_phi_result (stmt);
2726 else if (is_gimple_assign (stmt))
2727 return gimple_assign_lhs (stmt);
2728 else
2729 gcc_unreachable ();
2732 /* Propagate RHS into all uses of LHS (when possible).
2734 RHS and LHS are derived from STMT, which is passed in solely so
2735 that we can remove it if propagation is successful.
2737 When propagating into a PHI node or into a statement which turns
2738 into a trivial copy or constant initialization, set the
2739 appropriate bit in INTERESTING_NAMEs so that we will visit those
2740 nodes as well in an effort to pick up secondary optimization
2741 opportunities. */
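/* Sketch (hypothetical names): propagating RHS y_2 for LHS x_1
   rewrites

     z_3 = x_1 + 1;              to      z_3 = y_2 + 1;
     # w_4 = PHI <x_1(5), ...>   to      # w_4 = PHI <y_2(5), ...>

   and the PHI's result is marked in INTERESTING_NAMES, since the
   PHI may now have become degenerate.  */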
2743 static void
2744 propagate_rhs_into_lhs (gimple stmt, tree lhs, tree rhs, bitmap interesting_names)
2746 /* First verify that propagation is valid. */
2747 if (may_propagate_copy (lhs, rhs))
2749 use_operand_p use_p;
2750 imm_use_iterator iter;
2751 gimple use_stmt;
2752 bool all = true;
2754 /* Dump details. */
2755 if (dump_file && (dump_flags & TDF_DETAILS))
2757 fprintf (dump_file, " Replacing '");
2758 print_generic_expr (dump_file, lhs, dump_flags);
2759 fprintf (dump_file, "' with %s '",
2760 (TREE_CODE (rhs) != SSA_NAME ? "constant" : "variable"));
2761 print_generic_expr (dump_file, rhs, dump_flags);
2762 fprintf (dump_file, "'\n");
2765 /* Walk over every use of LHS and try to replace the use with RHS.
2766 At this point the only reason why such a propagation would not
2767 be successful would be if the use occurs in an ASM_EXPR. */
2768 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2770 /* Leave debug stmts alone. If we succeed in propagating
2771 all non-debug uses, we'll drop the DEF, and propagation
2772 into debug stmts will occur then. */
2773 if (gimple_debug_bind_p (use_stmt))
2774 continue;
2776 /* It's not always safe to propagate into an ASM_EXPR. */
2777 if (gimple_code (use_stmt) == GIMPLE_ASM
2778 && ! may_propagate_copy_into_asm (lhs))
2780 all = false;
2781 continue;
2784 /* It's not ok to propagate into the definition stmt of RHS.
2785 <bb 9>:
2786 # prephitmp.12_36 = PHI <g_67.1_6(9)>
2787 g_67.1_6 = prephitmp.12_36;
2788 goto <bb 9>;
2789	     While this is strictly all dead code, we do not want to
2790	     deal with it here.  */
2791 if (TREE_CODE (rhs) == SSA_NAME
2792 && SSA_NAME_DEF_STMT (rhs) == use_stmt)
2794 all = false;
2795 continue;
2798 /* Dump details. */
2799 if (dump_file && (dump_flags & TDF_DETAILS))
2801 fprintf (dump_file, " Original statement:");
2802 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2805 /* Propagate the RHS into this use of the LHS. */
2806 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2807 propagate_value (use_p, rhs);
2809 /* Special cases to avoid useless calls into the folding
2810 routines, operand scanning, etc.
2812 Propagation into a PHI may cause the PHI to become
2813 a degenerate, so mark the PHI as interesting. No other
2814 actions are necessary. */
2815 if (gimple_code (use_stmt) == GIMPLE_PHI)
2817 tree result;
2819 /* Dump details. */
2820 if (dump_file && (dump_flags & TDF_DETAILS))
2822 fprintf (dump_file, " Updated statement:");
2823 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2826 result = get_lhs_or_phi_result (use_stmt);
2827 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2828 continue;
2831 /* From this point onward we are propagating into a
2832 real statement. Folding may (or may not) be possible,
2833 we may expose new operands, expose dead EH edges,
2834 etc. */
2835 /* NOTE tuples. In the tuples world, fold_stmt_inplace
2836 cannot fold a call that simplifies to a constant,
2837 because the GIMPLE_CALL must be replaced by a
2838 GIMPLE_ASSIGN, and there is no way to effect such a
2839 transformation in-place. We might want to consider
2840 using the more general fold_stmt here. */
2842 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2843 fold_stmt_inplace (&gsi);
2846 /* Sometimes propagation can expose new operands to the
2847 renamer. */
2848 update_stmt (use_stmt);
2850 /* Dump details. */
2851 if (dump_file && (dump_flags & TDF_DETAILS))
2853 fprintf (dump_file, " Updated statement:");
2854 print_gimple_stmt (dump_file, use_stmt, 0, dump_flags);
2857 /* If we replaced a variable index with a constant, then
2858 we would need to update the invariant flag for ADDR_EXPRs. */
2859 if (gimple_assign_single_p (use_stmt)
2860 && TREE_CODE (gimple_assign_rhs1 (use_stmt)) == ADDR_EXPR)
2861 recompute_tree_invariant_for_addr_expr
2862 (gimple_assign_rhs1 (use_stmt));
2864 /* If we cleaned up EH information from the statement,
2865 mark its containing block as needing EH cleanups. */
2866 if (maybe_clean_or_replace_eh_stmt (use_stmt, use_stmt))
2868 bitmap_set_bit (need_eh_cleanup, gimple_bb (use_stmt)->index);
2869 if (dump_file && (dump_flags & TDF_DETAILS))
2870 fprintf (dump_file, " Flagged to clear EH edges.\n");
2873 /* Propagation may expose new trivial copy/constant propagation
2874 opportunities. */
2875 if (gimple_assign_single_p (use_stmt)
2876 && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
2877 && (TREE_CODE (gimple_assign_rhs1 (use_stmt)) == SSA_NAME
2878 || is_gimple_min_invariant (gimple_assign_rhs1 (use_stmt))))
2880 tree result = get_lhs_or_phi_result (use_stmt);
2881 bitmap_set_bit (interesting_names, SSA_NAME_VERSION (result));
2884 /* Propagation into these nodes may make certain edges in
2885	     the CFG unexecutable.  We want to identify such edges, as PHI
2886	     nodes at the destination of those unexecutable edges may become
2887	     degenerate.  */
2888 else if (gimple_code (use_stmt) == GIMPLE_COND
2889 || gimple_code (use_stmt) == GIMPLE_SWITCH
2890 || gimple_code (use_stmt) == GIMPLE_GOTO)
2892 tree val;
2894 if (gimple_code (use_stmt) == GIMPLE_COND)
2895 val = fold_binary_loc (gimple_location (use_stmt),
2896 gimple_cond_code (use_stmt),
2897 boolean_type_node,
2898 gimple_cond_lhs (use_stmt),
2899 gimple_cond_rhs (use_stmt));
2900 else if (gimple_code (use_stmt) == GIMPLE_SWITCH)
2901 val = gimple_switch_index (as_a <gswitch *> (use_stmt));
2902 else
2903 val = gimple_goto_dest (use_stmt);
2905 if (val && is_gimple_min_invariant (val))
2907 basic_block bb = gimple_bb (use_stmt);
2908 edge te = find_taken_edge (bb, val);
2909 if (!te)
2910 continue;
2912 edge_iterator ei;
2913 edge e;
2914 gimple_stmt_iterator gsi;
2915 gphi_iterator psi;
2917 /* Remove all outgoing edges except TE. */
2918 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei));)
2920 if (e != te)
2922 /* Mark all the PHI nodes at the destination of
2923 the unexecutable edge as interesting. */
2924 for (psi = gsi_start_phis (e->dest);
2925 !gsi_end_p (psi);
2926 gsi_next (&psi))
2928 gphi *phi = psi.phi ();
2930 tree result = gimple_phi_result (phi);
2931 int version = SSA_NAME_VERSION (result);
2933 bitmap_set_bit (interesting_names, version);
2936 te->probability += e->probability;
2938 te->count += e->count;
2939 remove_edge (e);
2940 cfg_altered = true;
2942 else
2943 ei_next (&ei);
2946 gsi = gsi_last_bb (gimple_bb (use_stmt));
2947 gsi_remove (&gsi, true);
2949 /* And fixup the flags on the single remaining edge. */
2950 te->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
2951 te->flags &= ~EDGE_ABNORMAL;
2952 te->flags |= EDGE_FALLTHRU;
2953 if (te->probability > REG_BR_PROB_BASE)
2954 te->probability = REG_BR_PROB_BASE;
2959 /* Ensure there is nothing else to do. */
2960 gcc_assert (!all || has_zero_uses (lhs));
2962 /* If we were able to propagate away all uses of LHS, then
2963 we can remove STMT. */
2964 if (all)
2965 remove_stmt_or_phi (stmt);
2969 /* STMT is either a PHI node (potentially a degenerate PHI node) or
2970 a statement that is a trivial copy or constant initialization.
2972	   Attempt to eliminate STMT by propagating its RHS into all uses of
2973 its LHS. This may in turn set new bits in INTERESTING_NAMES
2974 for nodes we want to revisit later.
2976 All exit paths should clear INTERESTING_NAMES for the result
2977 of STMT. */
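/* E.g. (hypothetical names): for  x_1 = PHI <y_2(3), y_2(4)>  we
   propagate y_2 into all uses of x_1 and remove the PHI; if x_1
   happens to have no uses at all, the statement is simply
   deleted.  */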
2979 static void
2980 eliminate_const_or_copy (gimple stmt, bitmap interesting_names)
2982 tree lhs = get_lhs_or_phi_result (stmt);
2983 tree rhs;
2984 int version = SSA_NAME_VERSION (lhs);
2986 /* If the LHS of this statement or PHI has no uses, then we can
2987 just eliminate it. This can occur if, for example, the PHI
2988 was created by block duplication due to threading and its only
2989 use was in the conditional at the end of the block which was
2990 deleted. */
2991 if (has_zero_uses (lhs))
2993 bitmap_clear_bit (interesting_names, version);
2994 remove_stmt_or_phi (stmt);
2995 return;
2998 /* Get the RHS of the assignment or PHI node if the PHI is a
2999 degenerate. */
3000 rhs = get_rhs_or_phi_arg (stmt);
3001 if (!rhs)
3003 bitmap_clear_bit (interesting_names, version);
3004 return;
3007 if (!virtual_operand_p (lhs))
3008 propagate_rhs_into_lhs (stmt, lhs, rhs, interesting_names);
3009 else
3011 gimple use_stmt;
3012 imm_use_iterator iter;
3013 use_operand_p use_p;
3014	      /* For virtual operands we have to propagate into all uses, as
3015	 otherwise we will create overlapping live ranges.  */
3016 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
3017 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3018 SET_USE (use_p, rhs);
3019 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
3020 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
3021 remove_stmt_or_phi (stmt);
3024	  /* Note that STMT may well have been deleted by now, so do
3025	     not access it; instead use the saved version # to clear
3026	     STMT's entry in the worklist.  */
3027 bitmap_clear_bit (interesting_names, version);
3030 /* The first phase in degenerate PHI elimination.
3032 Eliminate the degenerate PHIs in BB, then recurse on the
3033 dominator children of BB. */
3035 static void
3036 eliminate_degenerate_phis_1 (basic_block bb, bitmap interesting_names)
3038 gphi_iterator gsi;
3039 basic_block son;
3041 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3043 gphi *phi = gsi.phi ();
3045 eliminate_const_or_copy (phi, interesting_names);
3048 /* Recurse into the dominator children of BB. */
3049 for (son = first_dom_son (CDI_DOMINATORS, bb);
3050 son;
3051 son = next_dom_son (CDI_DOMINATORS, son))
3052 eliminate_degenerate_phis_1 (son, interesting_names);
3056 /* A very simple pass to eliminate degenerate PHI nodes from the
3057 IL. This is meant to be fast enough to be able to be run several
3058 times in the optimization pipeline.
3060 Certain optimizations, particularly those which duplicate blocks
3061	   or remove edges from the CFG, can create or expose PHIs which are
3062 trivial copies or constant initializations.
3064 While we could pick up these optimizations in DOM or with the
3065 combination of copy-prop and CCP, those solutions are far too
3066 heavy-weight for our needs.
3068 This implementation has two phases so that we can efficiently
3069 eliminate the first order degenerate PHIs and second order
3070 degenerate PHIs.
3072 The first phase performs a dominator walk to identify and eliminate
3073 the vast majority of the degenerate PHIs. When a degenerate PHI
3074 is identified and eliminated any affected statements or PHIs
3075 are put on a worklist.
3077 The second phase eliminates degenerate PHIs and trivial copies
3078 or constant initializations using the worklist. This is how we
3079 pick up the secondary optimization opportunities with minimal
3080 cost. */
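/* A second order example (hypothetical names): eliminating

     x_1 = PHI <y_2(3), y_2(4)>

   by replacing x_1 with y_2 may turn

     z_5 = PHI <x_1(6), y_2(7)>   into   z_5 = PHI <y_2(6), y_2(7)>

   which is itself degenerate; the worklist phase then picks it
   up.  */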
3082 namespace {
3084 const pass_data pass_data_phi_only_cprop =
3086 GIMPLE_PASS, /* type */
3087 "phicprop", /* name */
3088 OPTGROUP_NONE, /* optinfo_flags */
3089 TV_TREE_PHI_CPROP, /* tv_id */
3090 ( PROP_cfg | PROP_ssa ), /* properties_required */
3091 0, /* properties_provided */
3092 0, /* properties_destroyed */
3093 0, /* todo_flags_start */
3094 ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
3097 class pass_phi_only_cprop : public gimple_opt_pass
3099 public:
3100 pass_phi_only_cprop (gcc::context *ctxt)
3101 : gimple_opt_pass (pass_data_phi_only_cprop, ctxt)
3104 /* opt_pass methods: */
3105 opt_pass * clone () { return new pass_phi_only_cprop (m_ctxt); }
3106 virtual bool gate (function *) { return flag_tree_dom != 0; }
3107 virtual unsigned int execute (function *);
3109 }; // class pass_phi_only_cprop
3111 unsigned int
3112 pass_phi_only_cprop::execute (function *fun)
3114 bitmap interesting_names;
3115 bitmap interesting_names1;
3117	  /* Bitmap of blocks which need EH information updated.  We cannot
3118	 update it on-the-fly, as doing so invalidates the dominator tree.  */
3119 need_eh_cleanup = BITMAP_ALLOC (NULL);
3121 /* INTERESTING_NAMES is effectively our worklist, indexed by
3122 SSA_NAME_VERSION.
3124 A set bit indicates that the statement or PHI node which
3125 defines the SSA_NAME should be (re)examined to determine if
3126 it has become a degenerate PHI or trivial const/copy propagation
3127 opportunity.
3129	     Experiments have shown that we generally get better
3130	     compile-time behavior with bitmaps rather than sbitmaps.  */
3131 interesting_names = BITMAP_ALLOC (NULL);
3132 interesting_names1 = BITMAP_ALLOC (NULL);
3134 calculate_dominance_info (CDI_DOMINATORS);
3135 cfg_altered = false;
3137 /* First phase. Eliminate degenerate PHIs via a dominator
3138 walk of the CFG.
3140 Experiments have indicated that we generally get better
3141 compile-time behavior by visiting blocks in the first
3142 phase in dominator order. Presumably this is because walking
3143 in dominator order leaves fewer PHIs for later examination
3144 by the worklist phase. */
3145 eliminate_degenerate_phis_1 (ENTRY_BLOCK_PTR_FOR_FN (fun),
3146 interesting_names);
3148 /* Second phase. Eliminate second order degenerate PHIs as well
3149 as trivial copies or constant initializations identified by
3150 the first phase or this phase. Basically we keep iterating
3151 until our set of INTERESTING_NAMEs is empty. */
3152 while (!bitmap_empty_p (interesting_names))
3154 unsigned int i;
3155 bitmap_iterator bi;
3157 /* EXECUTE_IF_SET_IN_BITMAP does not like its bitmap
3158 changed during the loop. Copy it to another bitmap and
3159 use that. */
3160 bitmap_copy (interesting_names1, interesting_names);
3162 EXECUTE_IF_SET_IN_BITMAP (interesting_names1, 0, i, bi)
3164 tree name = ssa_name (i);
3166 /* Ignore SSA_NAMEs that have been released because
3167 their defining statement was deleted (unreachable). */
3168 if (name)
3169 eliminate_const_or_copy (SSA_NAME_DEF_STMT (ssa_name (i)),
3170 interesting_names);
3174 if (cfg_altered)
3176 free_dominance_info (CDI_DOMINATORS);
3177 /* If we changed the CFG schedule loops for fixup by cfgcleanup. */
3178 loops_state_set (LOOPS_NEED_FIXUP);
3181 /* Propagation of const and copies may make some EH edges dead. Purge
3182 such edges from the CFG as needed. */
3183 if (!bitmap_empty_p (need_eh_cleanup))
3185 gimple_purge_all_dead_eh_edges (need_eh_cleanup);
3186 BITMAP_FREE (need_eh_cleanup);
3189 BITMAP_FREE (interesting_names);
3190 BITMAP_FREE (interesting_names1);
3191 return 0;
3194 } // anon namespace
3196 gimple_opt_pass *
3197 make_pass_phi_only_cprop (gcc::context *ctxt)
3199 return new pass_phi_only_cprop (ctxt);